Use of org.apache.helix.integration.manager.ClusterControllerManager in project helix by apache.
From the class TestHelixAdminScenariosRest, method testInstanceOperations.
@Test
public void testInstanceOperations() throws Exception {
final String clusterName = "clusterTestInstanceOperations";
// setup cluster
addCluster(clusterName);
addInstancesToCluster(clusterName, "localhost:123", 6, null);
addResource(clusterName, "db_11", 8);
rebalanceResource(clusterName, "db_11");
ClusterControllerManager controller = new ClusterControllerManager(ZK_ADDR, clusterName, "controller_9900");
controller.syncStart();
// start mock nodes
Map<String, MockParticipantManager> participants = new HashMap<String, MockParticipantManager>();
for (int i = 0; i < 6; i++) {
String instanceName = "localhost_123" + i;
MockParticipantManager participant = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participant.syncStart();
participants.put(instanceName, participant);
}
HelixDataAccessor accessor;
// dropping the node should fail because it has not been disabled yet
String instanceUrl = getInstanceUrl(clusterName, "localhost_1232");
deleteUrl(instanceUrl, true);
// disable the node
String response = assertSuccessPostOperation(instanceUrl, enableInstanceCmd(false), false);
Assert.assertTrue(response.contains("false"));
// drop and swap should still fail at this point
deleteUrl(instanceUrl, true);
String instancesUrl = getClusterUrl(clusterName) + "/instances";
response = assertSuccessPostOperation(instancesUrl, swapInstanceCmd("localhost_1232", "localhost_12320"), true);
// disconnect the node
participants.get("localhost_1232").syncStop();
// add new node then swap instance
response = assertSuccessPostOperation(instancesUrl, addInstanceCmd("localhost_12320"), false);
Assert.assertTrue(response.contains("localhost_12320"));
// swap the instances; the instance that was swapped out should no longer exist
response = assertSuccessPostOperation(instancesUrl, swapInstanceCmd("localhost_1232", "localhost_12320"), false);
Assert.assertTrue(response.contains("localhost_12320"));
Assert.assertFalse(response.contains("localhost_1232\""));
accessor = participants.get("localhost_1231").getHelixDataAccessor();
String path = accessor.keyBuilder().instanceConfig("localhost_1232").getPath();
Assert.assertFalse(_gZkClient.exists(path));
MockParticipantManager newParticipant = new MockParticipantManager(ZK_ADDR, clusterName, "localhost_12320");
newParticipant.syncStart();
participants.put("localhost_12320", newParticipant);
boolean verifyResult = ClusterStateVerifier.verifyByZkCallback(new MasterNbInExtViewVerifier(ZK_ADDR, clusterName));
Assert.assertTrue(verifyResult);
// clean up
controller.syncStop();
for (MockParticipantManager participant : participants.values()) {
participant.syncStop();
}
}
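Every snippet on this page follows the same controller/participant lifecycle: construct the manager, syncStart() it, and syncStop() it during cleanup. A minimal standalone sketch of that pattern, using only the constructors and methods shown above (ZK_ADDR and clusterName are assumed to refer to an already-created cluster):
// Minimal lifecycle sketch; assumes ZK_ADDR and clusterName point at an existing cluster.
ClusterControllerManager controller =
    new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
controller.syncStart(); // start the controller and wait until it is connected

MockParticipantManager participant =
    new MockParticipantManager(ZK_ADDR, clusterName, "localhost_12918");
participant.syncStart(); // start the participant and wait until it is connected

// ... exercise the scenario under test ...

// always release the ZooKeeper sessions during cleanup
participant.syncStop();
controller.syncStop();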
Use of org.apache.helix.integration.manager.ClusterControllerManager in project helix by apache.
From the class TestHelixAdminScenariosRest, method testExpandCluster.
@Test
public void testExpandCluster() throws Exception {
final String clusterName = "clusterTestExpandCluster";
// setup cluster
addCluster(clusterName);
addInstancesToCluster(clusterName, "localhost:123", 6, null);
addResource(clusterName, "db_11", 22);
rebalanceResource(clusterName, "db_11");
ClusterControllerManager controller = new ClusterControllerManager(ZK_ADDR, clusterName, "controller_9900");
controller.syncStart();
// start mock nodes
Map<String, MockParticipantManager> participants = new HashMap<String, MockParticipantManager>();
for (int i = 0; i < 6; i++) {
String instanceName = "localhost_123" + i;
MockParticipantManager participant = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participant.syncStart();
participants.put(instanceName, participant);
}
boolean verifyResult = ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR, clusterName));
Assert.assertTrue(verifyResult);
String clusterUrl = getClusterUrl(clusterName);
String instancesUrl = clusterUrl + "/instances";
String instances = "localhost:12331;localhost:12341;localhost:12351;localhost:12361";
String response = assertSuccessPostOperation(instancesUrl, addInstanceCmd(instances), false);
String[] hosts = instances.split(";");
for (String host : hosts) {
Assert.assertTrue(response.contains(host.replace(':', '_')));
}
response = assertSuccessPostOperation(clusterUrl, expandClusterCmd(), false);
for (int i = 3; i <= 6; i++) {
String instanceName = "localhost_123" + i + "1";
MockParticipantManager participant = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participant.syncStart();
participants.put(instanceName, participant);
}
verifyResult = ClusterStateVerifier.verifyByZkCallback(new MasterNbInExtViewVerifier(ZK_ADDR, clusterName));
Assert.assertTrue(verifyResult);
verifyResult = ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR, clusterName));
Assert.assertTrue(verifyResult);
// clean up
controller.syncStop();
for (MockParticipantManager participant : participants.values()) {
participant.syncStop();
}
}
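The helpers used above (addInstanceCmd, expandClusterCmd, assertSuccessPostOperation) wrap plain HTTP POSTs against the Helix admin REST endpoints. Based on the parameter-map pattern visible in the later snippets (JsonParameters.MANAGEMENT_COMMAND serialized via ClusterRepresentationUtil.ObjectToJson), a hedged sketch of how such a POST body can be assembled; the "expandCluster" command string is an assumption, not taken from this page:
// Hedged sketch of a management-command POST body, mirroring the pattern used in
// TestJobQueuesResource below. The "expandCluster" command name is an assumption.
Map<String, String> paramMap = new HashMap<String, String>();
paramMap.put(JsonParameters.MANAGEMENT_COMMAND, "expandCluster");
String postBody = String.format("%s=%s", JsonParameters.JSON_PARAMETERS,
    ClusterRepresentationUtil.ObjectToJson(paramMap));
// POST postBody to http://localhost:<ADMIN_PORT>/clusters/<clusterName>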
Use of org.apache.helix.integration.manager.ClusterControllerManager in project helix by apache.
From the class TestResetInstance, method testResetInstance.
@Test
public void testResetInstance() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
final int n = 5;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
10, // partitions per resource
n, // number of nodes
3, // replicas
"MasterSlave", true); // do rebalance
// start controller
ClusterControllerManager controller = new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
controller.syncStart();
Map<String, Set<String>> errPartitions = new HashMap<String, Set<String>>() {
{
put("SLAVE-MASTER", TestHelper.setOf("TestDB0_4"));
put("OFFLINE-SLAVE", TestHelper.setOf("TestDB0_8"));
}
};
// start mock participants
MockParticipantManager[] participants = new MockParticipantManager[n];
for (int i = 0; i < n; i++) {
String instanceName = "localhost_" + (12918 + i);
if (i == 0) {
participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participants[i].setTransition(new ErrTransition(errPartitions));
} else {
participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
}
participants[i].syncStart();
}
// verify cluster
Map<String, Map<String, String>> errStateMap = new HashMap<String, Map<String, String>>();
errStateMap.put("TestDB0", new HashMap<String, String>());
errStateMap.get("TestDB0").put("TestDB0_4", "localhost_12918");
errStateMap.get("TestDB0").put("TestDB0_8", "localhost_12918");
boolean result = ClusterStateVerifier.verifyByZkCallback((new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR, clusterName, errStateMap)));
Assert.assertTrue(result, "Cluster verification fails");
// reset node "localhost_12918"
participants[0].setTransition(null);
String hostName = "localhost_12918";
String instanceUrl = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/instances/" + hostName;
Map<String, String> paramMap = new HashMap<String, String>();
paramMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.resetInstance);
TestHelixAdminScenariosRest.assertSuccessPostOperation(instanceUrl, paramMap, false);
result = ClusterStateVerifier.verifyByZkCallback((new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR, clusterName)));
Assert.assertTrue(result, "Cluster verification fails");
// clean up
controller.syncStop();
for (int i = 0; i < 5; i++) {
participants[i].syncStop();
}
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
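The REST resetInstance command exercised above has a programmatic counterpart on the HelixAdmin interface. A hedged sketch of the equivalent direct call; ZKHelixAdmin (org.apache.helix.manager.zk) and HelixAdmin#resetInstance are assumed from the public Helix API rather than taken from this page:
// Hedged sketch: reset the instance through HelixAdmin instead of the REST endpoint.
// The class and method used here are assumed from the public Helix API.
HelixAdmin admin = new ZKHelixAdmin(ZK_ADDR);
admin.resetInstance(clusterName, Arrays.asList("localhost_12918"));
admin.close();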
Use of org.apache.helix.integration.manager.ClusterControllerManager in project helix by apache.
From the class TestResetPartitionState, method testResetPartitionState.
@Test()
public void testResetPartitionState() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
final int n = 5;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
10, // partitions per resource
n, // number of nodes
3, // replicas
"MasterSlave", true); // do rebalance
// start controller
ClusterControllerManager controller = new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
controller.syncStart();
Map<String, Set<String>> errPartitions = new HashMap<String, Set<String>>();
errPartitions.put("SLAVE-MASTER", TestHelper.setOf("TestDB0_4"));
errPartitions.put("OFFLINE-SLAVE", TestHelper.setOf("TestDB0_8"));
// start mock participants
MockParticipantManager[] participants = new MockParticipantManager[n];
for (int i = 0; i < n; i++) {
String instanceName = "localhost_" + (12918 + i);
if (i == 0) {
participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participants[i].setTransition(new ErrTransition(errPartitions));
} else {
participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
}
participants[i].syncStart();
}
// verify cluster
Map<String, Map<String, String>> errStateMap = new HashMap<String, Map<String, String>>();
errStateMap.put("TestDB0", new HashMap<String, String>());
errStateMap.get("TestDB0").put("TestDB0_4", "localhost_12918");
errStateMap.get("TestDB0").put("TestDB0_8", "localhost_12918");
boolean result = ClusterStateVerifier.verifyByZkCallback((new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR, clusterName, errStateMap)));
Assert.assertTrue(result, "Cluster verification fails");
// resetting a non-existent partition should throw an exception
String hostName = "localhost_12918";
String instanceUrl = getInstanceUrl(clusterName, hostName);
Map<String, String> paramMap = new HashMap<String, String>();
paramMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.resetPartition);
paramMap.put(JsonParameters.PARTITION, "TestDB0_nonExist");
paramMap.put(JsonParameters.RESOURCE, "TestDB0");
LOG.info("IGNORABLE exception: test reset non-exist partition");
TestHelixAdminScenariosRest.assertSuccessPostOperation(instanceUrl, paramMap, true);
// reset 2 error partitions
errPartitions.clear();
participants[0].setTransition(new ErrTransitionWithResetCnt(errPartitions));
clearStatusUpdate(clusterName, "localhost_12918", "TestDB0", "TestDB0_4");
_errToOfflineInvoked.set(0);
paramMap.put(JsonParameters.PARTITION, "TestDB0_4 TestDB0_8");
TestHelixAdminScenariosRest.assertSuccessPostOperation(instanceUrl, paramMap, false);
for (int i = 0; i < 10; i++) {
// wait for the reset to complete
Thread.sleep(400);
LOG.info("IGNORABLE exception: test reset non-error partition");
TestHelixAdminScenariosRest.assertSuccessPostOperation(instanceUrl, paramMap, true);
result = ClusterStateVerifier.verifyByZkCallback(new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR, clusterName));
if (result) {
break;
}
}
Assert.assertTrue(result);
Assert.assertEquals(_errToOfflineInvoked.get(), 2, "reset() should be invoked 2 times");
// clean up
controller.syncStop();
for (int i = 0; i < 5; i++) {
participants[i].syncStop();
}
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
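The retry loop above (sleep, re-issue the reset, re-verify) is a pattern the Helix tests use repeatedly. A minimal sketch of the verification part extracted into a helper, built only from calls already used in this snippet (the method name and timeout are illustrative):
// Minimal sketch of the poll-until-verified loop used above; the name and timeout
// are illustrative, not part of the Helix API.
private static boolean waitForBestPossibleState(String zkAddr, String clusterName,
    long timeoutMs) throws InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (System.currentTimeMillis() < deadline) {
    boolean ok = ClusterStateVerifier.verifyByZkCallback(
        new ClusterStateVerifier.BestPossAndExtViewZkVerifier(zkAddr, clusterName));
    if (ok) {
      return true;
    }
    Thread.sleep(400); // same back-off interval the test uses
  }
  return false;
}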
Use of org.apache.helix.integration.manager.ClusterControllerManager in project helix by apache.
From the class TestJobQueuesResource, method test.
@Test
public void test() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
final int n = 5;
final int p = 20;
final int r = 3;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
_gSetupTool.addCluster(clusterName, true);
for (int i = 0; i < n; i++) {
String instanceName = "localhost_" + (12918 + i);
_gSetupTool.addInstanceToCluster(clusterName, instanceName);
}
// Set up target db
_gSetupTool.addResourceToCluster(clusterName, WorkflowGenerator.DEFAULT_TGT_DB, p, "MasterSlave");
_gSetupTool.rebalanceStorageCluster(clusterName, WorkflowGenerator.DEFAULT_TGT_DB, r);
Map<String, TaskFactory> taskFactoryReg = new HashMap<String, TaskFactory>();
taskFactoryReg.put("DummyTask", new TaskFactory() {
@Override
public Task createNewTask(TaskCallbackContext context) {
return new MockTask(context);
}
});
// Start dummy participants
MockParticipantManager[] participants = new MockParticipantManager[n];
for (int i = 0; i < n; i++) {
String instanceName = "localhost_" + (12918 + i);
participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
// Register a Task state model factory.
StateMachineEngine stateMachine = participants[i].getStateMachineEngine();
stateMachine.registerStateModelFactory("Task", new TaskStateModelFactory(participants[i], taskFactoryReg));
participants[i].syncStart();
}
// start controller
String controllerName = "controller";
ClusterControllerManager controller = new ClusterControllerManager(ZK_ADDR, clusterName, controllerName);
controller.syncStart();
boolean result = ClusterStateVerifier.verifyByZkCallback(new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR, clusterName));
Assert.assertTrue(result);
// Start a queue
String queueName = "myQueue1";
LOG.info("Starting job-queue: " + queueName);
String jobQueueYamlConfig = "name: " + queueName;
String resourceUrl = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/jobQueues";
ZNRecord postRet = AdminTestHelper.post(_gClient, resourceUrl, jobQueueYamlConfig);
LOG.info("Started job-queue: " + queueName + ", ret: " + postRet);
LOG.info("Getting all job-queues");
ZNRecord getRet = AdminTestHelper.get(_gClient, resourceUrl);
LOG.info("Got job-queues: " + getRet);
// Enqueue job
resourceUrl = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/jobQueues/" + queueName;
WorkflowBean wfBean = new WorkflowBean();
wfBean.name = queueName;
JobBean jBean1 = new JobBean();
jBean1.name = "myJob1";
jBean1.command = "DummyTask";
jBean1.targetResource = WorkflowGenerator.DEFAULT_TGT_DB;
jBean1.targetPartitionStates = Lists.newArrayList("MASTER");
JobBean jBean2 = new JobBean();
jBean2.name = "myJob2";
jBean2.command = "DummyTask";
jBean2.targetResource = WorkflowGenerator.DEFAULT_TGT_DB;
jBean2.targetPartitionStates = Lists.newArrayList("SLAVE");
wfBean.jobs = Lists.newArrayList(jBean1, jBean2);
String jobYamlConfig = new Yaml().dump(wfBean);
LOG.info("Enqueuing jobs: " + jobQueueYamlConfig);
Map<String, String> paraMap = new HashMap<String, String>();
paraMap.put(JsonParameters.MANAGEMENT_COMMAND, TaskDriver.DriverCommand.start.toString());
String postBody = String.format("%s=%s&%s=%s", JsonParameters.JSON_PARAMETERS, ClusterRepresentationUtil.ObjectToJson(paraMap), ResourceUtil.YamlParamKey.NEW_JOB.toString(), jobYamlConfig);
postRet = AdminTestHelper.post(_gClient, resourceUrl, postBody);
LOG.info("Enqueued job, ret: " + postRet);
// Get job
resourceUrl = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/jobQueues/" + queueName + "/" + jBean1.name;
getRet = AdminTestHelper.get(_gClient, resourceUrl);
LOG.info("Got job: " + getRet);
// Stop job queue
resourceUrl = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/jobQueues/" + queueName;
paraMap.put(JsonParameters.MANAGEMENT_COMMAND, TaskDriver.DriverCommand.stop.toString());
postBody = String.format("%s=%s", JsonParameters.JSON_PARAMETERS, ClusterRepresentationUtil.ObjectToJson(paraMap));
postRet = AdminTestHelper.post(_gClient, resourceUrl, postBody);
LOG.info("Stopped job-queue, ret: " + postRet);
// Delete a job
resourceUrl = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/jobQueues/" + queueName + "/" + jBean2.name;
AdminTestHelper.delete(_gClient, resourceUrl);
LOG.info("Delete a job: ");
// Resume job queue
resourceUrl = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/jobQueues/" + queueName;
paraMap.put(JsonParameters.MANAGEMENT_COMMAND, TaskDriver.DriverCommand.resume.toString());
postBody = String.format("%s=%s", JsonParameters.JSON_PARAMETERS, ClusterRepresentationUtil.ObjectToJson(paraMap));
postRet = AdminTestHelper.post(_gClient, resourceUrl, postBody);
LOG.info("Resumed job-queue, ret: " + postRet);
// Flush job queue
paraMap.put(JsonParameters.MANAGEMENT_COMMAND, "flush");
postBody = JsonParameters.JSON_PARAMETERS + "=" + ClusterRepresentationUtil.ObjectToJson(paraMap);
postRet = AdminTestHelper.post(_gClient, resourceUrl, postBody);
LOG.info("Flushed job-queue, ret: " + postRet);
// clean up
controller.syncStop();
for (int i = 0; i < n; i++) {
if (participants[i] != null && participants[i].isConnected()) {
participants[i].syncStop();
}
}
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
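The YAML posted to the jobQueues endpoint above is generated from WorkflowBean/JobBean. The same enqueue can also be done in-process through the task framework; a hedged sketch, where TaskDriver, JobQueue.Builder, and JobConfig.Builder are assumed from org.apache.helix.task and exact signatures may differ by Helix version:
// Hedged sketch: enqueue the same job via TaskDriver instead of the REST endpoint.
// TaskDriver, JobQueue.Builder and JobConfig.Builder are assumed from org.apache.helix.task.
TaskDriver driver = new TaskDriver(participants[0]); // any connected HelixManager works
driver.createQueue(new JobQueue.Builder("myQueue1").build());
JobConfig.Builder myJob1 = new JobConfig.Builder()
    .setCommand("DummyTask")
    .setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB)
    .setTargetPartitionStates(Collections.singleton("MASTER"));
driver.enqueueJob("myQueue1", "myJob1", myJob1);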