use of org.apache.helix.task.Workflow in project helix by apache.
the class TestJobTimeout method testNoSlaveToRunTask.
@Test
public void testNoSlaveToRunTask() throws InterruptedException {
  // The first job cannot be assigned to any instance, gets stuck and times out; the second job runs and the workflow succeeds.
  final String FIRST_JOB = "first_job";
  final String SECOND_JOB = "second_job";
  final String WORKFLOW_NAME = TestHelper.getTestMethodName();
  final String DB_NAME = WorkflowGenerator.DEFAULT_TGT_DB;
  JobConfig.Builder firstJobBuilder = new JobConfig.Builder()
      .setWorkflow(WORKFLOW_NAME)
      .setTargetResource(DB_NAME)
      .setTargetPartitionStates(Sets.newHashSet(MasterSlaveSMD.States.SLAVE.name()))
      .setCommand(MockTask.TASK_COMMAND)
      .setTimeout(1000);
  JobConfig.Builder secondJobBuilder = new JobConfig.Builder()
      .setWorkflow(WORKFLOW_NAME)
      .setTargetResource(DB_NAME)
      .setTargetPartitionStates(Sets.newHashSet(MasterSlaveSMD.States.MASTER.name()))
      .setCommand(MockTask.TASK_COMMAND)
      .setIgnoreDependentJobFailure(true); // ignore the first job's timeout
  WorkflowConfig.Builder workflowConfigBuilder = new WorkflowConfig.Builder(WORKFLOW_NAME)
      .setFailureThreshold(1); // the workflow ignores the first job's timeout, schedules the second job and succeeds
  Workflow.Builder workflowBuilder = new Workflow.Builder(WORKFLOW_NAME)
      .setWorkflowConfig(workflowConfigBuilder.build())
      .addJob(FIRST_JOB, firstJobBuilder)
      .addJob(SECOND_JOB, secondJobBuilder)
      .addParentChildDependency(FIRST_JOB, SECOND_JOB);
  _driver.start(workflowBuilder.build());
  _driver.pollForJobState(WORKFLOW_NAME, TaskUtil.getNamespacedJobName(WORKFLOW_NAME, FIRST_JOB), TaskState.TIMED_OUT);
  _driver.pollForJobState(WORKFLOW_NAME, TaskUtil.getNamespacedJobName(WORKFLOW_NAME, SECOND_JOB), TaskState.COMPLETED);
  _driver.pollForWorkflowState(WORKFLOW_NAME, TaskState.COMPLETED);
  JobContext jobContext = _driver.getJobContext(TaskUtil.getNamespacedJobName(WORKFLOW_NAME, FIRST_JOB));
  for (int pId : jobContext.getPartitionSet()) {
    // No task was assigned for the first job
    Assert.assertNull(jobContext.getPartitionState(pId));
  }
}
use of org.apache.helix.task.Workflow in project helix by apache.
the class TestRebalanceRunningTask method testFixedTargetTaskAndDisabledRebalanceAndNodeAdded.
/**
* Task type: fixed target
* Rebalance running task: disabled
* Story: new node added
*/
@Test
public void testFixedTargetTaskAndDisabledRebalanceAndNodeAdded() throws InterruptedException {
  WORKFLOW = TestHelper.getTestMethodName();
  JobConfig.Builder jobBuilder = new JobConfig.Builder()
      .setWorkflow(WORKFLOW)
      .setTargetResource(DATABASE)
      .setTargetPartitionStates(Sets.newHashSet(MasterSlaveSMD.States.MASTER.name()))
      .setNumConcurrentTasksPerInstance(100)
      .setFailureThreshold(2)
      .setMaxAttemptsPerTask(2)
      .setCommand(MockTask.TASK_COMMAND)
      .setJobCommandConfigMap(ImmutableMap.of(MockTask.TIMEOUT_CONFIG, "99999999")); // task stuck
  Workflow.Builder workflowBuilder = new Workflow.Builder(WORKFLOW).addJob(JOB, jobBuilder);
  _driver.start(workflowBuilder.build());
  // All tasks stuck on the same instance
  Assert.assertTrue(checkTasksOnSameInstances());
  // Add a new instance; the partitions are rebalanced
  startParticipant(_initialNumNodes);
  HelixClusterVerifier clusterVerifier = new BestPossibleExternalViewVerifier.Builder(CLUSTER_NAME)
      .setZkClient(_gZkClient)
      .setResources(Sets.newHashSet(DATABASE))
      .build();
  Assert.assertTrue(clusterVerifier.verify(10 * 1000));
  // Running tasks are also rebalanced, even though RebalanceRunningTask is disabled
  Assert.assertTrue(checkTasksOnDifferentInstances());
}
use of org.apache.helix.task.Workflow in project helix by apache.
the class TestTaskAssignment method testTaskAssignment.
@Test
public void testTaskAssignment() throws InterruptedException {
  _setupTool.getClusterManagementTool()
      .enableInstance(CLUSTER_NAME, PARTICIPANT_PREFIX + "_" + (_startPort + 0), false);
  String jobResource = TestHelper.getTestMethodName();
  JobConfig.Builder jobBuilder = new JobConfig.Builder()
      .setCommand(MockTask.TASK_COMMAND)
      .setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB);
  Workflow flow = WorkflowGenerator.generateSingleJobWorkflowBuilder(jobResource, jobBuilder).build();
  _driver.start(flow);
  // Wait 1 sec. The task should not be complete since it is not assigned.
  Thread.sleep(1000L);
  // The task is not assigned, so the task state should be null in this case.
  Assert.assertNull(_driver.getJobContext(TaskUtil.getNamespacedJobName(jobResource)).getPartitionState(0));
}
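For reference, the single-job workflow produced by WorkflowGenerator.generateSingleJobWorkflowBuilder above corresponds roughly to building the workflow directly with Workflow.Builder. The sketch below is an assumption about what the generator does (reusing the resource name as the job name) and omits any extra defaults, such as a job command config, that the generator may apply.
Workflow flow = new Workflow.Builder(jobResource)
    .addJob(jobResource, jobBuilder.setWorkflow(jobResource)) // assumed: job named after the workflow
    .build();
_driver.start(flow);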
use of org.apache.helix.task.Workflow in project helix by apache.
the class TestWorkflowJobDependency method testWorkflowWithOutDependencies.
@Test
public void testWorkflowWithOutDependencies() throws InterruptedException {
  String workflowName = TestHelper.getTestMethodName();
  // Workflow setup
  LOG.info("Start setup for workflow: " + workflowName);
  Workflow.Builder builder = new Workflow.Builder(workflowName);
  for (int i = 0; i < _numDbs; i++) {
    // Let each job delay for 2 secs.
    JobConfig.Builder jobConfig = new JobConfig.Builder()
        .setCommand(MockTask.TASK_COMMAND)
        .setTargetResource(_testDbs.get(i))
        .setTargetPartitionStates(Sets.newHashSet("SLAVE", "MASTER"))
        .setJobCommandConfigMap(WorkflowGenerator.DEFAULT_COMMAND_CONFIG);
    String jobName = "job" + _testDbs.get(i);
    builder.addJob(jobName, jobConfig);
  }
  // Start the workflow
  Workflow workflow = builder.build();
  _driver.start(workflow);
  // Wait until the workflow completes
  _driver.pollForWorkflowState(workflowName, TaskState.COMPLETED);
  WorkflowContext workflowContext = _driver.getWorkflowContext(workflowName);
  long startTime = workflowContext.getStartTime();
  long finishTime = workflowContext.getFinishTime();
  // Compute the time window common to all jobs: the latest job start and the earliest job finish.
  for (String jobName : workflow.getJobConfigs().keySet()) {
    JobContext context = _driver.getJobContext(jobName);
    LOG.info(String.format("JOB: %s starts from %s finishes at %s.", jobName, context.getStartTime(), context.getFinishTime()));
    startTime = Math.max(context.getStartTime(), startTime);
    finishTime = Math.min(context.getFinishTime(), finishTime);
  }
  // All jobs share a valid overlapping time range, i.e. the independent jobs ran concurrently.
  Assert.assertTrue(startTime <= finishTime);
}
use of org.apache.helix.task.Workflow in project helix by apache.
the class WorkflowAccessor method createWorkflow.
@PUT
@Path("{workflowId}")
public Response createWorkflow(@PathParam("clusterId") String clusterId,
    @PathParam("workflowId") String workflowId, String content) {
  TaskDriver driver = getTaskDriver(clusterId);
  Map<String, String> cfgMap;
  try {
    JsonNode root = OBJECT_MAPPER.readTree(content);
    cfgMap = OBJECT_MAPPER.readValue(root.get(WorkflowProperties.WorkflowConfig.name()).toString(),
        TypeFactory.defaultInstance().constructMapType(HashMap.class, String.class, String.class));
    WorkflowConfig workflowConfig = WorkflowConfig.Builder.fromMap(cfgMap).build();
    // Since a JobQueue can keep accepting jobs after creation, creating a JobQueue here ignores any jobs in the payload.
    if (workflowConfig.isJobQueue()) {
      driver.start(new JobQueue.Builder(workflowId).setWorkflowConfig(workflowConfig).build());
      return OK();
    }
    Workflow.Builder workflow = new Workflow.Builder(workflowId);
    if (root.get(WorkflowProperties.Jobs.name()) != null) {
      Map<String, JobConfig.Builder> jobConfigs =
          getJobConfigs((ArrayNode) root.get(WorkflowProperties.Jobs.name()));
      for (Map.Entry<String, JobConfig.Builder> job : jobConfigs.entrySet()) {
        workflow.addJob(job.getKey(), job.getValue());
      }
    }
    if (root.get(WorkflowProperties.ParentJobs.name()) != null) {
      Map<String, List<String>> parentJobs =
          OBJECT_MAPPER.readValue(root.get(WorkflowProperties.ParentJobs.name()).toString(),
              TypeFactory.defaultInstance().constructMapType(HashMap.class, String.class, List.class));
      for (Map.Entry<String, List<String>> entry : parentJobs.entrySet()) {
        String parentJob = entry.getKey();
        for (String childJob : entry.getValue()) {
          workflow.addParentChildDependency(parentJob, childJob);
        }
      }
    }
    driver.start(workflow.build());
  } catch (IOException e) {
    return badRequest(String.format("Invalid input of Workflow %s for reason : %s", workflowId, e.getMessage()));
  } catch (HelixException e) {
    return badRequest(String.format("Failed to create workflow %s for reason : %s", workflowId, e.getMessage()));
  }
  return OK();
}
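As an illustration, a request body for this endpoint could be assembled as sketched below. The top-level field names mirror the WorkflowProperties enum used above ("WorkflowConfig", "ParentJobs"); the individual config keys and the per-job schema expected by getJobConfigs are assumptions and should be checked against the Helix REST documentation.
// Hypothetical sketch of building the PUT body with Jackson; config keys inside "WorkflowConfig" are assumed, not verified.
ObjectMapper mapper = new ObjectMapper();
ObjectNode root = mapper.createObjectNode();
ObjectNode workflowConfig = root.putObject("WorkflowConfig");
workflowConfig.put("WorkflowID", "myWorkflow");   // assumed config key
workflowConfig.put("FailureThreshold", "1");      // assumed config key
ObjectNode parentJobs = root.putObject("ParentJobs");
parentJobs.putArray("job1").add("job2");          // declare job1 as the parent of job2
// "Jobs" entries (parsed by getJobConfigs) are omitted here; their schema is not shown in this snippet.
String content = mapper.writeValueAsString(root); // body for PUT /clusters/{clusterId}/workflows/{workflowId}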