Use of org.apache.helix.task.JobConfig in project helix by apache.
The class JobAccessor, method getJobConfig:
@GET
@Path("{jobName}/configs")
public Response getJobConfig(@PathParam("clusterId") String clusterId,
    @PathParam("workflowName") String workflowName, @PathParam("jobName") String jobName) {
  TaskDriver driver = getTaskDriver(clusterId);
  JobConfig jobConfig = driver.getJobConfig(jobName);
  if (jobConfig != null) {
    return JSONRepresentation(jobConfig.getRecord());
  }
  return badRequest("Job config for " + jobName + " does not exist");
}
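To see the endpoint exercised end to end, here is a hedged client-side sketch using the standard JAX-RS client. The base URL, cluster, workflow, and job names are placeholders, and the route prefix assumes the usual Helix REST layout (/clusters/{clusterId}/workflows/{workflowName}/jobs); adjust both to your deployment.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.core.Response;

public class JobConfigClient {
  public static void main(String[] args) {
    Client client = ClientBuilder.newClient();
    // Placeholder URL: host, port, and all path segments are assumptions
    Response response = client
        .target("http://localhost:8100/admin/v2/clusters/myCluster"
            + "/workflows/myWorkflow/jobs/myJob/configs")
        .request()
        .get();
    // A 200 carries the serialized ZNRecord of the JobConfig; a 400 means
    // the job config was not found (the badRequest branch above)
    System.out.println("HTTP " + response.getStatus());
    System.out.println(response.readEntity(String.class));
    client.close();
  }
}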
Use of org.apache.helix.task.JobConfig in project helix by apache.
The class TestTaskThrottling, method testJobPriority:
@Test(dependsOnMethods = { "testTaskThrottle" })
public void testJobPriority() throws InterruptedException {
  int numTasks = 30 * _numNodes;
  int perNodeTaskLimitation = 5;
  JobConfig.Builder jobConfig = generateLongRunJobConfig(numTasks);
  // Configure participants
  setParticipantsCapacity(perNodeTaskLimitation);
  // Schedule job1
  String jobName1 = "PriorityJob1";
  Workflow flow1 =
      WorkflowGenerator.generateSingleJobWorkflowBuilder(jobName1, jobConfig).build();
  _driver.start(flow1);
  _driver.pollForJobState(flow1.getName(),
      TaskUtil.getNamespacedJobName(flow1.getName(), jobName1), TaskState.IN_PROGRESS);
  // Wait for tasks to be picked up
  Thread.sleep(4000);
  Assert.assertEquals(countRunningPartition(flow1, jobName1), _numNodes * perNodeTaskLimitation);
  // Schedule job2
  String jobName2 = "PriorityJob2";
  Workflow flow2 =
      WorkflowGenerator.generateSingleJobWorkflowBuilder(jobName2, jobConfig).build();
  _driver.start(flow2);
  _driver.pollForJobState(flow2.getName(),
      TaskUtil.getNamespacedJobName(flow2.getName(), jobName2), TaskState.IN_PROGRESS);
  // Wait for tasks to be picked up
  Thread.sleep(1500);
  // job1 already occupies all capacity, so no job2 task should be running
  Assert.assertEquals(countRunningPartition(flow2, jobName2), 0);
  // Double the participants' capacity
  perNodeTaskLimitation = 2 * perNodeTaskLimitation;
  setParticipantsCapacity(perNodeTaskLimitation);
  Thread.sleep(1500);
  // The additional capacity should all be used by job1
  Assert.assertEquals(countRunningPartition(flow1, jobName1), _numNodes * perNodeTaskLimitation);
  Assert.assertEquals(countRunningPartition(flow2, jobName2), 0);
  _driver.stop(flow1.getName());
  _driver.pollForWorkflowState(flow1.getName(), TaskState.STOPPED);
  _driver.stop(flow2.getName());
  _driver.pollForWorkflowState(flow2.getName(), TaskState.STOPPED);
}
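This test (and testTaskThrottle below) builds its job through the generateLongRunJobConfig helper, which this page does not show. A minimal sketch of what it plausibly looks like, assuming MockTask honors a large TIMEOUT_CONFIG value so its tasks stay RUNNING while the assertions execute; the exact timeout and concurrency values are assumptions:

private JobConfig.Builder generateLongRunJobConfig(int numTasks) {
  // A generic (non-targeted) job: numTasks mock tasks that sleep for a very
  // long time, so they remain in RUNNING state while the test counts them
  return new JobConfig.Builder()
      .setCommand(MockTask.TASK_COMMAND)
      .setNumberOfTasks(numTasks)
      .setNumConcurrentTasksPerInstance(numTasks)
      .setJobCommandConfigMap(ImmutableMap.of(MockTask.TIMEOUT_CONFIG, "99999999"));
}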
Use of org.apache.helix.task.JobConfig in project helix by apache.
The class TestTaskThrottling, method testTaskThrottle:
@Test
public void testTaskThrottle() throws InterruptedException {
  int numTasks = 30 * _numNodes;
  int perNodeTaskLimitation = 5;
  JobConfig.Builder jobConfig = generateLongRunJobConfig(numTasks);
  // 1. Job executed in the participants with no limitation
  String jobName1 = "Job1";
  Workflow flow = WorkflowGenerator.generateSingleJobWorkflowBuilder(jobName1, jobConfig).build();
  _driver.start(flow);
  _driver.pollForJobState(flow.getName(),
      TaskUtil.getNamespacedJobName(flow.getName(), jobName1), TaskState.IN_PROGRESS);
  // Wait for tasks to be picked up
  Thread.sleep(1500);
  Assert.assertEquals(countRunningPartition(flow, jobName1), numTasks);
  _driver.stop(flow.getName());
  _driver.pollForWorkflowState(flow.getName(), TaskState.STOPPED);
  // 2. Job executed in the participants with a per-instance task limit
  // Configure the limit at the cluster level
  HelixConfigScope scope =
      new HelixConfigScopeBuilder(HelixConfigScope.ConfigScopeProperty.CLUSTER)
          .forCluster(CLUSTER_NAME).build();
  Map<String, String> properties = new HashMap<>();
  properties.put(ClusterConfig.ClusterConfigProperty.MAX_CONCURRENT_TASK_PER_INSTANCE.name(),
      String.valueOf(perNodeTaskLimitation));
  _setupTool.getClusterManagementTool().setConfig(scope, properties);
  String jobName2 = "Job2";
  flow = WorkflowGenerator.generateSingleJobWorkflowBuilder(jobName2, jobConfig).build();
  _driver.start(flow);
  _driver.pollForJobState(flow.getName(),
      TaskUtil.getNamespacedJobName(flow.getName(), jobName2), TaskState.IN_PROGRESS);
  // Wait for tasks to be picked up
  Thread.sleep(4000);
  Assert.assertEquals(countRunningPartition(flow, jobName2), _numNodes * perNodeTaskLimitation);
  _driver.stop(flow.getName());
  _driver.pollForWorkflowState(flow.getName(), TaskState.STOPPED);
  // 3. Ensure the job can finish normally with a short task timeout
  jobConfig.setJobCommandConfigMap(ImmutableMap.of(MockTask.TIMEOUT_CONFIG, "10"));
  String jobName3 = "Job3";
  flow = WorkflowGenerator.generateSingleJobWorkflowBuilder(jobName3, jobConfig).build();
  _driver.start(flow);
  _driver.pollForJobState(flow.getName(),
      TaskUtil.getNamespacedJobName(flow.getName(), jobName3), TaskState.COMPLETED);
}
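Both throttling tests count tasks in the RUNNING state via countRunningPartition, another helper not shown on this page. A plausible sketch, assuming it reads partition states from the JobContext the same way TaskAdmin.list does below:

private int countRunningPartition(Workflow flow, String jobName) {
  int runningPartition = 0;
  JobContext jobContext =
      _driver.getJobContext(TaskUtil.getNamespacedJobName(flow.getName(), jobName));
  for (int partition : jobContext.getPartitionSet()) {
    // Partitions not yet scheduled have a null state; enum comparison with
    // == is null-safe here
    if (jobContext.getPartitionState(partition) == TaskPartitionState.RUNNING) {
      runningPartition++;
    }
  }
  return runningPartition;
}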
Use of org.apache.helix.task.JobConfig in project helix by apache.
The class TestWorkflowJobDependency, method testWorkflowWithDependencies:
@Test
public void testWorkflowWithDependencies() throws InterruptedException {
  String workflowName = TestHelper.getTestMethodName();
  final int PARALLEL_NUM = 2;
  // Workflow setup
  WorkflowConfig.Builder workflowcfgBuilder =
      new WorkflowConfig.Builder().setWorkflowId(workflowName).setParallelJobs(PARALLEL_NUM);
  Workflow.Builder builder = new Workflow.Builder(workflowName);
  builder.setWorkflowConfig(workflowcfgBuilder.build());
  // The second job must wait for the first job to finish, even though the
  // workflow allows two jobs to run in parallel
  builder.addParentChildDependency("job" + _testDbs.get(0), "job" + _testDbs.get(1));
  for (int i = 0; i < 2; i++) {
    JobConfig.Builder jobConfig = new JobConfig.Builder()
        .setCommand(MockTask.TASK_COMMAND)
        .setTargetResource(_testDbs.get(i))
        .setTargetPartitionStates(Sets.newHashSet("SLAVE", "MASTER"))
        .setJobCommandConfigMap(WorkflowGenerator.DEFAULT_COMMAND_CONFIG);
    String jobName = "job" + _testDbs.get(i);
    builder.addJob(jobName, jobConfig);
  }
  // Start the workflow
  Workflow workflow = builder.build();
  _driver.start(workflow);
  // Wait until the workflow completes
  _driver.pollForWorkflowState(workflowName, TaskState.COMPLETED);
  // The child job must not have started before the parent job finished
  JobContext context1 =
      _driver.getJobContext(TaskUtil.getNamespacedJobName(workflowName, "job" + _testDbs.get(0)));
  JobContext context2 =
      _driver.getJobContext(TaskUtil.getNamespacedJobName(workflowName, "job" + _testDbs.get(1)));
  Assert.assertTrue(context2.getStartTime() - context1.getFinishTime() >= 0L);
}
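The parent-child pattern above is easy to lift out of the test harness. A minimal sketch with two generic jobs, assuming a connected TaskDriver in _driver; the job names and task counts are illustrative:

Workflow.Builder builder = new Workflow.Builder("exampleWorkflow");
JobConfig.Builder parentJob = new JobConfig.Builder()
    .setCommand(MockTask.TASK_COMMAND)
    .setNumberOfTasks(1);
JobConfig.Builder childJob = new JobConfig.Builder()
    .setCommand(MockTask.TASK_COMMAND)
    .setNumberOfTasks(1);
builder.addJob("parent", parentJob);
builder.addJob("child", childJob);
// "child" is not scheduled until "parent" reaches a terminal state
builder.addParentChildDependency("parent", "child");
_driver.start(builder.build());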
Use of org.apache.helix.task.JobConfig in project helix by apache.
The class TaskAdmin, method list:
private static void list(TaskDriver taskDriver, String workflow) {
  WorkflowConfig wCfg = taskDriver.getWorkflowConfig(workflow);
  if (wCfg == null) {
    LOG.error("Workflow " + workflow + " does not exist!");
    return;
  }
  WorkflowContext wCtx = taskDriver.getWorkflowContext(workflow);
  LOG.info("Workflow " + workflow + " consists of the following jobs: "
      + wCfg.getJobDag().getAllNodes());
  String workflowState =
      (wCtx != null) ? wCtx.getWorkflowState().name() : TaskState.NOT_STARTED.name();
  LOG.info("Current state of workflow is " + workflowState);
  LOG.info("Job states are: ");
  LOG.info("-------");
  for (String job : wCfg.getJobDag().getAllNodes()) {
    TaskState jobState = (wCtx != null) ? wCtx.getJobState(job) : TaskState.NOT_STARTED;
    LOG.info("Job " + job + " is " + jobState);
    // Fetch job information
    JobConfig jCfg = taskDriver.getJobConfig(job);
    JobContext jCtx = taskDriver.getJobContext(job);
    if (jCfg == null || jCtx == null) {
      LOG.info("-------");
      continue;
    }
    // Sort the task partitions for stable output
    List<Integer> partitions = Lists.newArrayList(jCtx.getPartitionSet());
    Collections.sort(partitions);
    // Report per-task status
    for (Integer partition : partitions) {
      String taskId = jCtx.getTaskIdForPartition(partition);
      taskId = (taskId != null) ? taskId : jCtx.getTargetForPartition(partition);
      LOG.info("Task: " + taskId);
      TaskConfig taskConfig = jCfg.getTaskConfig(taskId);
      if (taskConfig != null) {
        LOG.info("Configuration: " + taskConfig.getConfigMap());
      }
      TaskPartitionState state = jCtx.getPartitionState(partition);
      state = (state != null) ? state : TaskPartitionState.INIT;
      LOG.info("State: " + state);
      String assignedParticipant = jCtx.getAssignedParticipant(partition);
      if (assignedParticipant != null) {
        LOG.info("Assigned participant: " + assignedParticipant);
      }
      LOG.info("-------");
    }
    LOG.info("-------");
  }
}
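To call list(...) outside the TaskAdmin CLI, a TaskDriver can be built from a connected HelixManager. A hedged usage sketch; the ZooKeeper address, cluster name, instance name, and workflow name are placeholders:

public static void main(String[] args) throws Exception {
  // Connect as an administrator; connect() may throw, hence throws Exception
  HelixManager manager = HelixManagerFactory.getZKHelixManager("myCluster", "taskAdmin",
      InstanceType.ADMINISTRATOR, "localhost:2181");
  manager.connect();
  try {
    TaskDriver driver = new TaskDriver(manager);
    list(driver, "myWorkflow");  // the method defined above
  } finally {
    manager.disconnect();
  }
}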