use of org.apache.helix.task.Workflow in project helix by apache.
the class JobQueuesResource method post.
/**
* Add a new job queue
* <p>
* Usage:
* <code>curl -d @'{jobQueueConfig.yaml}'
* -H 'Content-Type: application/json' http://{host:port}/clusters/{clusterName}/jobQueues
* <p>
* For jobQueueConfig.yaml, see {@link Workflow#parse(String)}
*/
@Override
public Representation post(Representation entity) {
  try {
    String clusterName =
        ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
    ZkClient zkClient =
        ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
    Form form = new Form(entity);
    // Get the job queue and submit it
    if (form.size() < 1) {
      throw new HelixException("Yaml job queue config is required!");
    }
    Parameter payload = form.get(0);
    String yamlPayload = payload.getName();
    if (yamlPayload == null) {
      throw new HelixException("Yaml job queue config is required!");
    }
    Workflow workflow = Workflow.parse(yamlPayload);
    JobQueue.Builder jobQueueCfgBuilder = new JobQueue.Builder(workflow.getName());
    jobQueueCfgBuilder.fromMap(workflow.getWorkflowConfig().getResourceConfigMap());
    TaskDriver driver = new TaskDriver(zkClient, clusterName);
    driver.createQueue(jobQueueCfgBuilder.build());
    getResponse().setEntity(getHostedEntitiesRepresentation(clusterName));
    getResponse().setStatus(Status.SUCCESS_OK);
  } catch (Exception e) {
    getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
        MediaType.APPLICATION_JSON);
    getResponse().setStatus(Status.SUCCESS_OK);
    LOG.error("Exception in posting job queue: " + entity, e);
  }
  return null;
}
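The handler above is a thin wrapper around the Task Framework API: it parses the posted YAML into a Workflow, copies the workflow's resource config into a JobQueue, and asks the TaskDriver to create the queue. The sketch below drives the same steps directly from Java, with no REST layer. The cluster name, instance name, ZooKeeper address, task command, and the YAML keys in the payload are illustrative assumptions only; the authoritative payload schema is whatever Workflow#parse(String) accepts.

import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.task.JobQueue;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.Workflow;

public class CreateJobQueueSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative YAML payload; the keys below are assumptions, see Workflow#parse(String)
    // for the schema the endpoint actually accepts.
    String yamlPayload =
        "name: MyJobQueue\n"
            + "jobs:\n"
            + "  - name: myJob\n"
            + "    command: Reindex\n";

    // Placeholder cluster name and ZooKeeper address.
    HelixManager manager = HelixManagerFactory.getZKHelixManager(
        "MyCluster", "admin", InstanceType.ADMINISTRATOR, "localhost:2181");
    manager.connect();
    try {
      // Same steps as the handler: parse the YAML, build a JobQueue config from the
      // workflow's resource config map, and create the queue through the TaskDriver.
      Workflow workflow = Workflow.parse(yamlPayload);
      JobQueue.Builder queueBuilder = new JobQueue.Builder(workflow.getName());
      queueBuilder.fromMap(workflow.getWorkflowConfig().getResourceConfigMap());
      new TaskDriver(manager).createQueue(queueBuilder.build());
    } finally {
      manager.disconnect();
    }
  }
}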
use of org.apache.helix.task.Workflow in project helix by apache.
the class JobQueueResource method post.
/**
* Start a new job in a job queue, or stop/resume/flush/delete a job queue
* <p>
* Usage:
* <p>
* <li>Start a new job in a job queue:
* <code>curl -d @'./{input.txt}' -H 'Content-Type: application/json'
* http://{host:port}/clusters/{clusterName}/jobQueues/{jobQueue}
* <p>
* input.txt: <code>jsonParameters={"command":"start"}&newJob={newJobConfig.yaml}
* <p>
* For newJobConfig.yaml, see {@link Workflow#parse(String)}
* <li>Stop/resume/flush/delete a job queue:
* <code>curl -d 'jsonParameters={"command":"{stop/resume/flush/delete}"}'
* -H "Content-Type: application/json" http://{host:port}/clusters/{clusterName}/jobQueues/{jobQueue}
*/
@Override
public Representation post(Representation entity) {
  String clusterName =
      ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.CLUSTER_NAME);
  String jobQueueName =
      ResourceUtil.getAttributeFromRequest(getRequest(), ResourceUtil.RequestKey.JOB_QUEUE);
  ZkClient zkClient =
      ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
  try {
    TaskDriver driver = new TaskDriver(zkClient, clusterName);
    Form form = new Form(entity);
    JsonParameters jsonParameters = new JsonParameters(form);
    TaskDriver.DriverCommand cmd = TaskDriver.DriverCommand.valueOf(jsonParameters.getCommand());
    switch (cmd) {
      case start: {
        // Get the job queue and submit it
        String yamlPayload = ResourceUtil.getYamlParameters(form, ResourceUtil.YamlParamKey.NEW_JOB);
        if (yamlPayload == null) {
          throw new HelixException("Yaml job config is required!");
        }
        Workflow workflow = Workflow.parse(yamlPayload);
        for (String jobName : workflow.getJobConfigs().keySet()) {
          Map<String, String> jobCfgMap = workflow.getJobConfigs().get(jobName);
          JobConfig.Builder jobCfgBuilder = JobConfig.Builder.fromMap(jobCfgMap);
          if (workflow.getTaskConfigs() != null && workflow.getTaskConfigs().containsKey(jobName)) {
            jobCfgBuilder.addTaskConfigs(workflow.getTaskConfigs().get(jobName));
          }
          driver.enqueueJob(jobQueueName, TaskUtil.getDenamespacedJobName(jobQueueName, jobName),
              jobCfgBuilder);
        }
        break;
      }
      case stop: {
        driver.stop(jobQueueName);
        break;
      }
      case resume: {
        driver.resume(jobQueueName);
        break;
      }
      case flush: {
        driver.flushQueue(jobQueueName);
        break;
      }
      case delete: {
        driver.delete(jobQueueName);
        break;
      }
      case clean: {
        driver.cleanupQueue(jobQueueName);
        break;
      }
      default:
        throw new HelixException("Unsupported job queue command: " + cmd);
    }
    getResponse().setEntity(getHostedEntitiesRepresentation(clusterName, jobQueueName));
    getResponse().setStatus(Status.SUCCESS_OK);
  } catch (Exception e) {
    getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
        MediaType.APPLICATION_JSON);
    getResponse().setStatus(Status.SUCCESS_OK);
    LOG.error("Error in posting job queue: " + entity, e);
  }
  return null;
}
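Each REST command in the switch above maps one-to-one onto a TaskDriver call. The sketch below shows that mapping invoked directly from Java, assuming the queue "MyJobQueue" already exists (for example, created as in the previous snippet). The cluster name, instance name, ZooKeeper address, queue name, job name, and task command are placeholders, and the calls are listed back-to-back only to show the mapping, not as a realistic operational sequence.

import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.TaskDriver;

public class JobQueueCommandsSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder connection details.
    HelixManager manager = HelixManagerFactory.getZKHelixManager(
        "MyCluster", "admin", InstanceType.ADMINISTRATOR, "localhost:2181");
    manager.connect();
    try {
      TaskDriver driver = new TaskDriver(manager);
      String queue = "MyJobQueue"; // assumed to exist already

      // "start": enqueue a job, as the handler does for each job parsed from the YAML.
      JobConfig.Builder job = new JobConfig.Builder().setCommand("Reindex");
      driver.enqueueJob(queue, "myJob", job);

      // The remaining commands correspond directly to these TaskDriver methods.
      driver.stop(queue);          // stop
      driver.resume(queue);        // resume
      driver.flushQueue(queue);    // flush
      driver.cleanupQueue(queue);  // clean
      driver.delete(queue);        // delete
    } finally {
      manager.disconnect();
    }
  }
}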
use of org.apache.helix.task.Workflow in project helix by apache.
the class TestTaskRebalancer method basic.
private void basic(long jobCompletionTime) throws Exception {
  // We use a different resource name in each test method as a workaround for a Helix participant
  // bug where it does not clear locally cached state when a resource partition is dropped. Once
  // that is fixed we should change these tests to use the same resource name and implement a
  // beforeMethod that deletes the task resource.
  final String jobResource = "basic" + jobCompletionTime;
  Map<String, String> commandConfig =
      ImmutableMap.of(TIMEOUT_CONFIG, String.valueOf(jobCompletionTime));
  JobConfig.Builder jobBuilder = JobConfig.Builder.fromMap(WorkflowGenerator.DEFAULT_JOB_CONFIG);
  jobBuilder.setJobCommandConfigMap(commandConfig);
  Workflow flow =
      WorkflowGenerator.generateSingleJobWorkflowBuilder(jobResource, jobBuilder).build();
  _driver.start(flow);
  // Wait for job completion
  _driver.pollForWorkflowState(jobResource, TaskState.COMPLETED);
  // Ensure all partitions are completed individually
  JobContext ctx = _driver.getJobContext(TaskUtil.getNamespacedJobName(jobResource));
  for (int i = 0; i < _numParitions; i++) {
    Assert.assertEquals(ctx.getPartitionState(i), TaskPartitionState.COMPLETED);
    Assert.assertEquals(ctx.getPartitionNumAttempts(i), 1);
  }
}
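The test looks the job context up under a namespaced job name: jobs are stored under a name qualified by their workflow, which is why this test uses TaskUtil.getNamespacedJobName while the JobQueueResource handler above uses getDenamespacedJobName when enqueueing. The sketch below shows the two helpers side by side; the workflow and job names are made up, and the exact "workflow_job" format shown in the comments is an assumption about the implementation, not something the snippets rely on.

import org.apache.helix.task.TaskUtil;

public class JobNameNamespacingSketch {
  public static void main(String[] args) {
    String workflow = "basic200"; // e.g. a single-job workflow name, as in the test above
    String job = "SomeJob";

    // Qualify a job name with its workflow, then strip the qualifier off again.
    String namespaced = TaskUtil.getNamespacedJobName(workflow, job);
    String plain = TaskUtil.getDenamespacedJobName(workflow, namespaced);

    System.out.println(namespaced); // presumably "basic200_SomeJob" (assumed format)
    System.out.println(plain);      // "SomeJob"

    // For a single-job workflow generated by WorkflowGenerator, the test uses the
    // one-argument overload to locate the job context.
    System.out.println(TaskUtil.getNamespacedJobName(workflow));
  }
}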
use of org.apache.helix.task.Workflow in project helix by apache.
the class TestTaskRebalancerStopResume method testStopWorkflowInStoppingState.
@Test
public void testStopWorkflowInStoppingState() throws InterruptedException {
  final String workflowName = TestHelper.getTestMethodName();
  // Create a workflow
  Workflow.Builder builder = new Workflow.Builder(workflowName);
  // Add 2 jobs
  Map<String, String> jobCommandConfigMap = new HashMap<String, String>();
  jobCommandConfigMap.put(MockTask.TIMEOUT_CONFIG, "1000000");
  jobCommandConfigMap.put(MockTask.NOT_ALLOW_TO_CANCEL, String.valueOf(true));
  List<TaskConfig> taskConfigs = ImmutableList.of(
      new TaskConfig.Builder().setCommand(MockTask.TASK_COMMAND).setTaskId("testTask").build());
  JobConfig.Builder job1 = new JobConfig.Builder().setCommand(MockTask.TASK_COMMAND)
      .addTaskConfigs(taskConfigs).setJobCommandConfigMap(jobCommandConfigMap);
  String job1Name = "Job1";
  JobConfig.Builder job2 =
      new JobConfig.Builder().setCommand(MockTask.TASK_COMMAND).addTaskConfigs(taskConfigs);
  String job2Name = "Job2";
  builder.addJob(job1Name, job1);
  builder.addJob(job2Name, job2);
  _driver.start(builder.build());
  Thread.sleep(2000);
  _driver.stop(workflowName);
  _driver.pollForWorkflowState(workflowName, TaskState.STOPPING);
  // Expect job and workflow stuck in STOPPING state.
  WorkflowContext workflowContext = _driver.getWorkflowContext(workflowName);
  Assert.assertEquals(
      workflowContext.getJobState(TaskUtil.getNamespacedJobName(workflowName, job1Name)),
      TaskState.STOPPING);
}
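The first job's command config sets MockTask.NOT_ALLOW_TO_CANCEL, so its task keeps running after the stop request, and the job (and therefore the workflow) parks in STOPPING rather than reaching STOPPED. The sketch below is a hypothetical Task written to illustrate that behavior; it is not MockTask's actual implementation, and the deadline and sleep values are arbitrary.

import org.apache.helix.task.Task;
import org.apache.helix.task.TaskResult;

// Hypothetical illustration only: a task that ignores cancellation. While it keeps
// running, the rebalancer cannot finish stopping its job, which is what the test asserts.
public class UncancellableTask implements Task {
  @Override
  public TaskResult run() {
    // Simulate long-running work that does not check for a cancel request.
    long deadline = System.currentTimeMillis() + 1_000_000L;
    while (System.currentTimeMillis() < deadline) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        // keep going: this task deliberately does not honor cancellation
      }
    }
    return new TaskResult(TaskResult.Status.COMPLETED, "");
  }

  @Override
  public void cancel() {
    // Intentionally a no-op: the framework has asked the task to stop, but it keeps running.
  }
}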
use of org.apache.helix.task.Workflow in project helix by apache.
the class TestTaskRebalancerStopResume method stopAndResumeWorkflow.
@Test
public void stopAndResumeWorkflow() throws Exception {
  String workflow = "SomeWorkflow";
  Workflow flow = WorkflowGenerator.generateDefaultRepeatedJobWorkflowBuilder(workflow).build();
  LOG.info("Starting flow " + workflow);
  _driver.start(flow);
  _driver.pollForWorkflowState(workflow, TaskState.IN_PROGRESS);
  LOG.info("Pausing workflow");
  _driver.stop(workflow);
  _driver.pollForWorkflowState(workflow, TaskState.STOPPED);
  LOG.info("Resuming workflow");
  _driver.resume(workflow);
  _driver.pollForWorkflowState(workflow, TaskState.COMPLETED);
}