Use of org.apache.helix.task.WorkflowConfig in project incubator-gobblin by apache.
The class GobblinHelixJobLauncher, method submitJobToHelix.
/**
 * Submit a job to run.
 */
private void submitJobToHelix(JobConfig.Builder jobConfigBuilder) throws Exception {
  WorkflowConfig workflowConfig =
      this.helixTaskDriver.getWorkflowConfig(this.helixManager, this.helixQueueName);
  // If the queue is present but in the DELETE state, wait for cleanup before recreating it
  if (workflowConfig != null && workflowConfig.getTargetState() == TargetState.DELETE) {
    GobblinHelixTaskDriver gobblinHelixTaskDriver = new GobblinHelixTaskDriver(this.helixManager);
    gobblinHelixTaskDriver.deleteWorkflow(this.helixQueueName, this.jobQueueDeleteTimeoutSeconds);
    // If we get here, the workflow was successfully deleted
    workflowConfig = null;
  }
  // Create one queue per job, with the job name as the queue name
  if (workflowConfig == null) {
    JobQueue jobQueue = new JobQueue.Builder(this.helixQueueName).build();
    this.helixTaskDriver.createQueue(jobQueue);
    LOGGER.info("Created job queue {}", this.helixQueueName);
  } else {
    LOGGER.info("Job queue {} already exists", this.helixQueueName);
  }
  // Put the job into the queue
  this.helixTaskDriver.enqueueJob(this.jobContext.getJobName(), this.jobContext.getJobId(),
      jobConfigBuilder);
}
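GobblinHelixTaskDriver.deleteWorkflow is Gobblin's blocking wrapper around Helix's asynchronous workflow deletion. A minimal sketch of that blocking-delete pattern using only the stock TaskDriver API (the class and method names below are illustrative, not Gobblin's actual implementation):

import java.util.concurrent.TimeoutException;
import org.apache.helix.HelixManager;
import org.apache.helix.task.TaskDriver;

public class WorkflowCleanupSketch {
  // Delete a workflow, then poll until its config disappears or the timeout elapses.
  public static void deleteAndAwait(HelixManager manager, String workflow, long timeoutSeconds)
      throws InterruptedException, TimeoutException {
    TaskDriver driver = new TaskDriver(manager);
    driver.delete(workflow); // sets the workflow's target state to DELETE
    long deadline = System.currentTimeMillis() + timeoutSeconds * 1000L;
    while (driver.getWorkflowConfig(workflow) != null) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("Workflow " + workflow + " was not cleaned up in time");
      }
      Thread.sleep(1000L); // the controller removes the workflow asynchronously
    }
  }
}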
Use of org.apache.helix.task.WorkflowConfig in project helix by apache.
The class WorkflowAccessor, method getWorkflows.
@GET
public Response getWorkflows(@PathParam("clusterId") String clusterId) {
  TaskDriver taskDriver = getTaskDriver(clusterId);
  Map<String, WorkflowConfig> workflowConfigMap = taskDriver.getWorkflows();
  Map<String, List<String>> dataMap = new HashMap<>();
  dataMap.put(WorkflowProperties.Workflows.name(), new ArrayList<>(workflowConfigMap.keySet()));
  return JSONRepresentation(dataMap);
}
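The endpoint returns a JSON object whose single key is WorkflowProperties.Workflows.name() and whose value is the list of workflow names. A hedged client-side sketch of calling it; the host, port, base path, and cluster name are placeholders for whatever your Helix REST deployment uses:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class ListWorkflowsSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder URL; adjust host, port, base path, and cluster name for your deployment
    URL url = new URL("http://localhost:8100/admin/v2/clusters/MyCluster/workflows");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    try (BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
      // Expected shape: {"Workflows":["workflow1","workflow2",...]}
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}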
Use of org.apache.helix.task.WorkflowConfig in project helix by apache.
The class TestWorkflowAccessor, method testCreateWorkflow.
@Test(dependsOnMethods = "testGetWorkflowContext")
public void testCreateWorkflow() throws IOException {
  System.out.println("Start test :" + TestHelper.getTestMethodName());
  TaskDriver driver = getTaskDriver(CLUSTER_NAME);
  // Create a one-time workflow
  Entity entity = Entity.entity(WORKFLOW_INPUT, MediaType.APPLICATION_JSON_TYPE);
  put("clusters/" + CLUSTER_NAME + "/workflows/" + TEST_WORKFLOW_NAME, null, entity,
      Response.Status.OK.getStatusCode());
  WorkflowConfig workflowConfig = driver.getWorkflowConfig(TEST_WORKFLOW_NAME);
  Assert.assertNotNull(workflowConfig);
  Assert.assertEquals(workflowConfig.getJobDag().getAllNodes().size(), 2);
  // Create a JobQueue
  JobQueue.Builder jobQueue = new JobQueue.Builder(TEST_QUEUE_NAME)
      .setWorkflowConfig(driver.getWorkflowConfig(TEST_WORKFLOW_NAME));
  entity = Entity.entity(OBJECT_MAPPER.writeValueAsString(
      Collections.singletonMap(WorkflowAccessor.WorkflowProperties.WorkflowConfig.name(),
          jobQueue.build().getWorkflowConfig().getRecord().getSimpleFields())),
      MediaType.APPLICATION_JSON_TYPE);
  put("clusters/" + CLUSTER_NAME + "/workflows/" + TEST_QUEUE_NAME, null, entity,
      Response.Status.OK.getStatusCode());
  workflowConfig = driver.getWorkflowConfig(TEST_QUEUE_NAME);
  Assert.assertNotNull(workflowConfig);
  Assert.assertTrue(workflowConfig.isJobQueue());
  Assert.assertEquals(workflowConfig.getJobDag().getAllNodes().size(), 0);
}
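The test creates the workflow through REST, but the same kind of two-job, one-time workflow can be built programmatically, which is what the DAG-size assertion above is checking against. A hedged sketch; the job names, task command, and target resource are placeholders:

import org.apache.helix.task.JobConfig;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.Workflow;

public class CreateWorkflowSketch {
  public static void create(TaskDriver driver, String workflowName) {
    // A targeted job needs both a task command and a target resource
    JobConfig.Builder job = new JobConfig.Builder()
        .setCommand("Reindex")       // placeholder task command
        .setTargetResource("MyDB");  // placeholder target resource
    Workflow.Builder builder = new Workflow.Builder(workflowName)
        .addJob("JOB0", job)
        .addJob("JOB1", job);
    driver.start(builder.build()); // the workflow's DAG then has two nodes
  }
}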
Use of org.apache.helix.task.WorkflowConfig in project helix by apache.
The class TestTaskRebalancer, method testNamedQueue.
@Test
public void testNamedQueue() throws Exception {
  String queueName = TestHelper.getTestMethodName();
  // Create a queue
  JobQueue queue = new JobQueue.Builder(queueName).build();
  _driver.createQueue(queue);
  // Enqueue jobs
  Set<String> master = Sets.newHashSet("MASTER");
  Set<String> slave = Sets.newHashSet("SLAVE");
  JobConfig.Builder job1 = new JobConfig.Builder().setCommand(MockTask.TASK_COMMAND)
      .setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB).setTargetPartitionStates(master);
  JobConfig.Builder job2 = new JobConfig.Builder().setCommand(MockTask.TASK_COMMAND)
      .setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB).setTargetPartitionStates(slave);
  _driver.enqueueJob(queueName, "masterJob", job1);
  _driver.enqueueJob(queueName, "slaveJob", job2);
  // Ensure successful completion
  String namespacedJob1 = queueName + "_masterJob";
  String namespacedJob2 = queueName + "_slaveJob";
  _driver.pollForJobState(queueName, namespacedJob1, TaskState.COMPLETED);
  _driver.pollForJobState(queueName, namespacedJob2, TaskState.COMPLETED);
  JobContext masterJobContext = _driver.getJobContext(namespacedJob1);
  JobContext slaveJobContext = _driver.getJobContext(namespacedJob2);
  // Ensure correct ordering
  long job1Finish = masterJobContext.getFinishTime();
  long job2Start = slaveJobContext.getStartTime();
  Assert.assertTrue(job2Start >= job1Finish);
  // Flush queue and check cleanup
  _driver.flushQueue(queueName);
  HelixDataAccessor accessor = _manager.getHelixDataAccessor();
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  Assert.assertNull(accessor.getProperty(keyBuilder.idealStates(namespacedJob1)));
  Assert.assertNull(accessor.getProperty(keyBuilder.resourceConfig(namespacedJob1)));
  Assert.assertNull(accessor.getProperty(keyBuilder.idealStates(namespacedJob2)));
  Assert.assertNull(accessor.getProperty(keyBuilder.resourceConfig(namespacedJob2)));
  WorkflowConfig workflowCfg = _driver.getWorkflowConfig(queueName);
  JobDag dag = workflowCfg.getJobDag();
  Assert.assertFalse(dag.getAllNodes().contains(namespacedJob1));
  Assert.assertFalse(dag.getAllNodes().contains(namespacedJob2));
  Assert.assertFalse(dag.getChildrenToParents().containsKey(namespacedJob1));
  Assert.assertFalse(dag.getChildrenToParents().containsKey(namespacedJob2));
  Assert.assertFalse(dag.getParentsToChildren().containsKey(namespacedJob1));
  Assert.assertFalse(dag.getParentsToChildren().containsKey(namespacedJob2));
}
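The ordering assertion (job2Start >= job1Finish) holds because a JobQueue runs its jobs sequentially by default. A hedged sketch of relaxing that with WorkflowConfig's parallel-jobs setting; the queue name is a placeholder and the exact builder methods may vary across Helix versions:

import org.apache.helix.task.JobQueue;
import org.apache.helix.task.WorkflowConfig;

public class ParallelQueueSketch {
  public static JobQueue build(String queueName) {
    WorkflowConfig.Builder cfg = new WorkflowConfig.Builder()
        .setParallelJobs(2); // allow two jobs from the queue to run concurrently
    return new JobQueue.Builder(queueName).setWorkflowConfig(cfg.build()).build();
  }
}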
Use of org.apache.helix.task.WorkflowConfig in project helix by apache.
The class TestTaskRebalancerFailover, method test.
@Test
public void test() throws Exception {
  String queueName = TestHelper.getTestMethodName();
  // Create a queue
  LOG.info("Starting job-queue: " + queueName);
  JobQueue queue = new JobQueue.Builder(queueName).build();
  _driver.createQueue(queue);
  // Enqueue a job targeting MASTER partitions
  Set<String> master = Sets.newHashSet("MASTER");
  JobConfig.Builder job = new JobConfig.Builder().setCommand(MockTask.TASK_COMMAND)
      .setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB).setTargetPartitionStates(master);
  String job1Name = "masterJob";
  LOG.info("Enqueuing job: " + job1Name);
  _driver.enqueueJob(queueName, job1Name, job);
  // Check that all tasks completed on a MASTER replica
  String namespacedJob1 = String.format("%s_%s", queueName, job1Name);
  _driver.pollForJobState(queueName, namespacedJob1, TaskState.COMPLETED);
  HelixDataAccessor accessor = _manager.getHelixDataAccessor();
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  ExternalView ev = accessor.getProperty(keyBuilder.externalView(WorkflowGenerator.DEFAULT_TGT_DB));
  JobContext ctx = _driver.getJobContext(namespacedJob1);
  Set<String> failOverPartitions = Sets.newHashSet();
  for (int p = 0; p < _numParitions; p++) {
    String instanceName = ctx.getAssignedParticipant(p);
    Assert.assertNotNull(instanceName);
    String partitionName = ctx.getTargetForPartition(p);
    Assert.assertNotNull(partitionName);
    String state = ev.getStateMap(partitionName).get(instanceName);
    Assert.assertNotNull(state);
    Assert.assertEquals(state, "MASTER");
    if (instanceName.equals("localhost_12918")) {
      failOverPartitions.add(partitionName);
    }
  }
  // Enqueue another master job, then fail localhost_12918 while it is running
  String job2Name = "masterJob2";
  String namespacedJob2 = String.format("%s_%s", queueName, job2Name);
  LOG.info("Enqueuing job: " + job2Name);
  _driver.enqueueJob(queueName, job2Name, job);
  _driver.pollForJobState(queueName, namespacedJob2, TaskState.IN_PROGRESS);
  _participants[0].syncStop();
  _driver.pollForJobState(queueName, namespacedJob2, TaskState.COMPLETED);
  // Tasks previously assigned to localhost_12918 should be rescheduled on the new master
  ctx = _driver.getJobContext(namespacedJob2);
  ev = accessor.getProperty(keyBuilder.externalView(WorkflowGenerator.DEFAULT_TGT_DB));
  for (int p = 0; p < _numParitions; p++) {
    String partitionName = ctx.getTargetForPartition(p);
    Assert.assertNotNull(partitionName);
    if (failOverPartitions.contains(partitionName)) {
      String instanceName = ctx.getAssignedParticipant(p);
      Assert.assertNotNull(instanceName);
      // assertNotEquals, not assertNotSame: assertNotSame only compares references,
      // so it could pass even if the task were still assigned to localhost_12918
      Assert.assertNotEquals(instanceName, "localhost_12918");
      String state = ev.getStateMap(partitionName).get(instanceName);
      Assert.assertNotNull(state);
      Assert.assertEquals(state, "MASTER");
    }
  }
  // Flush queue and check cleanup
  _driver.flushQueue(queueName);
  Assert.assertNull(accessor.getProperty(keyBuilder.idealStates(namespacedJob1)));
  Assert.assertNull(accessor.getProperty(keyBuilder.resourceConfig(namespacedJob1)));
  Assert.assertNull(accessor.getProperty(keyBuilder.idealStates(namespacedJob2)));
  Assert.assertNull(accessor.getProperty(keyBuilder.resourceConfig(namespacedJob2)));
  WorkflowConfig workflowCfg = _driver.getWorkflowConfig(queueName);
  JobDag dag = workflowCfg.getJobDag();
  Assert.assertFalse(dag.getAllNodes().contains(namespacedJob1));
  Assert.assertFalse(dag.getAllNodes().contains(namespacedJob2));
  Assert.assertFalse(dag.getChildrenToParents().containsKey(namespacedJob1));
  Assert.assertFalse(dag.getChildrenToParents().containsKey(namespacedJob2));
  Assert.assertFalse(dag.getParentsToChildren().containsKey(namespacedJob1));
  Assert.assertFalse(dag.getParentsToChildren().containsKey(namespacedJob2));
}
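The failover trigger above is MockParticipantManager.syncStop on the instance holding the MASTER replicas. A hedged sketch of that simulation idiom, including bringing a replacement participant into the cluster; the ZooKeeper address, cluster name, and instance name are placeholders:

import org.apache.helix.integration.manager.MockParticipantManager;

public class FailoverSimulationSketch {
  public static MockParticipantManager failOver(MockParticipantManager victim,
      String zkAddr, String clusterName) {
    victim.syncStop(); // disconnect the instance; Helix promotes a new MASTER
    // Placeholder instance name; the instance must already be configured in the cluster
    MockParticipantManager replacement =
        new MockParticipantManager(zkAddr, clusterName, "localhost_12920");
    replacement.syncStart(); // rejoin so the task framework can reassign tasks
    return replacement;
  }
}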