Use of com.netflix.conductor.core.execution.ApplicationException in project conductor by Netflix.
Example from the class RedisExecutionDAO, method addEventExecution.
@Override
public boolean addEventExecution(EventExecution eventExecution) {
    try {
        String key = nsKey(EVENT_EXECUTION, eventExecution.getName(), eventExecution.getEvent(), eventExecution.getMessageId());
        String json = objectMapper.writeValueAsString(eventExecution);
        recordRedisDaoEventRequests("addEventExecution", eventExecution.getEvent());
        recordRedisDaoPayloadSize("addEventExecution", json.length(), eventExecution.getEvent(), "n/a");
        // hsetnx returns 1 only when the execution id was not already recorded; duplicates are left untouched
        boolean added = dynoClient.hsetnx(key, eventExecution.getId(), json) == 1L;
        if (ttlEventExecutionSeconds > 0) {
            // optionally expire the hash so stale event executions are cleaned up
            dynoClient.expire(key, ttlEventExecutionSeconds);
        }
        return added;
    } catch (Exception e) {
        throw new ApplicationException(Code.BACKEND_ERROR, "Unable to add event execution for " + eventExecution.getId(), e);
    }
}
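As context for the snippet above, everything that can go wrong inside the DAO (Jedis calls, Jackson serialization) reaches the caller as a single ApplicationException carrying Code.BACKEND_ERROR. A minimal caller sketch, assuming a DAO instance named executionDAO, an eventExecution already built, an SLF4J logger, and that the exception exposes its code via getCode():

    // Hypothetical caller: executionDAO and eventExecution are assumed to be in scope.
    try {
        boolean added = executionDAO.addEventExecution(eventExecution);
        if (!added) {
            // hsetnx returned 0: this execution id was already recorded
            logger.info("Event execution {} already present, skipping", eventExecution.getId());
        }
    } catch (ApplicationException e) {
        // the DAO wraps every Redis/serialization failure as BACKEND_ERROR
        if (e.getCode() == ApplicationException.Code.BACKEND_ERROR) {
            logger.error("Failed to persist event execution {}", eventExecution.getId(), e);
        }
        throw e;
    }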
Use of com.netflix.conductor.core.execution.ApplicationException in project conductor by Netflix.
Example from the class RedisEventHandlerDAO, method removeEventHandler.
@Override
public void removeEventHandler(String name) {
    EventHandler existing = getEventHandler(name);
    if (existing == null) {
        throw new ApplicationException(Code.NOT_FOUND, "EventHandler with name " + name + " not found!");
    }
    dynoClient.hdel(nsKey(EVENT_HANDLERS), name);
    recordRedisDaoRequests("removeEventHandler");
    removeIndex(existing);
}
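Because a missing handler surfaces as Code.NOT_FOUND, a caller that wants delete-if-present semantics can catch and filter rather than pre-checking. A small sketch under the same assumptions (hypothetical eventHandlerDAO instance, getCode() accessor on the exception):

    // Hypothetical caller: delete the handler if it exists, otherwise do nothing.
    try {
        eventHandlerDAO.removeEventHandler("obsolete_handler");
    } catch (ApplicationException e) {
        if (e.getCode() != ApplicationException.Code.NOT_FOUND) {
            throw e; // only the "handler does not exist" case is tolerated
        }
    }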
Use of com.netflix.conductor.core.execution.ApplicationException in project conductor by Netflix.
Example from the class AbstractWorkflowServiceTest, method testSimpleWorkflow.
@Test
public void testSimpleWorkflow() throws Exception {
    clearWorkflows();
    metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1);
    String correlationId = "unit_test_1";
    Map<String, Object> input = new HashMap<>();
    String inputParam1 = "p1 value";
    input.put("param1", inputParam1);
    input.put("param2", "p2 value");
    String workflowInstanceId = startOrLoadWorkflowExecution(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null);
    logger.info("testSimpleWorkflow.wfid= {}", workflowInstanceId);
    assertNotNull(workflowInstanceId);
    Workflow workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true);
    assertNotNull(workflow);
    assertEquals(RUNNING, workflow.getStatus());
    // The very first task is the one that should be scheduled.
    assertEquals(1, workflow.getTasks().size());
    // Rewinding (restarting) a workflow that is still RUNNING is rejected with an ApplicationException.
    boolean failed = false;
    try {
        workflowExecutor.rewind(workflowInstanceId, false);
    } catch (ApplicationException ae) {
        failed = true;
    }
    assertTrue(failed);
    // Polling for the first task
    Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker");
    assertNotNull(task);
    assertEquals("junit_task_1", task.getTaskType());
    assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId()));
    assertEquals(workflowInstanceId, task.getWorkflowInstanceId());
    workflowExecutor.decide(workflowInstanceId);
    String task1Op = "task1.Done";
    List<Task> tasks = workflowExecutionService.getTasks(task.getTaskType(), null, 1);
    assertNotNull(tasks);
    assertEquals(1, tasks.size());
    task = tasks.get(0);
    assertEquals(workflowInstanceId, task.getWorkflowInstanceId());
    task.getOutputData().put("op", task1Op);
    task.setStatus(COMPLETED);
    workflowExecutionService.updateTask(task);
    workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, false);
    assertNotNull(workflow);
    assertNotNull(workflow.getOutput());
    task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker");
    assertNotNull(task);
    assertEquals("junit_task_2", task.getTaskType());
    assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId()));
    String task2Input = (String) task.getInputData().get("tp2");
    assertNotNull("Found=" + task.getInputData(), task2Input);
    assertEquals(task1Op, task2Input);
    task2Input = (String) task.getInputData().get("tp1");
    assertNotNull(task2Input);
    assertEquals(inputParam1, task2Input);
    task.setStatus(COMPLETED);
    task.setReasonForIncompletion("unit test failure");
    workflowExecutionService.updateTask(task);
    workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true);
    assertNotNull(workflow);
    assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus());
    tasks = workflow.getTasks();
    assertNotNull(tasks);
    assertEquals(2, tasks.size());
    assertTrue("Found " + workflow.getOutput().toString(), workflow.getOutput().containsKey("o3"));
    assertEquals("task1.Done", workflow.getOutput().get("o3"));
}
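The try/catch around rewind above only verifies that restarting a workflow which is still RUNNING raises an ApplicationException. Outside a test, the same rule can be enforced up front; a sketch reusing the executor and service fields from the test, plus the WorkflowStatus.isTerminal() check used elsewhere in this class:

    // Hypothetical guard: only attempt a rewind once the workflow has reached a terminal state.
    Workflow current = workflowExecutionService.getExecutionStatus(workflowInstanceId, false);
    if (current.getStatus().isTerminal()) {
        workflowExecutor.rewind(workflowInstanceId, false);
    } else {
        logger.warn("Workflow {} is still {}; rewind would throw ApplicationException", workflowInstanceId, current.getStatus());
    }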
Use of com.netflix.conductor.core.execution.ApplicationException in project conductor by Netflix.
Example from the class AbstractWorkflowServiceTest, method testSimpleWorkflowFailureWithTerminalError.
@Test
public void testSimpleWorkflowFailureWithTerminalError() throws Exception {
    clearWorkflows();
    TaskDef taskDef = notFoundSafeGetTaskDef("junit_task_1");
    taskDef.setRetryCount(1);
    metadataService.updateTaskDef(taskDef);
    WorkflowDef workflowDef = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1);
    assertNotNull(workflowDef);
    Map<String, Object> outputParameters = workflowDef.getOutputParameters();
    outputParameters.put("validationErrors", "${t1.output.ErrorMessage}");
    metadataService.updateWorkflowDef(workflowDef);
    String correlationId = "unit_test_1";
    Map<String, Object> input = new HashMap<>();
    input.put("param1", "p1 value");
    input.put("param2", "p2 value");
    String workflowInstanceId = startOrLoadWorkflowExecution("simpleWorkflowFailureWithTerminalError", LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null);
    assertNotNull(workflowInstanceId);
    Workflow workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true);
    assertNotNull(workflow);
    assertEquals(workflow.getReasonForIncompletion(), RUNNING, workflow.getStatus());
    // The very first task is the one that should be scheduled.
    assertEquals(1, workflow.getTasks().size());
    // Rewinding a RUNNING workflow is rejected with an ApplicationException.
    boolean failed = false;
    try {
        workflowExecutor.rewind(workflowInstanceId, false);
    } catch (ApplicationException ae) {
        failed = true;
    }
    assertTrue(failed);
    // Polling for the first task should return the same task as before
    Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker");
    assertNotNull(task);
    assertEquals("junit_task_1", task.getTaskType());
    assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId()));
    assertEquals(workflowInstanceId, task.getWorkflowInstanceId());
    TaskResult taskResult = new TaskResult(task);
    taskResult.setReasonForIncompletion("NON TRANSIENT ERROR OCCURRED: An integration point required to complete the task is down");
    taskResult.setStatus(TaskResult.Status.FAILED_WITH_TERMINAL_ERROR);
    taskResult.addOutputData("TERMINAL_ERROR", "Integration endpoint down: FOOBAR");
    taskResult.addOutputData("ErrorMessage", "There was a terminal error");
    workflowExecutionService.updateTask(taskResult);
    workflowExecutor.decide(workflowInstanceId);
    workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true);
    TaskDef junit_task_1 = notFoundSafeGetTaskDef("junit_task_1");
    Task t1 = workflow.getTaskByRefName("t1");
    assertNotNull(workflow);
    assertEquals(WorkflowStatus.FAILED, workflow.getStatus());
    assertEquals("NON TRANSIENT ERROR OCCURRED: An integration point required to complete the task is down", workflow.getReasonForIncompletion());
    // Configured retries at the task definition level
    assertEquals(1, junit_task_1.getRetryCount());
    // Actual retries done on the task
    assertEquals(0, t1.getRetryCount());
    assertTrue(workflow.getOutput().containsKey("o1"));
    assertEquals("p1 value", workflow.getOutput().get("o1"));
    assertEquals("There was a terminal error", workflow.getOutput().get("validationErrors").toString());
    outputParameters.remove("validationErrors");
    metadataService.updateWorkflowDef(workflowDef);
}
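The part of this test that workers actually reproduce is the TaskResult marked FAILED_WITH_TERMINAL_ERROR, which fails the workflow immediately instead of consuming the task's configured retries. A minimal worker-side sketch restricted to the TaskResult calls already shown, assuming task was obtained from a poll:

    // Hypothetical worker body: report a non-retryable failure for the polled task.
    TaskResult result = new TaskResult(task);
    result.setStatus(TaskResult.Status.FAILED_WITH_TERMINAL_ERROR); // bypass task-level retries
    result.setReasonForIncompletion("NON TRANSIENT ERROR OCCURRED: An integration point required to complete the task is down");
    result.addOutputData("ErrorMessage", "There was a terminal error"); // surfaced through ${t1.output.ErrorMessage}
    workflowExecutionService.updateTask(result);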
Use of com.netflix.conductor.core.execution.ApplicationException in project conductor by Netflix.
Example from the class AbstractWorkflowServiceTest, method testKafkaTaskDefTemplateFailure.
@Test
public void testKafkaTaskDefTemplateFailure() throws Exception {
    try {
        registerKafkaWorkflow();
    } catch (ApplicationException e) {
        // registration failures are deliberately swallowed; the test proceeds with whatever is already registered
    }
    Map<String, Object> input = getKafkaInput();
    String workflowInstanceId = startOrLoadWorkflowExecution("template_kafka_workflow", 1, "testTaskDefTemplate", input, null, null);
    assertNotNull(workflowInstanceId);
    Workflow workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true);
    assertNotNull(workflow);
    assertTrue(workflow.getReasonForIncompletion(), !workflow.getStatus().isTerminal());
    assertEquals(1, workflow.getTasks().size());
    Task task = workflow.getTasks().get(0);
    Map<String, Object> taskInput = task.getInputData();
    assertNotNull(taskInput);
    assertTrue(taskInput.containsKey("kafka_request"));
    assertTrue(taskInput.get("kafka_request") instanceof Map);
    String expected = "{\"kafka_request\":{\"topic\":\"test_kafka_topic\",\"bootStrapServers\":\"localhost:9092\",\"value\":{\"requestDetails\":{\"key1\":\"value1\",\"key2\":42},\"outputPath\":\"s3://bucket/outputPath\",\"inputPaths\":[\"file://path1\",\"file://path2\"]}}}";
    assertEquals(expected, objectMapper.writeValueAsString(taskInput));
    TaskResult taskResult = new TaskResult(task);
    taskResult.setReasonForIncompletion("NON TRANSIENT ERROR OCCURRED: An integration point required to complete the task is down");
    taskResult.setStatus(TaskResult.Status.FAILED);
    taskResult.addOutputData("TERMINAL_ERROR", "Integration endpoint down: FOOBAR");
    taskResult.addOutputData("ErrorMessage", "There was a terminal error");
    // Polling for the first task
    Task task1 = workflowExecutionService.poll("KAFKA_PUBLISH", "test");
    assertNotNull(task1);
    assertTrue(workflowExecutionService.ackTaskReceived(task1.getTaskId()));
    assertEquals(workflowInstanceId, task1.getWorkflowInstanceId());
    workflowExecutionService.updateTask(taskResult);
    workflowExecutor.decide(workflowInstanceId);
    workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true);
    assertNotNull(workflow);
    assertEquals(WorkflowStatus.FAILED, workflow.getStatus());
}
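The assertion on the expected JSON pins down exactly what the task-definition template resolves kafka_request to. For reference, a hand-built map that serializes to the same payload (values copied from that assertion; Arrays.asList is used for the input paths):

    // Hypothetical construction of the kafka_request payload asserted above.
    Map<String, Object> requestDetails = new HashMap<>();
    requestDetails.put("key1", "value1");
    requestDetails.put("key2", 42);

    Map<String, Object> value = new HashMap<>();
    value.put("requestDetails", requestDetails);
    value.put("outputPath", "s3://bucket/outputPath");
    value.put("inputPaths", Arrays.asList("file://path1", "file://path2"));

    Map<String, Object> kafkaRequest = new HashMap<>();
    kafkaRequest.put("topic", "test_kafka_topic");
    kafkaRequest.put("bootStrapServers", "localhost:9092");
    kafkaRequest.put("value", value);

    Map<String, Object> input = new HashMap<>();
    input.put("kafka_request", kafkaRequest);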