use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.
the class TestSQSObservableQueue method testException.
@Test
public void testException() {
    com.amazonaws.services.sqs.model.Message message = new com.amazonaws.services.sqs.model.Message()
            .withMessageId("test")
            .withBody("")
            .withReceiptHandle("receiptHandle");
    Answer<?> answer = (Answer<ReceiveMessageResult>) invocation -> new ReceiveMessageResult();

    AmazonSQSClient client = mock(AmazonSQSClient.class);
    when(client.listQueues(any(ListQueuesRequest.class)))
            .thenReturn(new ListQueuesResult().withQueueUrls("junit_queue_url"));
    // The first poll throws, the second returns a single message, and every later poll returns an empty result.
    when(client.receiveMessage(any(ReceiveMessageRequest.class)))
            .thenThrow(new RuntimeException("Error in SQS communication"))
            .thenReturn(new ReceiveMessageResult().withMessages(message))
            .thenAnswer(answer);

    SQSObservableQueue queue = new SQSObservableQueue.Builder()
            .withQueueName("junit")
            .withClient(client)
            .build();

    List<Message> found = new LinkedList<>();
    Observable<Message> observable = queue.observe();
    assertNotNull(observable);
    observable.subscribe(found::add);

    Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS);
    // Despite the initial failure, exactly one message should have been delivered to the subscriber.
    assertEquals(1, found.size());
}
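The mock chain above captures the error-recovery contract: a failed receiveMessage call does not break the observable, and the next successful poll still reaches subscribers. The sketch below is a minimal, hypothetical usage example (not from the Conductor sources) that wires the same builder into processing code and acknowledges each message; it assumes SQSObservableQueue.ack(...) removes messages through the client's deleteMessageBatch call, which is why that call is stubbed.

AmazonSQSClient client = mock(AmazonSQSClient.class);
when(client.listQueues(any(ListQueuesRequest.class)))
        .thenReturn(new ListQueuesResult().withQueueUrls("junit_queue_url"));
when(client.receiveMessage(any(ReceiveMessageRequest.class)))
        .thenReturn(new ReceiveMessageResult().withMessages(
                new com.amazonaws.services.sqs.model.Message()
                        .withMessageId("m1").withBody("{}").withReceiptHandle("rh-1")))
        .thenReturn(new ReceiveMessageResult());
// Assumption: ack(...) deletes the message batch; stub it so the sketch runs against the mock.
when(client.deleteMessageBatch(any(DeleteMessageBatchRequest.class)))
        .thenReturn(new DeleteMessageBatchResult());

SQSObservableQueue queue = new SQSObservableQueue.Builder()
        .withQueueName("junit")
        .withClient(client)
        .build();

// Process each delivered message and acknowledge it so it is not redelivered.
queue.observe().subscribe(msg -> {
    System.out.println("processing " + msg.getId() + ": " + msg.getPayload());
    List<String> failedAcks = queue.ack(Collections.singletonList(msg));
    if (!failedAcks.isEmpty()) {
        System.err.println("could not ack: " + failedAcks);
    }
});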
use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.
the class AMQPObservableQueueTest method testPublishMessagesToQueueAndDefaultConfiguration.
private void testPublishMessagesToQueueAndDefaultConfiguration(Channel channel, Connection connection,
        boolean queueExists, boolean useWorkingChannel) throws IOException, TimeoutException {
    final Random random = new Random();
    final String queueName = RandomStringUtils.randomAlphabetic(30);
    final AMQPSettings settings = new AMQPSettings(configuration)
            .fromURI("amqp_queue:" + queueName + "?deliveryMode=2&durable=true&exclusive=false&autoDelete=true");
    assertEquals(true, settings.isDurable());
    assertEquals(false, settings.isExclusive());
    assertEquals(true, settings.autoDelete());
    assertEquals(2, settings.getDeliveryMode());

    List<GetResponse> queue = buildQueue(random, batchSize);
    channel = mockChannelForQueue(channel, useWorkingChannel, queueExists, queueName, queue);

    AMQPObservableQueue observableQueue = new AMQPObservableQueue(
            mockConnectionFactory(connection), addresses, false, settings, batchSize, pollTimeMs);

    assertArrayEquals(addresses, observableQueue.getAddresses());
    assertEquals(AMQPConstants.AMQP_QUEUE_TYPE, observableQueue.getType());
    assertEquals(AMQPConstants.AMQP_QUEUE_TYPE + ":" + queueName
            + "?deliveryMode=2&durable=true&exclusive=false&autoDelete=true", observableQueue.getName());
    assertEquals(queueName, observableQueue.getURI());
    assertEquals(batchSize, observableQueue.getBatchSize());
    assertEquals(pollTimeMs, observableQueue.getPollTimeInMS());
    assertEquals(queue.size(), observableQueue.size());

    List<Message> messages = new LinkedList<>();
    Observable.range(0, batchSize).forEach((Integer x) -> messages.add(new Message("" + x, "payload: " + x, null)));
    assertEquals(batchSize, messages.size());

    observableQueue.publish(messages);

    if (useWorkingChannel) {
        verify(channel, times(batchSize)).basicPublish(eq(StringUtils.EMPTY), eq(queueName),
                any(AMQP.BasicProperties.class), any(byte[].class));
    }
}
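The helper is parameterised by mocked RabbitMQ objects and two flags, so a concrete test case only has to supply them. A possible invocation is sketched below; the test method name and the bare Mockito mocks are hypothetical, since the real test class presumably prepares its Channel and Connection through its own helpers (mockChannelForQueue, mockConnectionFactory).

@Test
public void testPublishToExistingQueueWithWorkingChannel() throws IOException, TimeoutException {
    Channel channel = mock(Channel.class);
    Connection connection = mock(Connection.class);
    // queueExists = true, useWorkingChannel = true: the queue is reported as present and
    // basicPublish is expected exactly once per message.
    testPublishMessagesToQueueAndDefaultConfiguration(channel, connection, true, true);
}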
use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.
the class QueueManager method startMonitor.
private void startMonitor(Status status, ObservableQueue queue) {
    queue.observe().subscribe((Message msg) -> {
        try {
            logger.debug("Got message {}", msg.getPayload());
            String payload = msg.getPayload();
            JsonNode payloadJSON = objectMapper.readTree(payload);
            String externalId = getValue("externalId", payloadJSON);
            if (externalId == null || "".equals(externalId)) {
                logger.error("No external Id found in the payload {}", payload);
                queue.ack(Collections.singletonList(msg));
                return;
            }
            JsonNode json = objectMapper.readTree(externalId);
            String workflowId = getValue("workflowId", json);
            String taskRefName = getValue("taskRefName", json);
            String taskId = getValue("taskId", json);
            if (workflowId == null || "".equals(workflowId)) {
                // This is a bad message, we cannot process it
                logger.error("No workflow id found in the message. {}", payload);
                queue.ack(Collections.singletonList(msg));
                return;
            }
            Workflow workflow = executionService.getExecutionStatus(workflowId, true);
            Optional<Task> taskOptional;
            if (StringUtils.isNotEmpty(taskId)) {
                taskOptional = workflow.getTasks().stream()
                        .filter(task -> !task.getStatus().isTerminal() && task.getTaskId().equals(taskId))
                        .findFirst();
            } else if (StringUtils.isEmpty(taskRefName)) {
                logger.error("No taskRefName found in the message. If there is only one WAIT task, will mark it as completed. {}", payload);
                taskOptional = workflow.getTasks().stream()
                        .filter(task -> !task.getStatus().isTerminal() && task.getTaskType().equals(Wait.NAME))
                        .findFirst();
            } else {
                taskOptional = workflow.getTasks().stream()
                        .filter(task -> !task.getStatus().isTerminal() && task.getReferenceTaskName().equals(taskRefName))
                        .findFirst();
            }
            if (!taskOptional.isPresent()) {
                logger.error("No matching tasks found to be marked as completed for workflow {}, taskRefName {}, taskId {}",
                        workflowId, taskRefName, taskId);
                queue.ack(Collections.singletonList(msg));
                return;
            }
            Task task = taskOptional.get();
            task.setStatus(status);
            task.getOutputData().putAll(objectMapper.convertValue(payloadJSON, _mapType));
            executionService.updateTask(task);
            List<String> failures = queue.ack(Collections.singletonList(msg));
            if (!failures.isEmpty()) {
                logger.error("Not able to ack the messages {}", failures.toString());
            }
        } catch (JsonParseException e) {
            logger.error("Bad message? : {} ", msg, e);
            queue.ack(Collections.singletonList(msg));
        } catch (ApplicationException e) {
            if (e.getCode().equals(Code.NOT_FOUND)) {
                logger.error("Workflow ID specified is not valid for this environment");
                queue.ack(Collections.singletonList(msg));
            }
            logger.error("Error processing message: {}", msg, e);
        } catch (Exception e) {
            logger.error("Error processing message: {}", msg, e);
        }
    }, (Throwable t) -> {
        logger.error(t.getMessage(), t);
    });
    logger.info("QueueListener::STARTED...listening for " + queue.getName());
}
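To drive this listener, a producer has to publish a payload whose externalId field is itself a JSON string carrying workflowId and either taskId or taskRefName; the whole outer document (externalId included) is then converted to a map and merged into the task's output. The sketch below illustrates that shape; the method name, the task reference name and the extra fields are hypothetical, not taken from the Conductor sources.

void publishCompletionEvent(ObservableQueue queue, ObjectMapper objectMapper, String workflowId)
        throws JsonProcessingException {
    // externalId is itself a JSON string with the workflow/task coordinates the listener parses.
    String externalId = "{\"workflowId\":\"" + workflowId + "\",\"taskRefName\":\"wait_task_ref\"}";

    Map<String, Object> payload = new HashMap<>();
    payload.put("externalId", externalId);
    payload.put("result", "ok");   // the whole payload document is merged into task.outputData
    payload.put("score", 42);

    Message msg = new Message(UUID.randomUUID().toString(), objectMapper.writeValueAsString(payload), null);
    queue.publish(Collections.singletonList(msg));
}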
use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.
the class MySQLQueueDAOTest method pollMessagesTest.
/**
 * Test fix for https://github.com/Netflix/conductor/issues/399
 * @since 1.8.2-rc5
 */
@Test
public void pollMessagesTest() {
    final List<Message> messages = new ArrayList<>();
    final String queueName = "issue399_testQueue";
    final int totalSize = 10;
    for (int i = 0; i < totalSize; i++) {
        String payload = "{\"id\": " + i + ", \"msg\":\"test " + i + "\"}";
        Message m = new Message("testmsg-" + i, payload, "");
        if (i % 2 == 0) {
            // Set a priority on messages with an even id
            m.setPriority(99 - i);
        }
        messages.add(m);
    }

    // Populate the queue with our test message batch
    dao.push(queueName, ImmutableList.copyOf(messages));
    // Assert that all messages were persisted and no extras are in there
    assertEquals("Queue size mismatch", totalSize, dao.getSize(queueName));

    final int firstPollSize = 3;
    List<Message> firstPoll = dao.pollMessages(queueName, firstPollSize, 10_000);
    assertNotNull("First poll was null", firstPoll);
    assertFalse("First poll was empty", firstPoll.isEmpty());
    assertEquals("First poll size mismatch", firstPollSize, firstPoll.size());

    final int secondPollSize = 4;
    List<Message> secondPoll = dao.pollMessages(queueName, secondPollSize, 10_000);
    assertNotNull("Second poll was null", secondPoll);
    assertFalse("Second poll was empty", secondPoll.isEmpty());
    assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size());

    // Assert that the total queue size hasn't changed
    assertEquals("Total queue size should have remained the same", totalSize, dao.getSize(queueName));

    // Assert that our un-popped messages match our expected size
    final long expectedSize = totalSize - firstPollSize - secondPollSize;
    try (Connection c = testUtil.getDataSource().getConnection()) {
        String UNPOPPED = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
        try (Query q = new Query(testUtil.getObjectMapper(), c, UNPOPPED)) {
            long count = q.addParameter(queueName).executeCount();
            assertEquals("Remaining queue size mismatch", expectedSize, count);
        }
    } catch (Exception ex) {
        fail(ex.getMessage());
    }
}
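If pollMessages hands back messages in the order that peekMessages (shown below) selects them, i.e. priority descending, then deliver_on, then created_on, the first poll of three should return exactly the three highest-priority messages: testmsg-0, testmsg-2 and testmsg-4, with priorities 99, 97 and 95. The fragment below could be added right after the first-poll checks inside the test; it relies on that ordering assumption and is not part of the original test.

// Assumes pollMessages preserves the priority-descending order used by peekMessages.
List<String> firstPollIds = firstPoll.stream().map(Message::getId).collect(Collectors.toList());
assertEquals("Highest-priority messages should be polled first",
        Arrays.asList("testmsg-0", "testmsg-2", "testmsg-4"), firstPollIds);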
use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.
the class MySQLQueueDAO method peekMessages.
private List<Message> peekMessages(Connection connection, String queueName, int count) {
    if (count < 1) {
        return Collections.emptyList();
    }
    final String PEEK_MESSAGES = "SELECT message_id, priority, payload FROM queue_message use index(combo_queue_message) WHERE queue_name = ? AND popped = false AND deliver_on <= TIMESTAMPADD(MICROSECOND, 1000, CURRENT_TIMESTAMP) ORDER BY priority DESC, deliver_on, created_on LIMIT ?";
    List<Message> messages = query(connection, PEEK_MESSAGES,
            p -> p.addParameter(queueName).addParameter(count).executeAndFetch(rs -> {
                List<Message> results = new ArrayList<>();
                while (rs.next()) {
                    Message m = new Message();
                    m.setId(rs.getString("message_id"));
                    m.setPriority(rs.getInt("priority"));
                    m.setPayload(rs.getString("payload"));
                    results.add(m);
                }
                return results;
            }));
    return messages;
}
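peekMessages only reads candidates; to actually consume them, a companion step has to flip their popped flag within the same polling cycle. The sketch below is an assumption about how that step might look (the method name, the SQL and the Query.executeUpdate call are not copied from MySQLQueueDAO) and is included only to show how the peeked list and the popped flag relate.

// Hypothetical companion step: mark the peeked messages as popped so other pollers skip them.
private List<Message> popPeekedMessages(Connection connection, String queueName, List<Message> peeked) {
    if (peeked.isEmpty()) {
        return peeked;
    }
    final String ids = peeked.stream()
            .map(m -> "'" + m.getId() + "'")
            .collect(Collectors.joining(","));
    final String POP_MESSAGES = "UPDATE queue_message SET popped = true WHERE queue_name = ? "
            + "AND message_id IN (" + ids + ") AND popped = false";
    int updated = query(connection, POP_MESSAGES, q -> q.addParameter(queueName).executeUpdate());
    // Fewer updated rows than peeked messages means another poller won the race for some of them.
    return updated == peeked.size() ? peeked : Collections.emptyList();
}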