Usage of com.netflix.conductor.core.events.queue.Message in the Netflix Conductor project.
Snippet: the mapGetMessagesResponse method of the ElasticSearchRestDAOV7 class.
/**
 * Converts an Elasticsearch {@link SearchResponse} into queue {@link Message}s.
 * Each hit's JSON source is deserialized into a String-to-String map, and the
 * "messageId" and "payload" entries become the Message id and payload.
 *
 * @param response the search response whose hits are to be mapped
 * @return one Message per search hit, in hit order
 * @throws IOException if a hit's source cannot be parsed as JSON
 */
private List<Message> mapGetMessagesResponse(SearchResponse response) throws IOException {
    SearchHit[] searchHits = response.getHits().getHits();
    // Target type for Jackson: HashMap<String, String>
    MapType stringMapType =
            TypeFactory.defaultInstance().constructMapType(HashMap.class, String.class, String.class);
    List<Message> result = new ArrayList<>(searchHits.length);
    for (SearchHit searchHit : searchHits) {
        Map<String, String> document = objectMapper.readValue(searchHit.getSourceAsString(), stringMapType);
        result.add(new Message(document.get("messageId"), document.get("payload"), null));
    }
    return result;
}
Usage of com.netflix.conductor.core.events.queue.Message in the Netflix Conductor project.
Snippet: the pollDeferredMessagesTest method of the MySQLQueueDAOTest class.
/**
 * Verifies deferred (delayed) message delivery: messages pushed with a positive
 * offset_time_seconds must not be returned by pollMessages until their delay
 * has elapsed, even when delivery happens out of id order.
 *
 * Test fix for https://github.com/Netflix/conductor/issues/448
 * @since 1.8.2-rc5
 */
@Test
public void pollDeferredMessagesTest() throws InterruptedException {
final List<Message> messages = new ArrayList<>();
final String queueName = "issue448_testQueue";
final int totalSize = 10;
for (int i = 0; i < totalSize; i++) {
int offset = 0;
if (i < 5) {
// ids 0-4 are deliverable immediately (no delay)
offset = 0;
} else if (i == 6 || i == 7) {
// Purposefully skipping id:5 to test out of order deliveries
// Set id:6 and id:7 for a 5s delay so they are picked up in the second
// polling batch (this test sleeps 5s before the second poll)
offset = 5;
} else {
// Set all other queue messages (ids 5, 8, 9) to have enough of a delay that they won't accidentally
// be picked up.
offset = 10_000 + i;
}
String payload = "{\"id\": " + i + ",\"offset_time_seconds\":" + offset + "}";
Message m = new Message("testmsg-" + i, payload, "");
messages.add(m);
dao.push(queueName, "testmsg-" + i, offset);
}
// Assert that all messages were persisted and no extras are in there
assertEquals("Queue size mismatch", totalSize, dao.getSize(queueName));
final int firstPollSize = 4;
List<Message> firstPoll = dao.pollMessages(queueName, firstPollSize, 100);
assertNotNull("First poll was null", firstPoll);
assertFalse("First poll was empty", firstPoll.isEmpty());
assertEquals("First poll size mismatch", firstPollSize, firstPoll.size());
// Five messages (ids 0-4) are deliverable but only four are polled, so any
// 4-element subset of the first five ids is acceptable (hence firstPollSize + 1).
List<String> firstPollMessageIds = messages.stream().map(Message::getId).collect(Collectors.toList()).subList(0, firstPollSize + 1);
for (int i = 0; i < firstPollSize; i++) {
String actual = firstPoll.get(i).getId();
assertTrue("Unexpected Id: " + actual, firstPollMessageIds.contains(actual));
}
final int secondPollSize = 3;
// Sleep a bit to get the next batch of messages
LOGGER.debug("Sleeping for second poll...");
Thread.sleep(5_000);
// Poll for many more messages than expected
List<Message> secondPoll = dao.pollMessages(queueName, secondPollSize + 10, 100);
assertNotNull("Second poll was null", secondPoll);
assertFalse("Second poll was empty", secondPoll.isEmpty());
assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size());
// Leftover zero-delay message plus the two 5s-delayed ones.
// NOTE(review): "testmsg-4" assumes the first poll returned ids 0-3 in order
// — confirm the DAO's poll ordering is deterministic.
List<String> expectedIds = Arrays.asList("testmsg-4", "testmsg-6", "testmsg-7");
for (int i = 0; i < secondPollSize; i++) {
String actual = secondPoll.get(i).getId();
assertTrue("Unexpected Id: " + actual, expectedIds.contains(actual));
}
// Assert that the total queue size hasn't changed (polling pops, it does not remove)
assertEquals("Total queue size should have remained the same", totalSize, dao.getSize(queueName));
// Assert that our un-popped messages match our expected size
final long expectedSize = totalSize - firstPollSize - secondPollSize;
try (Connection c = testUtil.getDataSource().getConnection()) {
String UNPOPPED = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
try (Query q = new Query(testUtil.getObjectMapper(), c, UNPOPPED)) {
long count = q.addParameter(queueName).executeCount();
assertEquals("Remaining queue size mismatch", expectedSize, count);
}
} catch (Exception ex) {
fail(ex.getMessage());
}
}
Usage of com.netflix.conductor.core.events.queue.Message in the Netflix Conductor project.
Snippet: the popMessages method of the PostgresQueueDAO class.
/**
 * Atomically claims up to {@code count} visible messages from the named queue.
 * Each peeked message is marked popped via a conditional UPDATE; only messages
 * whose UPDATE actually flipped the flag (i.e. were not claimed by a concurrent
 * poller) are returned.
 *
 * @param connection active database connection to run the updates on
 * @param queueName queue to pop from
 * @param count maximum number of messages to claim
 * @param timeout unused here; retained for signature compatibility with callers
 * @return the subset of peeked messages this caller successfully claimed
 */
private List<Message> popMessages(Connection connection, String queueName, int count, int timeout) {
    final List<Message> candidates = peekMessages(connection, queueName, count);
    if (candidates.isEmpty()) {
        return candidates;
    }
    final String POP_MESSAGE = "UPDATE queue_message SET popped = true WHERE queue_name = ? AND message_id = ? AND popped = false";
    final List<Message> claimed = new ArrayList<>();
    for (Message candidate : candidates) {
        // popped = false in the WHERE clause makes the claim race-safe:
        // a row already taken by another poller updates zero rows.
        int updatedRows = query(connection, POP_MESSAGE,
                q -> q.addParameter(queueName).addParameter(candidate.getId()).executeUpdate());
        if (updatedRows == 1) {
            claimed.add(candidate);
        }
    }
    return claimed;
}
Usage of com.netflix.conductor.core.events.queue.Message in the Netflix Conductor project.
Snippet: the pollDeferredMessagesTest method of the PostgresQueueDAOTest class.
/**
 * Verifies deferred (delayed) message delivery: messages pushed with a positive
 * offset_time_seconds must not be returned by pollMessages until their delay
 * has elapsed, even when delivery happens out of id order.
 *
 * Test fix for https://github.com/Netflix/conductor/issues/448
 * @since 1.8.2-rc5
 */
@Test
public void pollDeferredMessagesTest() throws InterruptedException {
final List<Message> messages = new ArrayList<>();
final String queueName = "issue448_testQueue";
final int totalSize = 10;
for (int i = 0; i < totalSize; i++) {
int offset = 0;
if (i < 5) {
// ids 0-4 are deliverable immediately (no delay)
offset = 0;
} else if (i == 6 || i == 7) {
// Purposefully skipping id:5 to test out of order deliveries
// Set id:6 and id:7 for a 5s delay so they are picked up in the second
// polling batch (this test sleeps 5s before the second poll)
offset = 5;
} else {
// Set all other queue messages (ids 5, 8, 9) to have enough of a delay that they won't accidentally
// be picked up.
offset = 10_000 + i;
}
String payload = "{\"id\": " + i + ",\"offset_time_seconds\":" + offset + "}";
Message m = new Message("testmsg-" + i, payload, "");
messages.add(m);
dao.push(queueName, "testmsg-" + i, offset);
}
// Assert that all messages were persisted and no extras are in there
assertEquals("Queue size mismatch", totalSize, dao.getSize(queueName));
final int firstPollSize = 4;
List<Message> firstPoll = dao.pollMessages(queueName, firstPollSize, 100);
assertNotNull("First poll was null", firstPoll);
assertFalse("First poll was empty", firstPoll.isEmpty());
assertEquals("First poll size mismatch", firstPollSize, firstPoll.size());
// Five messages (ids 0-4) are deliverable but only four are polled, so any
// 4-element subset of the first five ids is acceptable (hence firstPollSize + 1).
List<String> firstPollMessageIds = messages.stream().map(Message::getId).collect(Collectors.toList()).subList(0, firstPollSize + 1);
for (int i = 0; i < firstPollSize; i++) {
String actual = firstPoll.get(i).getId();
assertTrue("Unexpected Id: " + actual, firstPollMessageIds.contains(actual));
}
final int secondPollSize = 3;
// Sleep a bit to get the next batch of messages
LOGGER.debug("Sleeping for second poll...");
Thread.sleep(5_000);
// Poll for many more messages than expected
List<Message> secondPoll = dao.pollMessages(queueName, secondPollSize + 10, 100);
assertNotNull("Second poll was null", secondPoll);
assertFalse("Second poll was empty", secondPoll.isEmpty());
assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size());
// Leftover zero-delay message plus the two 5s-delayed ones.
// NOTE(review): "testmsg-4" assumes the first poll returned ids 0-3 in order
// — confirm the DAO's poll ordering is deterministic.
List<String> expectedIds = Arrays.asList("testmsg-4", "testmsg-6", "testmsg-7");
for (int i = 0; i < secondPollSize; i++) {
String actual = secondPoll.get(i).getId();
assertTrue("Unexpected Id: " + actual, expectedIds.contains(actual));
}
// Assert that the total queue size hasn't changed (polling pops, it does not remove)
assertEquals("Total queue size should have remained the same", totalSize, dao.getSize(queueName));
// Assert that our un-popped messages match our expected size
final long expectedSize = totalSize - firstPollSize - secondPollSize;
try (Connection c = testUtil.getDataSource().getConnection()) {
String UNPOPPED = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
try (Query q = new Query(testUtil.getObjectMapper(), c, UNPOPPED)) {
long count = q.addParameter(queueName).executeCount();
assertEquals("Remaining queue size mismatch", expectedSize, count);
}
} catch (Exception ex) {
fail(ex.getMessage());
}
}
Usage of com.netflix.conductor.core.events.queue.Message in the Netflix Conductor project.
Snippet: the pollMessagesTest method of the PostgresQueueDAOTest class.
/**
 * Verifies that pollMessages honors the requested batch size and that polling
 * marks messages as popped without removing them from the queue.
 *
 * Test fix for https://github.com/Netflix/conductor/issues/399
 * @since 1.8.2-rc5
 */
@Test
public void pollMessagesTest() {
    final String queueName = "issue399_testQueue";
    final int totalSize = 10;
    final List<Message> messages = new ArrayList<>();
    for (int idx = 0; idx < totalSize; idx++) {
        String body = "{\"id\": " + idx + ", \"msg\":\"test " + idx + "\"}";
        Message message = new Message("testmsg-" + idx, body, "");
        if (idx % 2 == 0) {
            // Even ("pair") ids carry an explicit priority
            message.setPriority(99 - idx);
        }
        messages.add(message);
    }
    // Load the whole batch into the queue in one call
    dao.push(queueName, ImmutableList.copyOf(messages));
    // Every message should be persisted, and nothing extra
    assertEquals("Queue size mismatch", totalSize, dao.getSize(queueName));
    // A zero-count poll must return nothing
    List<Message> emptyBatch = dao.pollMessages(queueName, 0, 10_000);
    assertTrue("Zero poll should be empty", emptyBatch.isEmpty());
    final int firstPollSize = 3;
    List<Message> batchOne = dao.pollMessages(queueName, firstPollSize, 10_000);
    assertNotNull("First poll was null", batchOne);
    assertFalse("First poll was empty", batchOne.isEmpty());
    assertEquals("First poll size mismatch", firstPollSize, batchOne.size());
    final int secondPollSize = 4;
    List<Message> batchTwo = dao.pollMessages(queueName, secondPollSize, 10_000);
    assertNotNull("Second poll was null", batchTwo);
    assertFalse("Second poll was empty", batchTwo.isEmpty());
    assertEquals("Second poll size mismatch", secondPollSize, batchTwo.size());
    // Polling pops messages; it does not delete them from the queue
    assertEquals("Total queue size should have remained the same", totalSize, dao.getSize(queueName));
    // Whatever was never polled must still be un-popped in the table
    final long expectedSize = totalSize - firstPollSize - secondPollSize;
    try (Connection conn = testUtil.getDataSource().getConnection()) {
        String UNPOPPED = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
        try (Query countQuery = new Query(testUtil.getObjectMapper(), conn, UNPOPPED)) {
            long unpopped = countQuery.addParameter(queueName).executeCount();
            assertEquals("Remaining queue size mismatch", expectedSize, unpopped);
        }
    } catch (Exception ex) {
        fail(ex.getMessage());
    }
}
End of aggregated usage examples.