use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.
the class PerformanceTest, method testQueueDaoPerformance.
@Test
public void testQueueDaoPerformance() throws InterruptedException {
    AtomicBoolean stop = new AtomicBoolean(false);
    Stopwatch start = Stopwatch.createStarted();
    AtomicInteger poppedCounter = new AtomicInteger(0);
    HashMultiset<String> allPopped = HashMultiset.create();
    // Consumers - workers
    for (int i = 0; i < WORKERS; i++) {
        THREADPOOL.submit(() -> {
            while (!stop.get()) {
                List<Message> pop = Q.pollMessages(QUEUE, WORKER_BATCH, WORKER_BATCH_TIMEOUT);
                logger.info("Popped {} messages", pop.size());
                poppedCounter.accumulateAndGet(pop.size(), Integer::sum);
                if (pop.isEmpty()) {
                    try {
                        Thread.sleep(200);
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                } else {
                    logger.info("Popped {}", pop.stream().map(Message::getId).collect(Collectors.toList()));
                }
                pop.forEach(popped -> {
                    synchronized (allPopped) {
                        allPopped.add(popped.getId());
                    }
                    boolean exists = Q.containsMessage(QUEUE, popped.getId());
                    boolean ack = Q.ack(QUEUE, popped.getId());
                    if (!(exists && ack)) {
                        logger.error("Exists & Ack did not succeed for msg: {}", popped);
                    }
                });
            }
        });
    }
    // Producers
    List<Future<?>> producers = Lists.newArrayList();
    for (int i = 0; i < PRODUCERS; i++) {
        Future<?> producer = THREADPOOL.submit(() -> {
            try {
                // N messages
                for (int j = 0; j < MSGS / PRODUCER_BATCH; j++) {
                    List<Message> randomMessages = getRandomMessages(PRODUCER_BATCH);
                    Q.push(QUEUE, randomMessages);
                    logger.info("Pushed {} messages", PRODUCER_BATCH);
                    logger.info("Pushed {}", randomMessages.stream().map(Message::getId).collect(Collectors.toList()));
                }
                logger.info("Pushed ALL");
            } catch (Exception e) {
                logger.error("Something went wrong with producer", e);
                throw new RuntimeException(e);
            }
        });
        producers.add(producer);
    }
    // Observers
    for (int i = 0; i < OBSERVERS; i++) {
        THREADPOOL.submit(() -> {
            while (!stop.get()) {
                try {
                    int size = Q.getSize(QUEUE);
                    Q.queuesDetail();
                    logger.info("Size {} messages", size);
                } catch (Exception e) {
                    logger.info("Queue size failed, nevermind");
                }
                try {
                    Thread.sleep(OBSERVER_DELAY);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            }
        });
    }
    // Consumers - unack processor
    for (int i = 0; i < UNACK_RUNNERS; i++) {
        THREADPOOL.submit(() -> {
            while (!stop.get()) {
                try {
                    Q.processUnacks(QUEUE);
                } catch (Exception e) {
                    logger.info("Unack failed, nevermind", e);
                    continue;
                }
                logger.info("Unacked");
                try {
                    Thread.sleep(UNACK_DELAY);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            }
        });
    }
    long elapsed;
    while (true) {
        try {
            Thread.sleep(COMPLETION_MONITOR_DELAY);
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        int size = Q.getSize(QUEUE);
        logger.info("MONITOR SIZE : {}", size);
        if (size == 0 && producers.stream().allMatch(Future::isDone)) {
            elapsed = start.elapsed(TimeUnit.MILLISECONDS);
            stop.set(true);
            break;
        }
    }
    // Shut the pool down first; awaitTermination on a running pool would simply block for the full timeout.
    THREADPOOL.shutdown();
    THREADPOOL.awaitTermination(10, TimeUnit.SECONDS);
    logger.info("Finished in {} ms", elapsed);
    logger.info("Throughput {} msgs/second", ((MSGS * PRODUCERS) / (elapsed * 1.0)) * 1000);
    logger.info("Threads finished");
    if (poppedCounter.get() != MSGS * PRODUCERS) {
        synchronized (allPopped) {
            List<String> duplicates = allPopped.entrySet().stream()
                    .filter(entry -> entry.getCount() > 1)
                    .map(entry -> entry.getElement() + ": " + entry.getCount())
                    .collect(Collectors.toList());
            logger.error("Found duplicate pops: {}", duplicates);
        }
        throw new RuntimeException("Popped " + poppedCounter.get() + " != produced: " + MSGS * PRODUCERS);
    }
}
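The test relies on a getRandomMessages(int) helper that is not shown in this excerpt. A minimal sketch of what such a helper could look like, assuming nothing beyond the Message(id, payload, receipt) constructor used elsewhere on this page (plus java.util imports); the IDs are random UUIDs and the payload content is arbitrary:

// Hypothetical helper for the test above - builds a batch of messages with random IDs.
private static List<Message> getRandomMessages(int count) {
    List<Message> batch = new ArrayList<>(count);
    for (int i = 0; i < count; i++) {
        String id = UUID.randomUUID().toString();
        batch.add(new Message(id, "{\"id\":\"" + id + "\"}", null));
    }
    return batch;
}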
use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.
the class WorkflowStatusPublisherIntegrationTest, method testListenerOnTerminatedWorkflow.
@Test
public void testListenerOnTerminatedWorkflow() throws IOException {
    String id = startOrLoadWorkflowExecution(LINEAR_WORKFLOW_T1_T2, 1, "testWorkflowTerminatedListener", new HashMap<>(), null, null);
    workflowExecutor.terminateWorkflow(id, INCOMPLETION_REASON);
    List<Message> callbackMessages = queueDAO.pollMessages(CALLBACK_QUEUE, 1, 200);
    queueDAO.ack(CALLBACK_QUEUE, callbackMessages.get(0).getId());
    WorkflowSummary payload = mapper.readValue(callbackMessages.get(0).getPayload(), WorkflowSummary.class);
    assertEquals(id, callbackMessages.get(0).getId());
    assertEquals(LINEAR_WORKFLOW_T1_T2, payload.getWorkflowType());
    assertEquals("testWorkflowTerminatedListener", payload.getCorrelationId());
    assertEquals(Workflow.WorkflowStatus.TERMINATED, payload.getStatus());
    assertEquals(INCOMPLETION_REASON, payload.getReasonForIncompletion());
}
use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.
the class AMQPObservableQueue, method receiveMessagesFromQueue.
private void receiveMessagesFromQueue(String queueName) throws Exception {
    Consumer consumer = new DefaultConsumer(channel) {

        @Override
        public void handleDelivery(final String consumerTag, final Envelope envelope, final AMQP.BasicProperties properties, final byte[] body) throws IOException {
            try {
                Message message = asMessage(settings, new GetResponse(envelope, properties, body, Integer.MAX_VALUE));
                if (message != null) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Got message with ID {} and receipt {}", message.getId(), message.getReceipt());
                    }
                    messages.add(message);
                    logger.info("receiveMessagesFromQueue - End method {}", messages);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } catch (Exception e) {
                // Do not fail the consumer on a bad message; log and move on.
                logger.error("Failed to process delivery {} from queue {}", envelope.getDeliveryTag(), queueName, e);
            }
        }
    };
    getOrCreateChannel().basicConsume(queueName, false, consumer);
    Monitors.recordEventQueueMessagesProcessed(getType(), queueName, messages.size());
}
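The asMessage helper used above is not part of this excerpt; it converts the AMQP GetResponse into a conductor Message. A rough, illustrative sketch of that mapping, ignoring the settings argument and assuming the message ID comes from the AMQP messageId property and the receipt from the delivery tag (the real implementation may differ):

// Illustrative only - the real asMessage(settings, response) may derive these fields differently.
private static Message toConductorMessage(GetResponse response) {
    if (response == null) {
        return null;
    }
    String id = response.getProps().getMessageId();
    String payload = new String(response.getBody(), StandardCharsets.UTF_8);
    String receipt = String.valueOf(response.getEnvelope().getDeliveryTag());
    return new Message(id, payload, receipt);
}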
use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.
the class DynoQueueStatusPublisher, method workflowToMessage.
private Message workflowToMessage(Workflow workflow) {
    String jsonWfSummary;
    WorkflowSummary summary = new WorkflowSummary(workflow);
    try {
        jsonWfSummary = objectMapper.writeValueAsString(summary);
    } catch (JsonProcessingException e) {
        LOGGER.error("Failed to convert WorkflowSummary: {} to String", summary, e);
        throw new RuntimeException(e);
    }
    return new Message(workflow.getWorkflowId(), jsonWfSummary, null);
}
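The resulting message carries the workflow ID as its message ID and the serialized WorkflowSummary as its payload. As an illustrative usage sketch only, such a message could be enqueued with the QueueDAO push API seen in the performance test above; the queue name and queueDAO reference here are placeholders, not the publisher's actual target queue:

// Illustrative only - placeholder queue name and queueDAO reference.
Message statusMessage = workflowToMessage(workflow);
queueDAO.push("workflow-status-queue", Collections.singletonList(statusMessage));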
use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.
the class QueueManager, method update.
private void update(Map<String, Object> externalIdMap, Map<String, Object> output, Status status) throws Exception {
    Map<String, Object> outputMap = new HashMap<>();
    outputMap.put("externalId", objectMapper.writeValueAsString(externalIdMap));
    outputMap.putAll(output);
    Message msg = new Message(UUID.randomUUID().toString(), objectMapper.writeValueAsString(outputMap), null);
    ObservableQueue queue = queues.get(status);
    if (queue == null) {
        throw new IllegalArgumentException("There is no queue for handling " + status.toString() + " status");
    }
    queue.publish(Arrays.asList(msg));
}
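To make the published payload shape concrete: assuming a hypothetical externalIdMap of {"workflowInstanceId": "wf-1", "taskRefName": "t1"} and an output map of {"result": "ok"}, the message body would be a JSON object whose externalId field is itself a JSON-encoded string:

{
  "externalId": "{\"workflowInstanceId\":\"wf-1\",\"taskRefName\":\"t1\"}",
  "result": "ok"
}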