
Example 36 with Message

Use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.

The class PerformanceTest, method testQueueDaoPerformance.

@Test
public void testQueueDaoPerformance() throws InterruptedException {
    AtomicBoolean stop = new AtomicBoolean(false);
    Stopwatch start = Stopwatch.createStarted();
    AtomicInteger poppedCounter = new AtomicInteger(0);
    HashMultiset<String> allPopped = HashMultiset.create();
    // Consumers - workers
    for (int i = 0; i < WORKERS; i++) {
        THREADPOOL.submit(() -> {
            while (!stop.get()) {
                List<Message> pop = Q.pollMessages(QUEUE, WORKER_BATCH, WORKER_BATCH_TIMEOUT);
                logger.info("Popped {} messages", pop.size());
                poppedCounter.accumulateAndGet(pop.size(), Integer::sum);
                if (pop.size() == 0) {
                    try {
                        Thread.sleep(200);
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                } else {
                    logger.info("Popped {}", pop.stream().map(Message::getId).collect(Collectors.toList()));
                }
                pop.forEach(popped -> {
                    synchronized (allPopped) {
                        allPopped.add(popped.getId());
                    }
                    boolean exists = Q.containsMessage(QUEUE, popped.getId());
                    boolean ack = Q.ack(QUEUE, popped.getId());
                    if (!(ack && exists)) {
                        logger.error("Exists & Ack did not succeed for msg: {}", popped);
                    }
                });
            }
        });
    }
    // Producers
    List<Future<?>> producers = Lists.newArrayList();
    for (int i = 0; i < PRODUCERS; i++) {
        Future<?> producer = THREADPOOL.submit(() -> {
            try {
                // N messages
                for (int j = 0; j < MSGS / PRODUCER_BATCH; j++) {
                    List<Message> randomMessages = getRandomMessages(PRODUCER_BATCH);
                    Q.push(QUEUE, randomMessages);
                    logger.info("Pushed {} messages", PRODUCER_BATCH);
                    logger.info("Pushed {}", randomMessages.stream().map(Message::getId).collect(Collectors.toList()));
                }
                logger.info("Pushed ALL");
            } catch (Exception e) {
                logger.error("Something went wrong with producer", e);
                throw new RuntimeException(e);
            }
        });
        producers.add(producer);
    }
    // Observers
    for (int i = 0; i < OBSERVERS; i++) {
        THREADPOOL.submit(() -> {
            while (!stop.get()) {
                try {
                    int size = Q.getSize(QUEUE);
                    Q.queuesDetail();
                    logger.info("Size   {} messages", size);
                } catch (Exception e) {
                    logger.info("Queue size failed, nevermind");
                }
                try {
                    Thread.sleep(OBSERVER_DELAY);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            }
        });
    }
    // Consumers - unack processor
    for (int i = 0; i < UNACK_RUNNERS; i++) {
        THREADPOOL.submit(() -> {
            while (!stop.get()) {
                try {
                    Q.processUnacks(QUEUE);
                } catch (Exception e) {
                    logger.info("Unack failed, nevermind", e);
                    continue;
                }
                logger.info("Unacked");
                try {
                    Thread.sleep(UNACK_DELAY);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            }
        });
    }
    long elapsed;
    while (true) {
        try {
            Thread.sleep(COMPLETION_MONITOR_DELAY);
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        int size = Q.getSize(QUEUE);
        logger.info("MONITOR SIZE : {}", size);
        if (size == 0 && producers.stream().allMatch(Future::isDone)) {
            elapsed = start.elapsed(TimeUnit.MILLISECONDS);
            stop.set(true);
            break;
        }
    }
    // Initiate shutdown before awaiting termination; calling awaitTermination first
    // just blocks for the full timeout without ever stopping the pool.
    THREADPOOL.shutdown();
    THREADPOOL.awaitTermination(10, TimeUnit.SECONDS);
    logger.info("Finished in {} ms", elapsed);
    logger.info("Throughput {} msgs/second", ((MSGS * PRODUCERS) / (elapsed * 1.0)) * 1000);
    logger.info("Threads finished");
    if (poppedCounter.get() != MSGS * PRODUCERS) {
        synchronized (allPopped) {
            List<String> duplicates = allPopped.entrySet().stream()
                    .filter(entry -> entry.getCount() > 1)
                    .map(entry -> entry.getElement() + ": " + entry.getCount())
                    .collect(Collectors.toList());
            logger.error("Found duplicate pops: " + duplicates);
        }
        throw new RuntimeException("Popped " + poppedCoutner.get() + " != produced: " + MSGS * PRODUCERS);
    }
}
Also used : IntStream(java.util.stream.IntStream) Connection(java.sql.Connection) FlywayException(org.flywaydb.core.api.FlywayException) TestConfiguration(com.netflix.conductor.core.execution.TestConfiguration) Stopwatch(com.google.common.base.Stopwatch) Multiset(com.google.common.collect.Multiset) LoggerFactory(org.slf4j.LoggerFactory) ExecutionDAO(com.netflix.conductor.dao.ExecutionDAO) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Task(com.netflix.conductor.common.metadata.tasks.Task) PostgresConfiguration(com.netflix.conductor.postgres.PostgresConfiguration) ArrayList(java.util.ArrayList) FluentConfiguration(org.flywaydb.core.api.configuration.FluentConfiguration) SQLException(java.sql.SQLException) Future(java.util.concurrent.Future) Lists(com.google.common.collect.Lists) HashMultiset(com.google.common.collect.HashMultiset) PostgresQueueDAO(com.netflix.conductor.dao.postgres.PostgresQueueDAO) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Workflow(com.netflix.conductor.common.run.Workflow) After(org.junit.After) DataSource(javax.sql.DataSource) PostgresExecutionDAO(com.netflix.conductor.dao.postgres.PostgresExecutionDAO) ExecutorService(java.util.concurrent.ExecutorService) Before(org.junit.Before) Message(com.netflix.conductor.core.events.queue.Message) Logger(org.slf4j.Logger) PostgresDataSourceProvider(com.netflix.conductor.postgres.PostgresDataSourceProvider) BlockingDeque(java.util.concurrent.BlockingDeque) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) WorkflowDef(com.netflix.conductor.common.metadata.workflow.WorkflowDef) Test(org.junit.Test) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) TimeUnit(java.util.concurrent.TimeUnit) QueueDAO(com.netflix.conductor.dao.QueueDAO) List(java.util.List) Ignore(org.junit.Ignore) Paths(java.nio.file.Paths) LinkedBlockingDeque(java.util.concurrent.LinkedBlockingDeque) Flyway(org.flywaydb.core.Flyway) JsonMapperProvider(com.netflix.conductor.common.utils.JsonMapperProvider) Collections(java.util.Collections)
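
The test above calls a getRandomMessages(PRODUCER_BATCH) helper that is not shown in this excerpt. A minimal sketch of what such a helper could look like, assuming a random UUID per message ID and an empty JSON payload (both are assumptions, not taken from the project source; java.util.UUID would also need to be imported):

private List<Message> getRandomMessages(int count) {
    // Hypothetical helper: builds `count` messages with random IDs and an empty payload.
    return IntStream.range(0, count)
            .mapToObj(i -> new Message(UUID.randomUUID().toString(), "{}", null))
            .collect(Collectors.toList());
}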

Example 37 with Message

Use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.

The class WorkflowStatusPublisherIntegrationTest, method testListenerOnTerminatedWorkflow.

@Test
public void testListenerOnTerminatedWorkflow() throws IOException {
    String id = startOrLoadWorkflowExecution(LINEAR_WORKFLOW_T1_T2, 1, "testWorkflowTerminatedListener", new HashMap<>(), null, null);
    workflowExecutor.terminateWorkflow(id, INCOMPLETION_REASON);
    List<Message> callbackMessages = queueDAO.pollMessages(CALLBACK_QUEUE, 1, 200);
    queueDAO.ack(CALLBACK_QUEUE, callbackMessages.get(0).getId());
    WorkflowSummary payload = mapper.readValue(callbackMessages.get(0).getPayload(), WorkflowSummary.class);
    assertEquals(id, callbackMessages.get(0).getId());
    assertEquals(LINEAR_WORKFLOW_T1_T2, payload.getWorkflowType());
    assertEquals("testWorkflowTerminatedListener", payload.getCorrelationId());
    assertEquals(Workflow.WorkflowStatus.TERMINATED, payload.getStatus());
    assertEquals(INCOMPLETION_REASON, payload.getReasonForIncompletion());
}
Also used : WorkflowSummary(com.netflix.conductor.common.run.WorkflowSummary) Message(com.netflix.conductor.core.events.queue.Message) Test(org.junit.Test)
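
Note that the single pollMessages call with a 200 ms timeout can race the status publisher if the listener publishes asynchronously; if no message has arrived yet, callbackMessages.get(0) throws. A hedged sketch of a small retry helper a test could use instead (the method name and timings are illustrative, not part of the Conductor test):

private List<Message> pollUntilPresent(QueueDAO queueDAO, String queueName) throws InterruptedException {
    // Retry the poll a few times so the assertion does not race the publisher.
    for (int attempt = 0; attempt < 10; attempt++) {
        List<Message> messages = queueDAO.pollMessages(queueName, 1, 200);
        if (!messages.isEmpty()) {
            return messages;
        }
        Thread.sleep(100);
    }
    throw new AssertionError("No callback message arrived on " + queueName);
}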

Example 38 with Message

Use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.

The class AMQPObservableQueue, method receiveMessagesFromQueue.

private void receiveMessagesFromQueue(String queueName) throws Exception {
    Consumer consumer = new DefaultConsumer(channel) {

        @Override
        public void handleDelivery(final String consumerTag, final Envelope envelope, final AMQP.BasicProperties properties, final byte[] body) throws IOException {
            try {
                Message message = asMessage(settings, new GetResponse(envelope, properties, body, Integer.MAX_VALUE));
                if (message != null) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Got message with ID {} and receipt {}", message.getId(), message.getReceipt());
                    }
                    messages.add(message);
                    logger.info("receiveMessagesFromQueue- End method {}", messages);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } catch (Exception e) {
                // Log and swallow other delivery failures so one bad message does not
                // stop the consumer.
                logger.error("Failed to handle delivery from queue {}", queueName, e);
            }
        }
    };
    getOrCreateChannel().basicConsume(queueName, false, consumer);
    Monitors.recordEventQueueMessagesProcessed(getType(), queueName, messages.size());
}
Also used : Consumer(com.rabbitmq.client.Consumer) DefaultConsumer(com.rabbitmq.client.DefaultConsumer) DefaultConsumer(com.rabbitmq.client.DefaultConsumer) Message(com.netflix.conductor.core.events.queue.Message) BasicProperties(com.rabbitmq.client.BasicProperties) Envelope(com.rabbitmq.client.Envelope) GetResponse(com.rabbitmq.client.GetResponse) TimeoutException(java.util.concurrent.TimeoutException) IOException(java.io.IOException)
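
The asMessage helper used above is not shown. A rough sketch of the shape of that conversion, assuming the message ID comes from the AMQP messageId property and the receipt is the delivery tag later used for acking; the real implementation also takes the AMQPSettings into account, and java.nio.charset.StandardCharsets and java.util.UUID would need to be imported:

private Message asMessage(GetResponse response) {
    // Sketch only: the real method also receives the queue settings.
    if (response == null) {
        return null;
    }
    String id = response.getProps().getMessageId() != null
            ? response.getProps().getMessageId()
            : UUID.randomUUID().toString();
    Message message = new Message();
    message.setId(id);
    message.setPayload(new String(response.getBody(), StandardCharsets.UTF_8));
    message.setReceipt(String.valueOf(response.getEnvelope().getDeliveryTag()));
    return message;
}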

Example 39 with Message

Use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.

The class DynoQueueStatusPublisher, method workflowToMessage.

private Message workflowToMessage(Workflow workflow) {
    String jsonWfSummary;
    WorkflowSummary summary = new WorkflowSummary(workflow);
    try {
        jsonWfSummary = objectMapper.writeValueAsString(summary);
    } catch (JsonProcessingException e) {
        LOGGER.error("Failed to convert WorkflowSummary: {} to String. Exception: {}", summary, e);
        throw new RuntimeException(e);
    }
    return new Message(workflow.getWorkflowId(), jsonWfSummary, null);
}
Also used : WorkflowSummary(com.netflix.conductor.common.run.WorkflowSummary) Message(com.netflix.conductor.core.events.queue.Message) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException)
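
A sketch of how such a converter could be used from a workflow status listener callback; the queueDAO and successStatusQueue fields here are assumptions for illustration, not the actual DynoQueueStatusPublisher wiring:

@Override
public void onWorkflowCompleted(Workflow workflow) {
    // Push the serialized WorkflowSummary onto the configured status queue.
    queueDAO.push(successStatusQueue, Collections.singletonList(workflowToMessage(workflow)));
}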

Example 40 with Message

Use of com.netflix.conductor.core.events.queue.Message in project conductor by Netflix.

The class QueueManager, method update.

private void update(Map<String, Object> externalIdMap, Map<String, Object> output, Status status) throws Exception {
    Map<String, Object> outputMap = new HashMap<>();
    outputMap.put("externalId", objectMapper.writeValueAsString(externalIdMap));
    outputMap.putAll(output);
    Message msg = new Message(UUID.randomUUID().toString(), objectMapper.writeValueAsString(outputMap), null);
    ObservableQueue queue = queues.get(status);
    if (queue == null) {
        throw new IllegalArgumentException("There is no queue for handling " + status.toString() + " status");
    }
    queue.publish(Arrays.asList(msg));
}
Also used : Message(com.netflix.conductor.core.events.queue.Message) HashMap(java.util.HashMap) ObservableQueue(com.netflix.conductor.core.events.queue.ObservableQueue)
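
An illustrative call to update, with made-up identifiers (real callers pass the external ID map extracted from the inbound event message, and Status here is assumed to be the task status enum used by QueueManager):

Map<String, Object> externalIdMap = new HashMap<>();
externalIdMap.put("workflowId", "wf-123");
externalIdMap.put("taskRefName", "wait_task");
Map<String, Object> output = new HashMap<>();
output.put("result", "approved");
update(externalIdMap, output, Status.COMPLETED);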

Aggregations

Message (com.netflix.conductor.core.events.queue.Message) 42
ArrayList (java.util.ArrayList) 18
Test (org.junit.Test) 16
LinkedList (java.util.LinkedList) 12
List (java.util.List) 11
ObservableQueue (com.netflix.conductor.core.events.queue.ObservableQueue) 10
HashMap (java.util.HashMap) 10
Collections (java.util.Collections) 9
Map (java.util.Map) 8
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper) 7
Connection (java.sql.Connection) 7
Executors (java.util.concurrent.Executors) 7
Task (com.netflix.conductor.common.metadata.tasks.Task) 6
TimeUnit (java.util.concurrent.TimeUnit) 6
Workflow (com.netflix.conductor.common.run.Workflow) 5
QueueDAO (com.netflix.conductor.dao.QueueDAO) 5
Collectors (java.util.stream.Collectors) 5
SearchHit (org.elasticsearch.search.SearchHit) 5
Logger (org.slf4j.Logger) 5
LoggerFactory (org.slf4j.LoggerFactory) 5