Search in sources:

Example 1 with RawMessage

Use of co.cask.cdap.messaging.data.RawMessage in project cdap by caskdata.

The class MessagingNotificationService, method subscribe.

@Override
public <N> Cancellable subscribe(NotificationFeedId feed, NotificationHandler<N> handler, Executor executor) throws NotificationFeedNotFoundException, NotificationFeedException {
    Cancellable subscribeCancellable = super.subscribe(feed, handler, executor);
    // If a fetching thread is already running, just return the cancellable.
    if (!needFetch.compareAndSet(false, true)) {
        return subscribeCancellable;
    }
    // Start fetching
    subscribeExecutor.execute(new Runnable() {

        private final long startTime = System.currentTimeMillis();

        private final RetryStrategy scheduleStrategy = RetryStrategies.exponentialDelay(100, 3000, TimeUnit.MILLISECONDS);

        private byte[] messageId;

        private int emptyFetchCount;

        @Override
        public void run() {
            try {
                MessageFetcher fetcher = messagingService.prepareFetch(notificationTopic);
                if (messageId == null) {
                    fetcher.setStartTime(startTime);
                } else {
                    fetcher.setStartMessage(messageId, false);
                }
                emptyFetchCount++;
                try (CloseableIterator<RawMessage> iterator = fetcher.fetch()) {
                    while (iterator.hasNext()) {
                        emptyFetchCount = 0;
                        RawMessage rawMessage = iterator.next();
                        NotificationMessage message = GSON.fromJson(new String(rawMessage.getPayload(), StandardCharsets.UTF_8), NotificationMessage.class);
                        try {
                            LOG.trace("Decoded notification: {}", message);
                            notificationReceived(message.getFeedId(), message.getNotificationJson());
                        } catch (Throwable t) {
                            LOG.warn("Error while processing notification {} with handler {}", message, t);
                        }
                        messageId = rawMessage.getId();
                    }
                }
            } catch (Exception e) {
                LOG.error("Failed to get notification", e);
            }
            // Back off if the fetch was empty.
            if (emptyFetchCount > 0) {
                // Schedule the next fetch. The exponential strategy doesn't use the time component,
                // so it doesn't matter what we pass in.
                subscribeExecutor.schedule(this, scheduleStrategy.nextRetry(emptyFetchCount, startTime), TimeUnit.MILLISECONDS);
            } else {
                subscribeExecutor.execute(this);
            }
        }
    });
    return subscribeCancellable;
}
Also used: MessageFetcher(co.cask.cdap.messaging.MessageFetcher), CloseableIterator(co.cask.cdap.api.dataset.lib.CloseableIterator), Cancellable(org.apache.twill.common.Cancellable), NotificationFeedNotFoundException(co.cask.cdap.notifications.feeds.NotificationFeedNotFoundException), NotificationException(co.cask.cdap.notifications.service.NotificationException), NotificationFeedException(co.cask.cdap.notifications.feeds.NotificationFeedException), RawMessage(co.cask.cdap.messaging.data.RawMessage), RetryStrategy(co.cask.cdap.common.service.RetryStrategy)
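
The heart of Example 1 is a fetch-and-resume loop: the first fetch starts from the subscription time, the id of every message seen is remembered, and the next fetch resumes exclusively after that id. Pulled out of the anonymous Runnable, the pattern reduces to the minimal sketch below. This is not CDAP source; the method name is illustrative, and only MessagingService/MessageFetcher calls already shown in the examples on this page are used.

// Illustrative sketch of the fetch-and-resume pattern from Example 1.
// Assumed classes: MessagingService, MessageFetcher, RawMessage, CloseableIterator, TopicId (all appearing in the examples on this page).
private byte[] fetchNewMessages(MessagingService messagingService, TopicId topic,
                                byte[] lastMessageId, long subscribeTime) throws Exception {
    MessageFetcher fetcher = messagingService.prepareFetch(topic);
    if (lastMessageId == null) {
        // First fetch: start from the time the subscription was made
        fetcher.setStartTime(subscribeTime);
    } else {
        // Later fetches: resume exclusively after the last message already processed
        fetcher.setStartMessage(lastMessageId, false);
    }
    try (CloseableIterator<RawMessage> iterator = fetcher.fetch()) {
        while (iterator.hasNext()) {
            RawMessage rawMessage = iterator.next();
            // ... decode rawMessage.getPayload() and dispatch it here ...
            lastMessageId = rawMessage.getId();
        }
    }
    // The id to resume from next time; unchanged if nothing new was fetched
    return lastMessageId;
}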

Example 2 with RawMessage

Use of co.cask.cdap.messaging.data.RawMessage in project cdap by caskdata.

The class LeaderElectionMessagingServiceTest, method testTransition.

@Test
public void testTransition() throws Throwable {
    final TopicId topicId = NamespaceId.SYSTEM.topic("topic");
    Injector injector1 = createInjector(0);
    Injector injector2 = createInjector(1);
    // Start a messaging service, which would become the leader
    ZKClientService zkClient1 = injector1.getInstance(ZKClientService.class);
    zkClient1.startAndWait();
    final MessagingService firstService = injector1.getInstance(MessagingService.class);
    if (firstService instanceof Service) {
        ((Service) firstService).startAndWait();
    }
    // Publish a message with the leader
    firstService.publish(StoreRequestBuilder.of(topicId).addPayloads("Testing1").build());
    // Start another messaging service; this one would be the follower
    ZKClientService zkClient2 = injector2.getInstance(ZKClientService.class);
    zkClient2.startAndWait();
    final MessagingService secondService = injector2.getInstance(MessagingService.class);
    if (secondService instanceof Service) {
        ((Service) secondService).startAndWait();
    }
    // Try to call the follower; it should get service unavailable.
    try {
        secondService.listTopics(NamespaceId.SYSTEM);
        Assert.fail("Expected service unavailable");
    } catch (ServiceUnavailableException e) {
    // Expected
    }
    // Make the ZK session time out for the leader service. The second one should pick up leadership.
    KillZKSession.kill(zkClient1.getZooKeeperSupplier().get(), zkClient1.getConnectString(), 10000);
    // Publish one more message and then fetch from the current leader
    List<String> messages = Retries.callWithRetries(new Retries.Callable<List<String>, Throwable>() {

        @Override
        public List<String> call() throws Throwable {
            secondService.publish(StoreRequestBuilder.of(topicId).addPayloads("Testing2").build());
            List<String> messages = new ArrayList<>();
            try (CloseableIterator<RawMessage> iterator = secondService.prepareFetch(topicId).fetch()) {
                while (iterator.hasNext()) {
                    messages.add(new String(iterator.next().getPayload(), "UTF-8"));
                }
            }
            return messages;
        }
    }, RetryStrategies.timeLimit(10, TimeUnit.SECONDS, RetryStrategies.fixDelay(1, TimeUnit.SECONDS)));
    Assert.assertEquals(Arrays.asList("Testing1", "Testing2"), messages);
    // Shut down the current leader. The one whose session timed out should become the leader again.
    if (secondService instanceof Service) {
        ((Service) secondService).stopAndWait();
    }
    // Try to fetch messages from the current leader again.
    // It should see two messages (because the cache is cleared and the fetch comes from the backing store).
    messages = Retries.callWithRetries(new Retries.Callable<List<String>, Throwable>() {

        @Override
        public List<String> call() throws Throwable {
            List<String> messages = new ArrayList<>();
            try (CloseableIterator<RawMessage> iterator = firstService.prepareFetch(topicId).fetch()) {
                while (iterator.hasNext()) {
                    messages.add(new String(iterator.next().getPayload(), "UTF-8"));
                }
            }
            return messages;
        }
    }, RetryStrategies.timeLimit(10, TimeUnit.SECONDS, RetryStrategies.fixDelay(1, TimeUnit.SECONDS)));
    Assert.assertEquals(Arrays.asList("Testing1", "Testing2"), messages);
    zkClient1.stopAndWait();
    zkClient2.stopAndWait();
}
Also used: CloseableIterator(co.cask.cdap.api.dataset.lib.CloseableIterator), ArrayList(java.util.ArrayList), MessagingService(co.cask.cdap.messaging.MessagingService), MetricsCollectionService(co.cask.cdap.api.metrics.MetricsCollectionService), NoOpMetricsCollectionService(co.cask.cdap.common.metrics.NoOpMetricsCollectionService), ZKClientService(org.apache.twill.zookeeper.ZKClientService), Service(com.google.common.util.concurrent.Service), ServiceUnavailableException(co.cask.cdap.common.ServiceUnavailableException), Callable(java.util.concurrent.Callable), Injector(com.google.inject.Injector), Retries(co.cask.cdap.common.service.Retries), TopicId(co.cask.cdap.proto.id.TopicId), List(java.util.List), RawMessage(co.cask.cdap.messaging.data.RawMessage), Test(org.junit.Test)
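
Example 2 runs the same fetch-all-payloads loop inside two separate retry blocks. As an illustration (not part of the test), that loop could be factored into a helper like the sketch below, using only the MessagingService, Retries, and RetryStrategies calls already visible in the test; StandardCharsets.UTF_8 replaces the "UTF-8" literal to avoid the checked UnsupportedEncodingException.

// Illustrative helper: fetch every payload currently readable from 'service' on 'topicId',
// retrying for up to 10 seconds (once per second), e.g. while leadership is transitioning.
private List<String> fetchAllWithRetries(final MessagingService service, final TopicId topicId) throws Throwable {
    return Retries.callWithRetries(new Retries.Callable<List<String>, Throwable>() {

        @Override
        public List<String> call() throws Throwable {
            List<String> payloads = new ArrayList<>();
            try (CloseableIterator<RawMessage> iterator = service.prepareFetch(topicId).fetch()) {
                while (iterator.hasNext()) {
                    payloads.add(new String(iterator.next().getPayload(), StandardCharsets.UTF_8));
                }
            }
            return payloads;
        }
    }, RetryStrategies.timeLimit(10, TimeUnit.SECONDS, RetryStrategies.fixDelay(1, TimeUnit.SECONDS)));
}

With such a helper, both assertions in the test collapse to Assert.assertEquals(Arrays.asList("Testing1", "Testing2"), fetchAllWithRetries(secondService, topicId)) and the corresponding call against firstService.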

Example 3 with RawMessage

Use of co.cask.cdap.messaging.data.RawMessage in project cdap by caskdata.

The class ConcurrentMessageWriterTest, method testMultiMaxSequence.

@Test
public void testMultiMaxSequence() throws IOException, InterruptedException {
    TopicId topicId = new NamespaceId("ns1").topic("t1");
    final TopicMetadata metadata = new TopicMetadata(topicId, new HashMap<String, String>(), 1);
    // This tests the case where multiple StoreRequests combined exceed the 65536 sequence id limit.
    // See testMaxSequence() for more details on when it matters.
    // Generate 3 StoreRequests, each with 43690 messages
    int msgCount = StoreRequestWriter.SEQUENCE_ID_LIMIT / 3 * 2;
    int requestCount = 3;
    List<StoreRequest> requests = new ArrayList<>();
    for (int i = 0; i < requestCount; i++) {
        List<String> payloads = new ArrayList<>(msgCount);
        for (int j = 0; j < msgCount; j++) {
            payloads.add(Integer.toString(j));
        }
        requests.add(new TestStoreRequest(topicId, payloads));
    }
    TestStoreRequestWriter testWriter = new TestStoreRequestWriter(new TimeProvider.IncrementalTimeProvider());
    // We use a custom metrics collector here to make all the persist calls reach the same latch,
    // since we know that the ConcurrentMessageWriter will emit a "persist.requested" metric after enqueueing but
    // before flushing.
    // This will make all requests get batched together.
    final CountDownLatch latch = new CountDownLatch(requestCount);
    final ConcurrentMessageWriter writer = new ConcurrentMessageWriter(testWriter, new MetricsCollector() {

        @Override
        public void increment(String metricName, long value) {
            if ("persist.requested".equals(metricName)) {
                latch.countDown();
                Uninterruptibles.awaitUninterruptibly(latch);
            }
        }

        @Override
        public void gauge(String metricName, long value) {
            LOG.info("MetricsContext.gauge: {} = {}", metricName, value);
        }
    });
    ExecutorService executor = Executors.newFixedThreadPool(3);
    for (final StoreRequest request : requests) {
        executor.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    writer.persist(request, metadata);
                } catch (IOException e) {
                    LOG.error("Failed to persist", e);
                }
            }
        });
    }
    executor.shutdown();
    Assert.assertTrue(executor.awaitTermination(1, TimeUnit.MINUTES));
    // Validate that all messages have been written
    List<RawMessage> messages = testWriter.getMessages().get(topicId);
    Assert.assertEquals(requestCount * msgCount, messages.size());
    // We expect the payloads to follow the repeating sequence [0..msgCount-1]
    int expectedPayload = 0;
    // The sequenceId should be (i % SEQUENCE_ID_LIMIT)
    for (int i = 0; i < messages.size(); i++) {
        RawMessage message = messages.get(i);
        MessageId messageId = new MessageId(message.getId());
        Assert.assertEquals(i / StoreRequestWriter.SEQUENCE_ID_LIMIT, messageId.getPublishTimestamp());
        Assert.assertEquals((short) (i % StoreRequestWriter.SEQUENCE_ID_LIMIT), messageId.getSequenceId());
        Assert.assertEquals(expectedPayload, Integer.parseInt(Bytes.toString(message.getPayload())));
        expectedPayload = (expectedPayload + 1) % msgCount;
    }
}
Also used: ArrayList(java.util.ArrayList), TopicId(co.cask.cdap.proto.id.TopicId), RawMessage(co.cask.cdap.messaging.data.RawMessage), MetricsCollector(co.cask.cdap.api.metrics.MetricsCollector), TimeProvider(co.cask.cdap.common.utils.TimeProvider), StoreRequest(co.cask.cdap.messaging.StoreRequest), IOException(java.io.IOException), CountDownLatch(java.util.concurrent.CountDownLatch), TopicMetadata(co.cask.cdap.messaging.TopicMetadata), ExecutorService(java.util.concurrent.ExecutorService), NamespaceId(co.cask.cdap.proto.id.NamespaceId), MessageId(co.cask.cdap.messaging.data.MessageId), Test(org.junit.Test)
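
The assertions in Example 3 hinge on how a raw message id packs a publish timestamp and a per-timestamp sequence number. Decoding that composite id is a short standalone step, sketched here; 'message' is a placeholder for any RawMessage returned by the writer, and the comments describe the fields only as the test above uses them.

// Decode the composite message id, exactly as the assertions in Example 3 do.
// 'message' is a placeholder for a RawMessage obtained from a writer or a fetch.
MessageId messageId = new MessageId(message.getId());
long publishTimestamp = messageId.getPublishTimestamp();  // timestamp component of the id
int sequenceId = messageId.getSequenceId();               // sequence number assigned within the same publish timestamp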

Example 4 with RawMessage

Use of co.cask.cdap.messaging.data.RawMessage in project cdap by caskdata.

The class MessagingHttpServiceTest, method testBasicPubSub.

@Test
public void testBasicPubSub() throws Exception {
    TopicId topicId = new NamespaceId("ns1").topic("testBasicPubSub");
    // Publishing to a non-existent topic should get a not-found exception
    try {
        client.publish(StoreRequestBuilder.of(topicId).addPayloads("a").build());
        Assert.fail("Expected TopicNotFoundException");
    } catch (TopicNotFoundException e) {
    // Expected
    }
    // Consuming from a non-existent topic should get a not-found exception
    try {
        client.prepareFetch(topicId).fetch();
        Assert.fail("Expected TopicNotFoundException");
    } catch (TopicNotFoundException e) {
    // Expected
    }
    client.createTopic(new TopicMetadata(topicId));
    // Publishing a non-transactional message with an empty payload should result in a failure
    try {
        client.publish(StoreRequestBuilder.of(topicId).build());
        Assert.fail("Expected IllegalArgumentException");
    } catch (IllegalArgumentException e) {
    // Expected
    }
    // Publish a non-tx message, no RollbackDetail is returned
    Assert.assertNull(client.publish(StoreRequestBuilder.of(topicId).addPayloads("m0", "m1").build()));
    // Publish a transactional message, a RollbackDetail should be returned
    RollbackDetail rollbackDetail = client.publish(StoreRequestBuilder.of(topicId).addPayloads("m2").setTransaction(1L).build());
    Assert.assertNotNull(rollbackDetail);
    // Rollback the published message
    client.rollback(topicId, rollbackDetail);
    // Fetch messages non-transactionally (should be able to read all the messages since rolled back messages
    // are still visible until ttl kicks in)
    List<RawMessage> messages = new ArrayList<>();
    try (CloseableIterator<RawMessage> iterator = client.prepareFetch(topicId).fetch()) {
        Iterators.addAll(messages, iterator);
    }
    Assert.assertEquals(3, messages.size());
    for (int i = 0; i < 3; i++) {
        Assert.assertEquals("m" + i, Bytes.toString(messages.get(i).getPayload()));
    }
    // Consume transactionally. It should get only m0 and m1 since m2 has been rolled back
    List<RawMessage> txMessages = new ArrayList<>();
    Transaction transaction = new Transaction(3L, 3L, new long[0], new long[] { 2L }, 2L);
    try (CloseableIterator<RawMessage> iterator = client.prepareFetch(topicId).setStartTime(0).setTransaction(transaction).fetch()) {
        Iterators.addAll(txMessages, iterator);
    }
    Assert.assertEquals(2, txMessages.size());
    for (int i = 0; i < 2; i++) {
        Assert.assertEquals("m" + i, Bytes.toString(messages.get(i).getPayload()));
    }
    // Fetch again from a given message offset exclusively.
    // Expects one message to be fetched
    byte[] startMessageId = messages.get(1).getId();
    try (CloseableIterator<RawMessage> iterator = client.prepareFetch(topicId).setStartMessage(startMessageId, false).fetch()) {
        // It should have only one message (m2)
        Assert.assertTrue(iterator.hasNext());
        RawMessage msg = iterator.next();
        Assert.assertEquals("m2", Bytes.toString(msg.getPayload()));
    }
    // Fetch again from the last message offset exclusively
    // Expects no message to be fetched
    startMessageId = messages.get(2).getId();
    try (CloseableIterator<RawMessage> iterator = client.prepareFetch(topicId).setStartMessage(startMessageId, false).fetch()) {
        Assert.assertFalse(iterator.hasNext());
    }
    // Fetch with start time. It should get both m0 and m1 since they are published in the same request, hence
    // having the same publish time
    startMessageId = messages.get(1).getId();
    try (CloseableIterator<RawMessage> iterator = client.prepareFetch(topicId).setStartTime(new MessageId(startMessageId).getPublishTimestamp()).setLimit(2).fetch()) {
        messages.clear();
        Iterators.addAll(messages, iterator);
    }
    Assert.assertEquals(2, messages.size());
    for (int i = 0; i < 2; i++) {
        Assert.assertEquals("m" + i, Bytes.toString(messages.get(i).getPayload()));
    }
    // Publish 2 messages, one transactionally, one without transaction
    client.publish(StoreRequestBuilder.of(topicId).addPayloads("m3").setTransaction(2L).build());
    client.publish(StoreRequestBuilder.of(topicId).addPayloads("m4").build());
    // Consume non-transactionally; it should see m2, m3 and m4
    startMessageId = messages.get(1).getId();
    try (CloseableIterator<RawMessage> iterator = client.prepareFetch(topicId).setStartMessage(startMessageId, false).fetch()) {
        messages.clear();
        Iterators.addAll(messages, iterator);
    }
    Assert.assertEquals(3, messages.size());
    for (int i = 0; i < 3; i++) {
        Assert.assertEquals("m" + (i + 2), Bytes.toString(messages.get(i).getPayload()));
    }
    // Consume using a transaction that doesn't have tx = 2L visible. It should get no messages, as it blocks on m3
    transaction = new Transaction(3L, 3L, new long[0], new long[] { 2L }, 2L);
    try (CloseableIterator<RawMessage> iterator = client.prepareFetch(topicId).setStartMessage(startMessageId, false).setTransaction(transaction).fetch()) {
        Assert.assertFalse(iterator.hasNext());
    }
    // Consume using a transaction that has tx = 2L in the invalid list. It should skip m3 and get m4
    transaction = new Transaction(3L, 3L, new long[] { 2L }, new long[0], 0L);
    try (CloseableIterator<RawMessage> iterator = client.prepareFetch(topicId).setStartMessage(startMessageId, false).setTransaction(transaction).fetch()) {
        messages.clear();
        Iterators.addAll(messages, iterator);
    }
    Assert.assertEquals(1, messages.size());
    Assert.assertEquals("m4", Bytes.toString(messages.get(0).getPayload()));
    // Consume using a transaction that has tx = 2L committed. It should get m3 and m4
    transaction = new Transaction(3L, 3L, new long[0], new long[0], 0L);
    try (CloseableIterator<RawMessage> iterator = client.prepareFetch(topicId).setStartMessage(startMessageId, false).setTransaction(transaction).fetch()) {
        messages.clear();
        Iterators.addAll(messages, iterator);
    }
    Assert.assertEquals(2, messages.size());
    for (int i = 0; i < 2; i++) {
        Assert.assertEquals("m" + (i + 3), Bytes.toString(messages.get(i).getPayload()));
    }
    client.deleteTopic(topicId);
}
Also used: RollbackDetail(co.cask.cdap.messaging.RollbackDetail), TopicNotFoundException(co.cask.cdap.api.messaging.TopicNotFoundException), ArrayList(java.util.ArrayList), TopicMetadata(co.cask.cdap.messaging.TopicMetadata), Transaction(org.apache.tephra.Transaction), TopicId(co.cask.cdap.proto.id.TopicId), NamespaceId(co.cask.cdap.proto.id.NamespaceId), RawMessage(co.cask.cdap.messaging.data.RawMessage), MessageId(co.cask.cdap.messaging.data.MessageId), Test(org.junit.Test)
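
Example 4 hand-builds Tephra Transaction snapshots to control which published messages a fetch may see. Reduced to a single case, a transaction-filtered fetch looks like the sketch below; 'client', 'topicId' and 'startMessageId' are the same kinds of objects used in the test, and the reading of the Transaction constructor arguments in the comment is an assumption inferred from how the test uses them.

// Sketch of a transaction-filtered fetch (mirrors the first transactional read after m3 is published).
// Assumed Transaction argument order: read pointer, write pointer, invalid list, in-progress list,
// first short in-progress. Listing 2L as in-progress keeps m3 (published under tx 2L) invisible,
// and since m2 was rolled back, the test asserts that nothing is returned at this point.
Transaction snapshot = new Transaction(3L, 3L, new long[0], new long[] { 2L }, 2L);
List<RawMessage> visible = new ArrayList<>();
try (CloseableIterator<RawMessage> iterator = client.prepareFetch(topicId)
        .setStartMessage(startMessageId, false)
        .setTransaction(snapshot)
        .fetch()) {
    Iterators.addAll(visible, iterator);
}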

Example 5 with RawMessage

Use of co.cask.cdap.messaging.data.RawMessage in project cdap by caskdata.

The class MessagingHttpServiceTest, method testDeletes.

@Test
public void testDeletes() throws Exception {
    TopicId topicId = new NamespaceId("ns1").topic("del");
    TopicMetadata metadata = new TopicMetadata(topicId, "ttl", "100");
    for (int j = 0; j < 10; j++) {
        client.createTopic(metadata);
        String m1 = String.format("m%d", j);
        String m2 = String.format("m%d", j + 1);
        Assert.assertNull(client.publish(StoreRequestBuilder.of(topicId).addPayloads(m1, m2).build()));
        // Fetch messages non-transactionally
        List<RawMessage> messages = new ArrayList<>();
        try (CloseableIterator<RawMessage> iterator = client.prepareFetch(topicId).fetch()) {
            Iterators.addAll(messages, iterator);
        }
        Assert.assertEquals(2, messages.size());
        Set<String> receivedMessages = new HashSet<>();
        for (RawMessage message : messages) {
            receivedMessages.add(Bytes.toString(message.getPayload()));
        }
        Assert.assertTrue(receivedMessages.contains(m1));
        Assert.assertTrue(receivedMessages.contains(m2));
        client.deleteTopic(topicId);
    }
}
Also used: ArrayList(java.util.ArrayList), TopicId(co.cask.cdap.proto.id.TopicId), NamespaceId(co.cask.cdap.proto.id.NamespaceId), RawMessage(co.cask.cdap.messaging.data.RawMessage), TopicMetadata(co.cask.cdap.messaging.TopicMetadata), HashSet(java.util.HashSet), Test(org.junit.Test)

Aggregations

RawMessage (co.cask.cdap.messaging.data.RawMessage): 15
TopicId (co.cask.cdap.proto.id.TopicId): 13
NamespaceId (co.cask.cdap.proto.id.NamespaceId): 10
ArrayList (java.util.ArrayList): 10
Test (org.junit.Test): 10
TopicMetadata (co.cask.cdap.messaging.TopicMetadata): 9
TimeProvider (co.cask.cdap.common.utils.TimeProvider): 4
MessageId (co.cask.cdap.messaging.data.MessageId): 4
IOException (java.io.IOException): 4
CloseableIterator (co.cask.cdap.api.dataset.lib.CloseableIterator): 2
BadRequestException (co.cask.cdap.common.BadRequestException): 2
RollbackDetail (co.cask.cdap.messaging.RollbackDetail): 2
HashSet (java.util.HashSet): 2
ExecutorService (java.util.concurrent.ExecutorService): 2
POST (javax.ws.rs.POST): 2
Path (javax.ws.rs.Path): 2
GenericDatumReader (org.apache.avro.generic.GenericDatumReader): 2
GenericRecord (org.apache.avro.generic.GenericRecord): 2
Decoder (org.apache.avro.io.Decoder): 2
TopicNotFoundException (co.cask.cdap.api.messaging.TopicNotFoundException): 1