
Example 6 with DequeueResult

Use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.

The class StreamConsumerTestBase, method testFIFORollback.

@Test
public void testFIFORollback() throws Exception {
    String stream = "testFIFORollback";
    StreamId streamId = TEST_NAMESPACE.stream(stream);
    StreamAdmin streamAdmin = getStreamAdmin();
    streamAdmin.create(streamId);
    StreamConfig streamConfig = streamAdmin.getConfig(streamId);
    // Writes 5 events
    writeEvents(streamConfig, "Testing ", 5);
    streamAdmin.configureInstances(streamId, 0L, 2);
    StreamConsumerFactory consumerFactory = getConsumerFactory();
    StreamConsumer consumer0 = consumerFactory.create(streamId, "fifo.rollback", new ConsumerConfig(0L, 0, 2, DequeueStrategy.FIFO, null));
    StreamConsumer consumer1 = consumerFactory.create(streamId, "fifo.rollback", new ConsumerConfig(0L, 1, 2, DequeueStrategy.FIFO, null));
    // Try to dequeue using both consumers
    TransactionContext context0 = createTxContext(consumer0);
    TransactionContext context1 = createTxContext(consumer1);
    context0.start();
    context1.start();
    DequeueResult<StreamEvent> result0 = consumer0.poll(1, 1, TimeUnit.SECONDS);
    DequeueResult<StreamEvent> result1 = consumer1.poll(1, 1, TimeUnit.SECONDS);
    Assert.assertEquals("Testing 0", Charsets.UTF_8.decode(result0.iterator().next().getBody()).toString());
    Assert.assertEquals("Testing 1", Charsets.UTF_8.decode(result1.iterator().next().getBody()).toString());
    // Commit the first one, rollback the second one.
    context0.finish();
    context1.abort();
    // Dequeue again with both consumers
    context0.start();
    context1.start();
    result0 = consumer0.poll(1, 1, TimeUnit.SECONDS);
    result1 = consumer1.poll(1, 1, TimeUnit.SECONDS);
    // Expect consumer 0 to keep proceeding, while consumer 1 retries the event it claimed in the previous transaction.
    // This is the optimization in FIFO mode to avoid going back and rescanning.
    Assert.assertEquals("Testing 2", Charsets.UTF_8.decode(result0.iterator().next().getBody()).toString());
    Assert.assertEquals("Testing 1", Charsets.UTF_8.decode(result1.iterator().next().getBody()).toString());
    // Commit both
    context0.finish();
    context1.finish();
    consumer0.close();
    consumer1.close();
}
Also used : StreamId(co.cask.cdap.proto.id.StreamId) TransactionContext(org.apache.tephra.TransactionContext) StreamEvent(co.cask.cdap.api.flow.flowlet.StreamEvent) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) Test(org.junit.Test)
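
The rollback behaviour in this test depends on a createTxContext helper that the listing does not show: it wraps each consumer in a Tephra TransactionContext so that finish() commits the dequeue and abort() rolls it back. A minimal sketch of what such a helper might look like, assuming the test base class holds an org.apache.tephra.TransactionSystemClient field (named txClient here, an assumption) and that StreamConsumer implements Tephra's TransactionAware; the real helper in StreamConsumerTestBase may differ:

// Hypothetical sketch of the createTxContext(...) helper used by these tests; 'txClient'
// is an assumed field and the real implementation may differ.
private TransactionContext createTxContext(TransactionAware... txAwares) {
    // Each consumer participates in the transaction, so finish() persists its dequeue
    // state and abort() rolls it back, which is exactly what testFIFORollback exercises.
    return new TransactionContext(txClient, txAwares);
}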

Example 7 with DequeueResult

Use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.

The class StreamConsumerTestBase, method testTTL.

@Test
public void testTTL() throws Exception {
    String stream = "testTTL";
    StreamId streamId = TEST_NAMESPACE.stream(stream);
    StreamAdmin streamAdmin = getStreamAdmin();
    // Create stream with ttl of 1 day
    final long ttl = TimeUnit.DAYS.toMillis(1);
    final long currentTime = System.currentTimeMillis();
    final long increment = TimeUnit.SECONDS.toMillis(1);
    final long approxEarliestNonExpiredTime = currentTime - TimeUnit.HOURS.toMillis(1);
    Properties streamProperties = new Properties();
    streamProperties.setProperty(Constants.Stream.TTL, Long.toString(ttl));
    streamProperties.setProperty(Constants.Stream.PARTITION_DURATION, Long.toString(ttl));
    streamAdmin.create(streamId, streamProperties);
    StreamConfig streamConfig = streamAdmin.getConfig(streamId);
    streamAdmin.configureInstances(streamId, 0L, 1);
    StreamConsumerFactory consumerFactory = getConsumerFactory();
    Assert.assertEquals(ttl, streamConfig.getTTL());
    Assert.assertEquals(ttl, streamConfig.getPartitionDuration());
    Set<StreamEvent> expectedEvents = Sets.newTreeSet(STREAM_EVENT_COMPARATOR);
    FileWriter<StreamEvent> writer = getFileWriterFactory().create(streamConfig, 0);
    try {
        // Write 20 expired events (timestamps start at 0, far older than the one-day TTL)
        writeEvents(streamConfig, "Old event ", 20, new IncrementingClock(0, 1));
        // Write 12 non-expired events (timestamps within the last hour)
        expectedEvents.addAll(writeEvents(streamConfig, "New event ", 12, new IncrementingClock(approxEarliestNonExpiredTime, increment)));
    } finally {
        writer.close();
    }
    // Dequeue from the stream. Should only get the 12 non-expired events.
    StreamConsumer consumer = consumerFactory.create(streamId, stream, new ConsumerConfig(0L, 0, 1, DequeueStrategy.FIFO, null));
    try {
        verifyEvents(consumer, expectedEvents);
        TransactionContext txContext = createTxContext(consumer);
        txContext.start();
        try {
            // Should be no more pending events
            DequeueResult<StreamEvent> result = consumer.poll(1, 2, TimeUnit.SECONDS);
            Assert.assertTrue(result.isEmpty());
        } finally {
            txContext.finish();
        }
    } finally {
        consumer.close();
    }
}
Also used : StreamId(co.cask.cdap.proto.id.StreamId) StreamEvent(co.cask.cdap.api.flow.flowlet.StreamEvent) Properties(java.util.Properties) TransactionContext(org.apache.tephra.TransactionContext) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) Test(org.junit.Test)
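
The verifyEvents(consumer, expectedEvents) call above is another helper omitted from the listing. A hedged sketch of how it might be implemented, assuming it polls the consumer inside a transaction and compares decoded event bodies in iteration order; the actual helper in StreamConsumerTestBase may differ:

// Hypothetical sketch of verifyEvents(...); the real helper may differ. It drains the
// consumer one event at a time inside a transaction and checks each body against the
// expected events in order.
private void verifyEvents(StreamConsumer consumer, Collection<StreamEvent> expectedEvents) throws Exception {
    TransactionContext txContext = createTxContext(consumer);
    txContext.start();
    try {
        for (StreamEvent expected : expectedEvents) {
            DequeueResult<StreamEvent> result = consumer.poll(1, 1, TimeUnit.SECONDS);
            Assert.assertFalse("Expected more events", result.isEmpty());
            String expectedBody = Charsets.UTF_8.decode(expected.getBody()).toString();
            String actualBody = Charsets.UTF_8.decode(result.iterator().next().getBody()).toString();
            Assert.assertEquals(expectedBody, actualBody);
        }
        txContext.finish();
    } catch (Throwable t) {
        txContext.abort();
        throw Throwables.propagate(t);
    }
}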

Example 8 with DequeueResult

Use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.

The class StreamConsumerTestBase, method testTTLMultipleEventsWithSameTimestamp.

@Test
public void testTTLMultipleEventsWithSameTimestamp() throws Exception {
    String stream = "testTTLMultipleEventsWithSameTimestamp";
    StreamId streamId = TEST_NAMESPACE.stream(stream);
    StreamAdmin streamAdmin = getStreamAdmin();
    // Create stream with ttl of 1 day
    final long ttl = TimeUnit.DAYS.toMillis(1);
    final long currentTime = System.currentTimeMillis();
    final long increment = TimeUnit.SECONDS.toMillis(1);
    final long approxEarliestNonExpiredTime = currentTime - TimeUnit.HOURS.toMillis(1);
    Properties streamProperties = new Properties();
    streamProperties.setProperty(Constants.Stream.TTL, Long.toString(ttl));
    streamProperties.setProperty(Constants.Stream.PARTITION_DURATION, Long.toString(ttl));
    streamAdmin.create(streamId, streamProperties);
    StreamConfig streamConfig = streamAdmin.getConfig(streamId);
    streamAdmin.configureInstances(streamId, 0L, 1);
    StreamConsumerFactory consumerFactory = getConsumerFactory();
    Assert.assertEquals(ttl, streamConfig.getTTL());
    Assert.assertEquals(ttl, streamConfig.getPartitionDuration());
    // Write 10 expired events to the stream, all with timestamp 0
    writeEvents(streamConfig, "Old event ", 10, new ConstantClock(0));
    // Write 40 non-expired events (20 before and 20 after a flush) with timestamps in the range approxEarliestNonExpiredTime..currentTime
    Set<StreamEvent> expectedEvents = Sets.newTreeSet(STREAM_EVENT_COMPARATOR);
    FileWriter<StreamEvent> writer = getFileWriterFactory().create(streamConfig, 0);
    try {
        expectedEvents.addAll(writeEvents(writer, "New event pre-flush ", 20, new IncrementingClock(approxEarliestNonExpiredTime, increment, 5)));
        writer.flush();
        expectedEvents.addAll(writeEvents(writer, "New event post-flush ", 20, new IncrementingClock(approxEarliestNonExpiredTime + 1, increment, 5)));
    } finally {
        writer.close();
    }
    StreamConsumer consumer = consumerFactory.create(streamId, stream, new ConsumerConfig(0L, 0, 1, DequeueStrategy.FIFO, null));
    verifyEvents(consumer, expectedEvents);
    TransactionContext txContext = createTxContext(consumer);
    txContext.start();
    try {
        // Should be no more pending events
        DequeueResult<StreamEvent> result = consumer.poll(1, 1, TimeUnit.SECONDS);
        Assert.assertTrue(result.isEmpty());
    } finally {
        txContext.finish();
    }
    consumer.close();
}
Also used : StreamId(co.cask.cdap.proto.id.StreamId) StreamEvent(co.cask.cdap.api.flow.flowlet.StreamEvent) Properties(java.util.Properties) TransactionContext(org.apache.tephra.TransactionContext) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) Test(org.junit.Test)
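
The ConstantClock and IncrementingClock helpers that drive event timestamps in the two TTL tests are also not shown. A hypothetical sketch of what they could look like, assuming a single getTime() method and that IncrementingClock's optional third argument is the number of consecutive events that share a timestamp (which would match the "multiple events with same timestamp" scenario above); the real inner classes in StreamConsumerTestBase may differ:

// Hypothetical test-clock sketches; the TestClock interface and both classes are
// assumptions made for illustration only.
interface TestClock {
    long getTime();
}

// Always returns the same timestamp, e.g. 0 for clearly expired events.
final class ConstantClock implements TestClock {
    private final long time;
    ConstantClock(long time) { this.time = time; }
    @Override
    public long getTime() { return time; }
}

// Returns 'start', then advances by 'increment' after every 'repeat' calls, so that
// groups of events can share the same timestamp (repeat defaults to 1).
final class IncrementingClock implements TestClock {
    private final long start;
    private final long increment;
    private final int repeat;
    private int calls;
    IncrementingClock(long start, long increment) { this(start, increment, 1); }
    IncrementingClock(long start, long increment, int repeat) {
        this.start = start;
        this.increment = increment;
        this.repeat = repeat;
    }
    @Override
    public long getTime() {
        long time = start + (calls / repeat) * increment;
        calls++;
        return time;
    }
}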

Example 9 with DequeueResult

Use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.

The class StreamConsumerTestBase, method testFIFOReconfigure.

@Test
public void testFIFOReconfigure() throws Exception {
    String stream = "testReconfigure";
    StreamId streamId = TEST_NAMESPACE.stream(stream);
    StreamAdmin streamAdmin = getStreamAdmin();
    streamAdmin.create(streamId);
    StreamConfig streamConfig = streamAdmin.getConfig(streamId);
    // Writes 5 events
    writeEvents(streamConfig, "Testing ", 5);
    // Configure 3 consumers.
    streamAdmin.configureInstances(streamId, 0L, 3);
    StreamConsumerFactory consumerFactory = getConsumerFactory();
    // Starts three consumers
    List<StreamConsumer> consumers = Lists.newArrayList();
    for (int i = 0; i < 3; i++) {
        consumers.add(consumerFactory.create(streamId, "fifo.reconfigure", new ConsumerConfig(0L, i, 3, DequeueStrategy.FIFO, null)));
    }
    List<TransactionContext> txContexts = Lists.newArrayList();
    for (StreamConsumer consumer : consumers) {
        txContexts.add(createTxContext(consumer));
    }
    for (TransactionContext txContext : txContexts) {
        txContext.start();
    }
    // Consume an item from each consumer, but only have the first one commit.
    for (int i = 0; i < consumers.size(); i++) {
        DequeueResult<StreamEvent> result = consumers.get(i).poll(1, 1, TimeUnit.SECONDS);
        Assert.assertEquals("Testing " + i, Charsets.UTF_8.decode(result.iterator().next().getBody()).toString());
        if (i == 0) {
            txContexts.get(i).finish();
        } else {
            txContexts.get(i).abort();
        }
    }
    for (StreamConsumer consumer : consumers) {
        consumer.close();
    }
    // Reconfigure to have two consumers.
    streamAdmin.configureInstances(streamId, 0L, 2);
    consumers.clear();
    for (int i = 0; i < 2; i++) {
        consumers.add(consumerFactory.create(streamId, "fifo.reconfigure", new ConsumerConfig(0L, i, 2, DequeueStrategy.FIFO, null)));
    }
    txContexts.clear();
    for (StreamConsumer consumer : consumers) {
        txContexts.add(createTxContext(consumer));
    }
    // Consume from each consumer until all are empty; together they should see all four remaining events.
    Set<String> messages = Sets.newTreeSet();
    boolean done;
    do {
        for (TransactionContext txContext : txContexts) {
            txContext.start();
        }
        done = true;
        for (int i = 0; i < consumers.size(); i++) {
            DequeueResult<StreamEvent> result = consumers.get(i).poll(1, 1, TimeUnit.SECONDS);
            if (result.isEmpty()) {
                continue;
            }
            done = false;
            messages.add(Charsets.UTF_8.decode(result.iterator().next().getBody()).toString());
            txContexts.get(i).finish();
        }
    } while (!done);
    Assert.assertEquals(4, messages.size());
    int count = 1;
    for (String msg : messages) {
        Assert.assertEquals("Testing " + count, msg);
        count++;
    }
    for (StreamConsumer consumer : consumers) {
        consumer.close();
    }
}
Also used : StreamId(co.cask.cdap.proto.id.StreamId) StreamEvent(co.cask.cdap.api.flow.flowlet.StreamEvent) TransactionContext(org.apache.tephra.TransactionContext) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) Test(org.junit.Test)
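
writeEvents(...) is the remaining unshown helper that these tests rely on; it appends numbered events such as "Testing 0" through "Testing 4" to the stream. A minimal sketch of the simplest overload, assuming FileWriter exposes an append(...) method and that StreamEvent can be constructed from a ByteBuffer body (both assumptions); the real helper, and its overloads that take a clock or an existing FileWriter, may differ:

// Hypothetical sketch of writeEvents(streamConfig, prefix, count); the real helper may
// differ. It writes events whose bodies are the prefix followed by the event index and
// returns the events it wrote so callers can build expected sets.
private List<StreamEvent> writeEvents(StreamConfig streamConfig, String msgPrefix, int count) throws IOException {
    List<StreamEvent> events = Lists.newArrayListWithCapacity(count);
    FileWriter<StreamEvent> writer = getFileWriterFactory().create(streamConfig, 0);
    try {
        for (int i = 0; i < count; i++) {
            // Body is e.g. "Testing 0", "Testing 1", ... encoded as UTF-8.
            StreamEvent event = new StreamEvent(Charsets.UTF_8.encode(msgPrefix + i));
            writer.append(event);
            events.add(event);
        }
    } finally {
        writer.close();
    }
    return events;
}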

Example 10 with DequeueResult

Use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.

The class QueueTest, method verifyQueueIsEmpty.

protected void verifyQueueIsEmpty(QueueName queueName, List<ConsumerConfig> consumerConfigs) throws Exception {
    // Verify the queue is empty
    Set<ConsumerGroupConfig> groupConfigs = Sets.newHashSet();
    for (ConsumerConfig consumerConfig : consumerConfigs) {
        try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, -1)) {
            groupConfigs.add(new ConsumerGroupConfig(consumerConfig));
            TransactionContext txContext = createTxContext(consumer);
            try {
                txContext.start();
                Assert.assertTrue(consumer.dequeue().isEmpty());
                txContext.finish();
            } catch (TransactionFailureException e) {
                txContext.abort();
                throw Throwables.propagate(e);
            }
        }
    }
    forceEviction(queueName, groupConfigs.size());
    long newGroupId = groupConfigs.size();
    groupConfigs.add(new ConsumerGroupConfig(newGroupId, 1, DequeueStrategy.FIFO, null));
    configureGroups(queueName, groupConfigs);
    // The queue has been consumed by n consumer groups. Use a new group id greater than n to make sure a fresh group can still dequeue.
    ConsumerConfig consumerConfig = new ConsumerConfig(newGroupId, 0, 1, DequeueStrategy.FIFO, null);
    resetConsumerState(queueName, consumerConfig);
    try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, -1)) {
        TransactionContext txContext = createTxContext(consumer);
        txContext.start();
        DequeueResult<byte[]> result = consumer.dequeue();
        if (!result.isEmpty()) {
            StringBuilder resultString = new StringBuilder();
            for (byte[] aResult : result) {
                if (resultString.length() > 0) {
                    resultString.append(", ");
                }
                resultString.append(Bytes.toInt(aResult));
            }
            LOG.info("Queue should be empty but returned result: {}, value = ", result, resultString);
        }
        Assert.assertTrue("Entire queue should be evicted after test but dequeue succeeds.", result.isEmpty());
        txContext.abort();
    }
}
Also used : TransactionFailureException(org.apache.tephra.TransactionFailureException) QueueConsumer(co.cask.cdap.data2.queue.QueueConsumer) TransactionContext(org.apache.tephra.TransactionContext) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) ConsumerGroupConfig(co.cask.cdap.data2.queue.ConsumerGroupConfig)
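
A hypothetical usage sketch for verifyQueueIsEmpty(...): a test would call it after fully consuming a queue, passing the same consumer configurations it used for dequeueing. The queue name and single FIFO group below are illustrative only, and the sketch assumes the QueueName.fromFlow(namespace, app, flow, flowlet, output) factory used elsewhere in the CDAP queue tests:

// Illustrative only: verify that a fully consumed queue is empty and evictable.
QueueName queueName = QueueName.fromFlow("default", "testApp", "testFlow", "testFlowlet", "out");
List<ConsumerConfig> consumerConfigs = ImmutableList.of(
    new ConsumerConfig(0L, 0, 1, DequeueStrategy.FIFO, null));
// ... enqueue entries and dequeue all of them with these configs inside transactions ...
verifyQueueIsEmpty(queueName, consumerConfigs);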

Aggregations

ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig): 14 usages
TransactionContext (org.apache.tephra.TransactionContext): 11 usages
Test (org.junit.Test): 11 usages
QueueConsumer (co.cask.cdap.data2.queue.QueueConsumer): 9 usages
ConsumerGroupConfig (co.cask.cdap.data2.queue.ConsumerGroupConfig): 8 usages
QueueName (co.cask.cdap.common.queue.QueueName): 7 usages
StreamEvent (co.cask.cdap.api.flow.flowlet.StreamEvent): 6 usages
DequeueResult (co.cask.cdap.data2.queue.DequeueResult): 5 usages
QueueEntry (co.cask.cdap.data2.queue.QueueEntry): 5 usages
StreamId (co.cask.cdap.proto.id.StreamId): 5 usages
QueueProducer (co.cask.cdap.data2.queue.QueueProducer): 4 usages
TransactionFailureException (org.apache.tephra.TransactionFailureException): 4 usages
TransactionExecutor (org.apache.tephra.TransactionExecutor): 3 usages
QueueTest (co.cask.cdap.data2.transaction.queue.QueueTest): 2 usages
IOException (java.io.IOException): 2 usages
Properties (java.util.Properties): 2 usages
CyclicBarrier (java.util.concurrent.CyclicBarrier): 2 usages
ExecutorService (java.util.concurrent.ExecutorService): 2 usages
TableNotFoundException (org.apache.hadoop.hbase.TableNotFoundException): 2 usages
TransactionAware (org.apache.tephra.TransactionAware): 2 usages