Example 1 with DequeueResult

Use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.

From the class QueueToStreamConsumer, method poll:

@Override
public DequeueResult<StreamEvent> poll(int maxEvents, long timeout, TimeUnit timeoutUnit) throws IOException, InterruptedException {
    final DequeueResult<byte[]> result = consumer.dequeue(maxEvents);
    // Decode byte array into stream event
    ImmutableList.Builder<StreamEvent> builder = ImmutableList.builder();
    for (byte[] content : result) {
        try {
            builder.add(STREAM_EVENT_CODEC.decodePayload(content));
        } catch (Throwable t) {
            // If decoding failed, the payload may be using the old (pre-2.1) stream codec; try decoding with that one instead.
            ByteBuffer buffer = ByteBuffer.wrap(content);
            SchemaHash schemaHash = new SchemaHash(buffer);
            Preconditions.checkArgument(schemaHash.equals(StreamEventDataCodec.STREAM_DATA_SCHEMA.getSchemaHash()), "Schema from payload not matching with StreamEventData schema.");
            Decoder decoder = new BinaryDecoder(new ByteBufferInputStream(buffer));
            // In the old schema, the timestamp is not recorded, so default it to 0.
            builder.add(new StreamEvent(StreamEventDataCodec.decode(decoder), 0));
        }
    }
    final List<StreamEvent> events = builder.build();
    return new DequeueResult<StreamEvent>() {

        @Override
        public boolean isEmpty() {
            return events.isEmpty();
        }

        @Override
        public void reclaim() {
            result.reclaim();
        }

        @Override
        public int size() {
            return events.size();
        }

        @Override
        public Iterator<StreamEvent> iterator() {
            return events.iterator();
        }
    };
}
Also used: SchemaHash (co.cask.cdap.api.data.schema.SchemaHash), ImmutableList (com.google.common.collect.ImmutableList), StreamEvent (co.cask.cdap.api.flow.flowlet.StreamEvent), ByteBufferInputStream (co.cask.common.io.ByteBufferInputStream), BinaryDecoder (co.cask.cdap.common.io.BinaryDecoder), Decoder (co.cask.cdap.common.io.Decoder), ByteBuffer (java.nio.ByteBuffer), DequeueResult (co.cask.cdap.data2.queue.DequeueResult)
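
For reference, here is a minimal usage sketch in plain Java. It assumes only the CDAP and Guava classes already listed above; the class name DequeueResultSketch and its helper method are made up for illustration and are not part of CDAP.

import co.cask.cdap.api.flow.flowlet.StreamEvent;
import co.cask.cdap.data2.queue.DequeueResult;
import com.google.common.base.Charsets;

final class DequeueResultSketch {

    // Print the events held by a DequeueResult<StreamEvent>, such as the one
    // built by the poll() override shown above.
    static void printEvents(DequeueResult<StreamEvent> result) {
        // size() and isEmpty() reflect the decoded event list built in poll().
        System.out.println("dequeued " + result.size() + " events");
        for (StreamEvent event : result) {
            // getBody() returns a ByteBuffer; decode it as UTF-8 for display.
            System.out.println(Charsets.UTF_8.decode(event.getBody()));
        }
        // Note: reclaim() delegates to the underlying byte[] result, so calling it
        // in a fresh transaction skips these entries instead of re-delivering them.
    }
}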

Example 2 with DequeueResult

Use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.

From the class QueueTest, method testQueueAbortRetrySkip:

@Test(timeout = TIMEOUT_MS)
public void testQueueAbortRetrySkip() throws Exception {
    QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "queuefailure");
    configureGroups(queueName, ImmutableList.of(new ConsumerGroupConfig(0L, 1, DequeueStrategy.FIFO, null), new ConsumerGroupConfig(1L, 1, DequeueStrategy.HASH, "key")));
    List<ConsumerConfig> consumerConfigs = ImmutableList.of(new ConsumerConfig(0, 0, 1, DequeueStrategy.FIFO, null), new ConsumerConfig(1, 0, 1, DequeueStrategy.HASH, "key"));
    createEnqueueRunnable(queueName, 5, 1, null).run();
    try (QueueConsumer fifoConsumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(0), 2);
        QueueConsumer hashConsumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(1), 2)) {
        TransactionContext txContext = createTxContext(fifoConsumer, hashConsumer);
        txContext.start();
        Assert.assertEquals(0, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(0, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        // Abort the consumer transaction
        txContext.abort();
        // Dequeue again in a new transaction, should see the same entries
        txContext.start();
        Assert.assertEquals(0, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(0, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        txContext.finish();
        // Dequeue again, now should get next entry
        txContext.start();
        Assert.assertEquals(1, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(1, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        txContext.finish();
        // Dequeue a result and abort.
        txContext.start();
        DequeueResult<byte[]> fifoResult = fifoConsumer.dequeue();
        DequeueResult<byte[]> hashResult = hashConsumer.dequeue();
        Assert.assertEquals(2, Bytes.toInt(fifoResult.iterator().next()));
        Assert.assertEquals(2, Bytes.toInt(hashResult.iterator().next()));
        txContext.abort();
        // Now skip the result with a new transaction.
        txContext.start();
        fifoResult.reclaim();
        hashResult.reclaim();
        txContext.finish();
        // Dequeue again; it should see a new entry
        txContext.start();
        Assert.assertEquals(3, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(3, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        txContext.finish();
        // Dequeue again; it should see a new entry
        txContext.start();
        Assert.assertEquals(4, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(4, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        txContext.finish();
    }
    verifyQueueIsEmpty(queueName, consumerConfigs);
}
Also used: QueueConsumer (co.cask.cdap.data2.queue.QueueConsumer), TransactionContext (org.apache.tephra.TransactionContext), ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig), QueueName (co.cask.cdap.common.queue.QueueName), ConsumerGroupConfig (co.cask.cdap.data2.queue.ConsumerGroupConfig), Test (org.junit.Test)
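
The abort-then-reclaim pattern exercised above can be pulled out as a standalone sketch. The helper below is hypothetical; it assumes the caller supplies a QueueConsumer and a TransactionContext, as the test fixture does.

import co.cask.cdap.data2.queue.DequeueResult;
import co.cask.cdap.data2.queue.QueueConsumer;
import org.apache.tephra.TransactionContext;

final class ReclaimSketch {

    // Dequeue an entry, abort the transaction, then skip (rather than re-deliver)
    // the entry by reclaiming it inside a fresh transaction.
    static void dequeueThenSkip(QueueConsumer consumer, TransactionContext txContext) throws Exception {
        txContext.start();
        DequeueResult<byte[]> result = consumer.dequeue();
        txContext.abort();
        // Without the reclaim below, the aborted entry would be seen again by the
        // next dequeue; reclaiming it in a committed transaction consumes it.
        txContext.start();
        result.reclaim();
        txContext.finish();
    }
}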

Example 3 with DequeueResult

Use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.

From the class QueueTest, method testConcurrentEnqueue:

@Category(SlowTests.class)
@Test
public void testConcurrentEnqueue() throws Exception {
    // This test exercises multiple producers that write with a delay after a transaction has started.
    // It verifies that the consumer advances the startKey correctly.
    final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "concurrent");
    configureGroups(queueName, ImmutableList.of(new ConsumerGroupConfig(0, 1, DequeueStrategy.FIFO, null)));
    final CyclicBarrier barrier = new CyclicBarrier(4);
    ConsumerConfig consumerConfig = new ConsumerConfig(0, 0, 1, DequeueStrategy.FIFO, null);
    // Start three producers that enqueue concurrently. For each entry: start a TX, sleep, enqueue, commit.
    ExecutorService executor = Executors.newFixedThreadPool(3);
    final int entryCount = 50;
    for (int i = 0; i < 3; i++) {
        final QueueProducer producer = queueClientFactory.createProducer(queueName);
        final int producerId = i + 1;
        executor.execute(new Runnable() {

            @Override
            public void run() {
                try {
                    barrier.await();
                    for (int i = 0; i < entryCount; i++) {
                        TransactionContext txContext = createTxContext(producer);
                        txContext.start();
                        // Sleep at a different rate per producer so the consumer's scan has a higher chance of seeing
                        // the transaction but not the entry (which has not been written yet)
                        TimeUnit.MILLISECONDS.sleep(producerId * 50);
                        producer.enqueue(new QueueEntry(Bytes.toBytes(i)));
                        txContext.finish();
                    }
                } catch (Exception e) {
                    LOG.error(e.getMessage(), e);
                } finally {
                    Closeables.closeQuietly(producer);
                }
            }
        });
    }
    // sum(0..entryCount-1) * 3
    int expectedSum = entryCount * (entryCount - 1) / 2 * 3;
    try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
        // Trigger the start of the producers
        barrier.await();
        int dequeueSum = 0;
        int noProgress = 0;
        while (dequeueSum != expectedSum && noProgress < 200) {
            TransactionContext txContext = createTxContext(consumer);
            txContext.start();
            DequeueResult<byte[]> result = consumer.dequeue();
            if (!result.isEmpty()) {
                noProgress = 0;
                int value = Bytes.toInt(result.iterator().next());
                dequeueSum += value;
            } else {
                noProgress++;
                TimeUnit.MILLISECONDS.sleep(10);
            }
            txContext.finish();
        }
        Assert.assertEquals(expectedSum, dequeueSum);
    }
}
Also used: QueueEntry (co.cask.cdap.data2.queue.QueueEntry), TransactionFailureException (org.apache.tephra.TransactionFailureException), CyclicBarrier (java.util.concurrent.CyclicBarrier), QueueConsumer (co.cask.cdap.data2.queue.QueueConsumer), QueueProducer (co.cask.cdap.data2.queue.QueueProducer), TransactionContext (org.apache.tephra.TransactionContext), ExecutorService (java.util.concurrent.ExecutorService), ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig), QueueName (co.cask.cdap.common.queue.QueueName), ConsumerGroupConfig (co.cask.cdap.data2.queue.ConsumerGroupConfig), Category (org.junit.experimental.categories.Category), Test (org.junit.Test)
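
The expectedSum arithmetic deserves a quick note: each of the three producers enqueues the values 0 through entryCount - 1, so the per-producer total is the triangular number entryCount * (entryCount - 1) / 2. A dependency-free check of the numbers used in the test:

// Sanity check of the expectedSum arithmetic; plain Java, no CDAP dependencies.
final class ExpectedSumCheck {

    public static void main(String[] args) {
        int entryCount = 50;
        int perProducer = entryCount * (entryCount - 1) / 2; // 0 + 1 + ... + 49 = 1225
        int expectedSum = perProducer * 3;                   // three producers
        System.out.println(expectedSum);                     // prints 3675
    }
}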

Example 4 with DequeueResult

Use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.

From the class HBaseQueueTest, method testReconfigure:

@Test(timeout = 30000L)
public void testReconfigure() throws Exception {
    final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "changeinstances");
    ConsumerGroupConfig groupConfig = new ConsumerGroupConfig(0L, 2, DequeueStrategy.HASH, "key");
    configureGroups(queueName, ImmutableList.of(groupConfig));
    // Enqueue 10 items
    createEnqueueRunnable(queueName, 10, 1, null).run();
    // Map from instance id to items dequeued
    final Multimap<Integer, Integer> dequeued = ArrayListMultimap.create();
    // Consume 2 items for each consumer instance
    for (int instanceId = 0; instanceId < groupConfig.getGroupSize(); instanceId++) {
        final ConsumerConfig consumerConfig = new ConsumerConfig(groupConfig, instanceId);
        try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
            Transactions.createTransactionExecutor(executorFactory, (TransactionAware) consumer).execute(new TransactionExecutor.Subroutine() {

                @Override
                public void apply() throws Exception {
                    DequeueResult<byte[]> result = consumer.dequeue(2);
                    Assert.assertEquals(2, result.size());
                    for (byte[] data : result) {
                        dequeued.put(consumerConfig.getInstanceId(), Bytes.toInt(data));
                    }
                }
            });
        }
    }
    // Increase number of instances to 3
    changeInstances(queueName, 0L, 3);
    // Enqueue 10 more items
    createEnqueueRunnable(queueName, 10, 1, null).run();
    groupConfig = new ConsumerGroupConfig(0L, 3, DequeueStrategy.HASH, "key");
    // Dequeue everything
    while (dequeued.size() != 20) {
        for (int instanceId = 0; instanceId < groupConfig.getGroupSize(); instanceId++) {
            final ConsumerConfig consumerConfig = new ConsumerConfig(groupConfig, instanceId);
            try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
                Transactions.createTransactionExecutor(executorFactory, (TransactionAware) consumer).execute(new TransactionExecutor.Subroutine() {

                    @Override
                    public void apply() throws Exception {
                        for (byte[] data : consumer.dequeue(20)) {
                            dequeued.put(consumerConfig.getInstanceId(), Bytes.toInt(data));
                        }
                    }
                });
            }
        }
    }
    // Instance 0 should see all even entries from before the instance change
    Assert.assertEquals(ImmutableList.of(0, 2, 4, 6, 8, 0, 3, 6, 9), dequeued.get(0));
    // Instance 1 should see all odd entries from before the instance change
    Assert.assertEquals(ImmutableList.of(1, 3, 5, 7, 9, 1, 4, 7), dequeued.get(1));
    // Instance 2 should only see entries enqueued after the instance change
    Assert.assertEquals(ImmutableList.of(2, 5, 8), dequeued.get(2));
    // All consumers should see an empty dequeue now
    for (int instanceId = 0; instanceId < groupConfig.getGroupSize(); instanceId++) {
        final ConsumerConfig consumerConfig = new ConsumerConfig(groupConfig, instanceId);
        try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
            Transactions.createTransactionExecutor(executorFactory, (TransactionAware) consumer).execute(new TransactionExecutor.Subroutine() {

                @Override
                public void apply() throws Exception {
                    DequeueResult<byte[]> result = consumer.dequeue(20);
                    Assert.assertTrue(result.isEmpty());
                }
            });
        }
    }
    // Enqueue 6 more items for the 3 instances
    createEnqueueRunnable(queueName, 6, 1, null).run();
    // Reduce to 1 consumer
    changeInstances(queueName, 0L, 1);
    // Consumer 0 should be able to consume all 6 new items
    dequeued.clear();
    final ConsumerConfig consumerConfig = new ConsumerConfig(0L, 0, 1, DequeueStrategy.HASH, "key");
    try (final QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
        while (dequeued.size() != 6) {
            Transactions.createTransactionExecutor(executorFactory, (TransactionAware) consumer).execute(new TransactionExecutor.Subroutine() {

                @Override
                public void apply() throws Exception {
                    for (byte[] data : consumer.dequeue(1)) {
                        dequeued.put(consumerConfig.getInstanceId(), Bytes.toInt(data));
                    }
                }
            });
        }
    }
    Assert.assertEquals(ImmutableList.of(0, 1, 2, 3, 4, 5), dequeued.get(0));
}
Also used: TransactionExecutor (org.apache.tephra.TransactionExecutor), IOException (java.io.IOException), TableNotFoundException (org.apache.hadoop.hbase.TableNotFoundException), QueueConsumer (co.cask.cdap.data2.queue.QueueConsumer), DequeueResult (co.cask.cdap.data2.queue.DequeueResult), TransactionAware (org.apache.tephra.TransactionAware), ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig), QueueName (co.cask.cdap.common.queue.QueueName), ConsumerGroupConfig (co.cask.cdap.data2.queue.ConsumerGroupConfig), Test (org.junit.Test), QueueTest (co.cask.cdap.data2.transaction.queue.QueueTest)
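
The instance assignments asserted above are consistent with a HASH strategy that routes each entry to instance hash(key) mod groupSize, where the hash of these small integer payloads behaves like the value itself. The snippet below only illustrates that partitioning rule under that assumption; it is not CDAP's actual hashing code.

import java.util.ArrayList;
import java.util.List;

final class HashPartitionSketch {

    public static void main(String[] args) {
        // Distribute the values 0..9 across 3 consumer instances by value % groupSize.
        int groupSize = 3;
        List<List<Integer>> byInstance = new ArrayList<>();
        for (int i = 0; i < groupSize; i++) {
            byInstance.add(new ArrayList<Integer>());
        }
        for (int value = 0; value < 10; value++) {
            byInstance.get(value % groupSize).add(value);
        }
        // Prints [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]], matching the post-reconfigure
        // entries each instance dequeues in the assertions above.
        System.out.println(byInstance);
    }
}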

Example 5 with DequeueResult

Use of co.cask.cdap.data2.queue.DequeueResult in project cdap by caskdata.

From the class StreamConsumerTestBase, method testNamespacedStreamConsumers:

@Test
public void testNamespacedStreamConsumers() throws Exception {
    // Test two consumers for two streams with the same name but in different namespaces. Their consumption
    // should be independent of each other.
    String stream = "testNamespacedStreamConsumers";
    StreamId streamId = TEST_NAMESPACE.stream(stream);
    StreamId otherStreamId = OTHER_NAMESPACE.stream(stream);
    StreamAdmin streamAdmin = getStreamAdmin();
    streamAdmin.create(streamId);
    streamAdmin.create(otherStreamId);
    StreamConfig streamConfig = streamAdmin.getConfig(streamId);
    StreamConfig otherStreamConfig = streamAdmin.getConfig(otherStreamId);
    // Write 5 events to each stream
    writeEvents(streamConfig, "Testing ", 5);
    writeEvents(otherStreamConfig, "Testing ", 5);
    streamAdmin.configureInstances(streamId, 0L, 1);
    streamAdmin.configureInstances(otherStreamId, 0L, 1);
    StreamConsumerFactory consumerFactory = getConsumerFactory();
    StreamConsumer consumer = consumerFactory.create(streamId, "fifo.rollback", new ConsumerConfig(0L, 0, 1, DequeueStrategy.FIFO, null));
    StreamConsumer otherConsumer = consumerFactory.create(otherStreamId, "fifo.rollback", new ConsumerConfig(0L, 0, 1, DequeueStrategy.FIFO, null));
    // Try to dequeue using both consumers
    TransactionContext context = createTxContext(consumer);
    TransactionContext otherContext = createTxContext(otherConsumer);
    context.start();
    otherContext.start();
    // Consume events from the stream in the default namespace
    DequeueResult<StreamEvent> result0 = consumer.poll(1, 1, TimeUnit.SECONDS);
    Assert.assertEquals("Testing 0", Charsets.UTF_8.decode(result0.iterator().next().getBody()).toString());
    context.finish();
    context.start();
    result0 = consumer.poll(1, 1, TimeUnit.SECONDS);
    Assert.assertEquals("Testing 1", Charsets.UTF_8.decode(result0.iterator().next().getBody()).toString());
    context.finish();
    context.start();
    result0 = consumer.poll(1, 1, TimeUnit.SECONDS);
    Assert.assertEquals("Testing 2", Charsets.UTF_8.decode(result0.iterator().next().getBody()).toString());
    context.finish();
    context.start();
    // Even though the consumer of the identically named stream has already consumed 3 events, otherConsumer
    // reads the stream in a different namespace, so it will still be on the initial event.
    DequeueResult<StreamEvent> result1 = otherConsumer.poll(1, 1, TimeUnit.SECONDS);
    Assert.assertEquals("Testing 0", Charsets.UTF_8.decode(result1.iterator().next().getBody()).toString());
    otherContext.finish();
    otherContext.start();
    result0 = consumer.poll(1, 1, TimeUnit.SECONDS);
    result1 = otherConsumer.poll(1, 1, TimeUnit.SECONDS);
    Assert.assertEquals("Testing 3", Charsets.UTF_8.decode(result0.iterator().next().getBody()).toString());
    Assert.assertEquals("Testing 1", Charsets.UTF_8.decode(result1.iterator().next().getBody()).toString());
    // Commit both
    context.finish();
    otherContext.finish();
    consumer.close();
    otherConsumer.close();
}
Also used: StreamId (co.cask.cdap.proto.id.StreamId), TransactionContext (org.apache.tephra.TransactionContext), StreamEvent (co.cask.cdap.api.flow.flowlet.StreamEvent), ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig), Test (org.junit.Test)
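
As a small sketch of why the two consumers do not interfere: the same stream name resolves to distinct StreamId entities once the namespaces differ, assuming TEST_NAMESPACE and OTHER_NAMESPACE are NamespaceId values as the calls above suggest. The helper class and method names below are made up.

import co.cask.cdap.proto.id.NamespaceId;
import co.cask.cdap.proto.id.StreamId;

final class NamespacedStreamIdSketch {

    // Two streams with the same name are the same entity only if their namespaces match.
    static boolean sameStream(NamespaceId first, NamespaceId second, String streamName) {
        StreamId a = first.stream(streamName);
        StreamId b = second.stream(streamName);
        return a.equals(b); // false whenever the namespaces differ
    }
}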

Aggregations

ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig): 14
TransactionContext (org.apache.tephra.TransactionContext): 11
Test (org.junit.Test): 11
QueueConsumer (co.cask.cdap.data2.queue.QueueConsumer): 9
ConsumerGroupConfig (co.cask.cdap.data2.queue.ConsumerGroupConfig): 8
QueueName (co.cask.cdap.common.queue.QueueName): 7
StreamEvent (co.cask.cdap.api.flow.flowlet.StreamEvent): 6
DequeueResult (co.cask.cdap.data2.queue.DequeueResult): 5
QueueEntry (co.cask.cdap.data2.queue.QueueEntry): 5
StreamId (co.cask.cdap.proto.id.StreamId): 5
QueueProducer (co.cask.cdap.data2.queue.QueueProducer): 4
TransactionFailureException (org.apache.tephra.TransactionFailureException): 4
TransactionExecutor (org.apache.tephra.TransactionExecutor): 3
QueueTest (co.cask.cdap.data2.transaction.queue.QueueTest): 2
IOException (java.io.IOException): 2
Properties (java.util.Properties): 2
CyclicBarrier (java.util.concurrent.CyclicBarrier): 2
ExecutorService (java.util.concurrent.ExecutorService): 2
TableNotFoundException (org.apache.hadoop.hbase.TableNotFoundException): 2
TransactionAware (org.apache.tephra.TransactionAware): 2