Example 11 with ConsumerConfig

Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.

In the class QueueTest, the method testOneEnqueueDequeue:

private void testOneEnqueueDequeue(DequeueStrategy strategy) throws Exception {
    // since this is used by more than one test method, ensure uniqueness of the queue name by appending the strategy
    QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "queue1" + strategy.toString());
    configureGroups(queueName, ImmutableList.of(new ConsumerGroupConfig(0L, 1, strategy, null), new ConsumerGroupConfig(1L, 1, strategy, null)));
    List<ConsumerConfig> consumerConfigs = ImmutableList.of(new ConsumerConfig(0L, 0, 1, strategy, null), new ConsumerConfig(1L, 0, 1, strategy, null));
    try (QueueProducer producer = queueClientFactory.createProducer(queueName)) {
        TransactionContext txContext = createTxContext(producer);
        txContext.start();
        producer.enqueue(new QueueEntry(Bytes.toBytes(55)));
        txContext.finish();
        try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(0), 2)) {
            txContext = createTxContext(consumer);
            txContext.start();
            Assert.assertEquals(55, Bytes.toInt(consumer.dequeue().iterator().next()));
            txContext.finish();
        }
    }
    forceEviction(queueName, 2);
    // verify that a consumer of the 2nd group can still dequeue the entry: it has not been evicted yet
    try (QueueConsumer consumer2 = queueClientFactory.createConsumer(queueName, consumerConfigs.get(1), 2)) {
        TransactionContext txContext = createTxContext(consumer2);
        txContext.start();
        Assert.assertEquals(55, Bytes.toInt(consumer2.dequeue().iterator().next()));
        txContext.finish();
    }
    // now all should be evicted
    verifyQueueIsEmpty(queueName, consumerConfigs);
}
Also used : QueueConsumer(co.cask.cdap.data2.queue.QueueConsumer) QueueProducer(co.cask.cdap.data2.queue.QueueProducer) TransactionContext(org.apache.tephra.TransactionContext) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) QueueName(co.cask.cdap.common.queue.QueueName) ConsumerGroupConfig(co.cask.cdap.data2.queue.ConsumerGroupConfig) QueueEntry(co.cask.cdap.data2.queue.QueueEntry)
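
For readers scanning these examples, it helps to spell out the positional arguments: throughout this page, ConsumerGroupConfig is built as (group id, group size, dequeue strategy, hash key), and ConsumerConfig either as (group id, instance id, group size, dequeue strategy, hash key) or directly from a group config plus an instance id. Below is a minimal sketch showing the two forms side by side; the parameter meanings are inferred from how the constructors are used in these examples, not quoted from the CDAP source.

// Sketch only: both configs below describe instance 0 of a single-instance FIFO group 0.
long groupId = 0L;
int groupSize = 1;
int instanceId = 0;
ConsumerGroupConfig group = new ConsumerGroupConfig(groupId, groupSize, DequeueStrategy.FIFO, null);
ConsumerConfig fromGroup = new ConsumerConfig(group, instanceId);
ConsumerConfig direct = new ConsumerConfig(groupId, instanceId, groupSize, DequeueStrategy.FIFO, null);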

Example 12 with ConsumerConfig

Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.

In the class HBaseQueueTest, the method testReconfigure:

@Test(timeout = 30000L)
public void testReconfigure() throws Exception {
    final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "changeinstances");
    ConsumerGroupConfig groupConfig = new ConsumerGroupConfig(0L, 2, DequeueStrategy.HASH, "key");
    configureGroups(queueName, ImmutableList.of(groupConfig));
    // Enqueue 10 items
    createEnqueueRunnable(queueName, 10, 1, null).run();
    // Map from instance id to items dequeued
    final Multimap<Integer, Integer> dequeued = ArrayListMultimap.create();
    // Consume 2 items with each consumer instance
    for (int instanceId = 0; instanceId < groupConfig.getGroupSize(); instanceId++) {
        final ConsumerConfig consumerConfig = new ConsumerConfig(groupConfig, instanceId);
        try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
            Transactions.createTransactionExecutor(executorFactory, (TransactionAware) consumer).execute(new TransactionExecutor.Subroutine() {

                @Override
                public void apply() throws Exception {
                    DequeueResult<byte[]> result = consumer.dequeue(2);
                    Assert.assertEquals(2, result.size());
                    for (byte[] data : result) {
                        dequeued.put(consumerConfig.getInstanceId(), Bytes.toInt(data));
                    }
                }
            });
        }
    }
    // Increase number of instances to 3
    changeInstances(queueName, 0L, 3);
    // Enqueue 10 more items
    createEnqueueRunnable(queueName, 10, 1, null).run();
    groupConfig = new ConsumerGroupConfig(0L, 3, DequeueStrategy.HASH, "key");
    // Dequeue everything
    while (dequeued.size() != 20) {
        for (int instanceId = 0; instanceId < groupConfig.getGroupSize(); instanceId++) {
            final ConsumerConfig consumerConfig = new ConsumerConfig(groupConfig, instanceId);
            try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
                Transactions.createTransactionExecutor(executorFactory, (TransactionAware) consumer).execute(new TransactionExecutor.Subroutine() {

                    @Override
                    public void apply() throws Exception {
                        for (byte[] data : consumer.dequeue(20)) {
                            dequeued.put(consumerConfig.getInstanceId(), Bytes.toInt(data));
                        }
                    }
                });
            }
        }
    }
    // Instance 0 should see all even entries from before the instance change, then 0, 3, 6, 9 after it
    Assert.assertEquals(ImmutableList.of(0, 2, 4, 6, 8, 0, 3, 6, 9), dequeued.get(0));
    // Instance 1 should see all odd entries from before the instance change, then 1, 4, 7 after it
    Assert.assertEquals(ImmutableList.of(1, 3, 5, 7, 9, 1, 4, 7), dequeued.get(1));
    // Instance 2 should only see entries enqueued after the instance change
    Assert.assertEquals(ImmutableList.of(2, 5, 8), dequeued.get(2));
    // All consumers should now see an empty dequeue result
    for (int instanceId = 0; instanceId < groupConfig.getGroupSize(); instanceId++) {
        final ConsumerConfig consumerConfig = new ConsumerConfig(groupConfig, instanceId);
        try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
            Transactions.createTransactionExecutor(executorFactory, (TransactionAware) consumer).execute(new TransactionExecutor.Subroutine() {

                @Override
                public void apply() throws Exception {
                    DequeueResult<byte[]> result = consumer.dequeue(20);
                    Assert.assertTrue(result.isEmpty());
                }
            });
        }
    }
    // Enqueue 6 more items for the 3 instances
    createEnqueueRunnable(queueName, 6, 1, null).run();
    // Reduce to 1 consumer
    changeInstances(queueName, 0L, 1);
    // Consumer 0 should be able to consume all 6 new items
    dequeued.clear();
    final ConsumerConfig consumerConfig = new ConsumerConfig(0L, 0, 1, DequeueStrategy.HASH, "key");
    try (final QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
        while (dequeued.size() != 6) {
            Transactions.createTransactionExecutor(executorFactory, (TransactionAware) consumer).execute(new TransactionExecutor.Subroutine() {

                @Override
                public void apply() throws Exception {
                    for (byte[] data : consumer.dequeue(1)) {
                        dequeued.put(consumerConfig.getInstanceId(), Bytes.toInt(data));
                    }
                }
            });
        }
    }
    Assert.assertEquals(ImmutableList.of(0, 1, 2, 3, 4, 5), dequeued.get(0));
}
Also used : TransactionExecutor(org.apache.tephra.TransactionExecutor) IOException(java.io.IOException) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) QueueConsumer(co.cask.cdap.data2.queue.QueueConsumer) DequeueResult(co.cask.cdap.data2.queue.DequeueResult) TransactionAware(org.apache.tephra.TransactionAware) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) QueueName(co.cask.cdap.common.queue.QueueName) ConsumerGroupConfig(co.cask.cdap.data2.queue.ConsumerGroupConfig) Test(org.junit.Test) QueueTest(co.cask.cdap.data2.transaction.queue.QueueTest)
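
The per-instance assertions above are exactly what HASH partitioning produces when the hash key of entry i resolves to i: each entry lands on instance i % groupSize, so with 2 instances the evens go to instance 0 and the odds to instance 1, and with 3 instances the split becomes 0,3,6,9 / 1,4,7 / 2,5,8. A small illustration of that arithmetic follows; it reproduces the distribution the test asserts and is not CDAP's actual routing code.

// Illustration only: print which instance each of the 10 entries lands on
// for group sizes 2 and 3, matching the expected lists in the assertions above.
for (int value = 0; value < 10; value++) {
    System.out.println(value + " -> instance " + (value % 2) + " of 2, instance " + (value % 3) + " of 3");
}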

Example 13 with ConsumerConfig

Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.

In the class DequeueFilter, the method filterRowCells:

@Override
public void filterRowCells(List<Cell> cells) {
    byte[] dataBytes = null;
    byte[] metaBytes = null;
    byte[] stateBytes = null;
    // the list is very short, so it is fine to loop through it to find the columns
    for (Cell cell : cells) {
        if (CellUtil.matchingQualifier(cell, QueueEntryRow.DATA_COLUMN)) {
            dataBytes = CellUtil.cloneValue(cell);
        } else if (CellUtil.matchingQualifier(cell, QueueEntryRow.META_COLUMN)) {
            metaBytes = CellUtil.cloneValue(cell);
        } else if (CellUtil.matchingQualifier(cell, stateColumnName)) {
            stateBytes = CellUtil.cloneValue(cell);
        }
    }
    if (dataBytes == null || metaBytes == null) {
        skipRow = true;
        return;
    }
    QueueEntryRow.CanConsume canConsume = QueueEntryRow.canConsume(consumerConfig, transaction, writePointer, counter, metaBytes, stateBytes);
    // Only skip the row when canConsume == NO, so that in the case of NO_INCLUDING_ALL_OLDER, the client
    // can still see the row and move the scan start row.
    skipRow = canConsume == QueueEntryRow.CanConsume.NO;
}
Also used : QueueEntryRow(co.cask.cdap.data2.transaction.queue.QueueEntryRow) Cell(org.apache.hadoop.hbase.Cell)

Example 14 with ConsumerConfig

Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.

In the class DequeueScanObserver, the method preScannerOpen:

@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan, RegionScanner s) throws IOException {
    ConsumerConfig consumerConfig = DequeueScanAttributes.getConsumerConfig(scan);
    Transaction tx = DequeueScanAttributes.getTx(scan);
    if (consumerConfig == null || tx == null) {
        return super.preScannerOpen(e, scan, s);
    }
    Filter dequeueFilter = new DequeueFilter(consumerConfig, tx);
    Filter existing = scan.getFilter();
    if (existing != null) {
        Filter combined = new FilterList(FilterList.Operator.MUST_PASS_ALL, existing, dequeueFilter);
        scan.setFilter(combined);
    } else {
        scan.setFilter(dequeueFilter);
    }
    return super.preScannerOpen(e, scan, s);
}
Also used : Transaction(org.apache.tephra.Transaction) Filter(org.apache.hadoop.hbase.filter.Filter) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) FilterList(org.apache.hadoop.hbase.filter.FilterList)
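
Note that the observer only installs the DequeueFilter when it can read both a ConsumerConfig and a Transaction back off the Scan, so the client side must have attached them as scan attributes beforehand (that is what DequeueScanAttributes.getConsumerConfig and DequeueScanAttributes.getTx deserialize). A rough sketch of that client side is shown below, with placeholder attribute keys and a placeholder serialize helper; the real keys and serialization live in DequeueScanAttributes and are not shown on this page.

// Hypothetical sketch: attach the dequeue metadata to the Scan so the region-side
// observer can rebuild it in preScannerOpen. The attribute keys and serialize()
// below are assumptions, not the actual DequeueScanAttributes constants.
Scan scan = new Scan();
scan.setAttribute("cdap.dequeue.consumer.config", serialize(consumerConfig));
scan.setAttribute("cdap.dequeue.tx", serialize(tx));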

Example 15 with ConsumerConfig

Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.

In the class StreamConsumerTestBase, the method testNamespacedStreamConsumers:

@Test
public void testNamespacedStreamConsumers() throws Exception {
    // Test two consumers for two streams with the same name but in different namespaces. Their consumption should be
    // independent of each other.
    String stream = "testNamespacedStreamConsumers";
    StreamId streamId = TEST_NAMESPACE.stream(stream);
    StreamId otherStreamId = OTHER_NAMESPACE.stream(stream);
    StreamAdmin streamAdmin = getStreamAdmin();
    streamAdmin.create(streamId);
    streamAdmin.create(otherStreamId);
    StreamConfig streamConfig = streamAdmin.getConfig(streamId);
    StreamConfig otherStreamConfig = streamAdmin.getConfig(otherStreamId);
    // Writes 5 events to both streams
    writeEvents(streamConfig, "Testing ", 5);
    writeEvents(otherStreamConfig, "Testing ", 5);
    streamAdmin.configureInstances(streamId, 0L, 1);
    streamAdmin.configureInstances(otherStreamId, 0L, 1);
    StreamConsumerFactory consumerFactory = getConsumerFactory();
    StreamConsumer consumer = consumerFactory.create(streamId, "fifo.rollback", new ConsumerConfig(0L, 0, 1, DequeueStrategy.FIFO, null));
    StreamConsumer otherConsumer = consumerFactory.create(otherStreamId, "fifo.rollback", new ConsumerConfig(0L, 0, 1, DequeueStrategy.FIFO, null));
    // Try to dequeue using both consumers
    TransactionContext context = createTxContext(consumer);
    TransactionContext otherContext = createTxContext(otherConsumer);
    context.start();
    otherContext.start();
    // Consume events from the stream in the default namespace
    DequeueResult<StreamEvent> result0 = consumer.poll(1, 1, TimeUnit.SECONDS);
    Assert.assertEquals("Testing 0", Charsets.UTF_8.decode(result0.iterator().next().getBody()).toString());
    context.finish();
    context.start();
    result0 = consumer.poll(1, 1, TimeUnit.SECONDS);
    Assert.assertEquals("Testing 1", Charsets.UTF_8.decode(result0.iterator().next().getBody()).toString());
    context.finish();
    context.start();
    result0 = consumer.poll(1, 1, TimeUnit.SECONDS);
    Assert.assertEquals("Testing 2", Charsets.UTF_8.decode(result0.iterator().next().getBody()).toString());
    context.finish();
    context.start();
    // Even though a stream with the same name has already consumed 3 events, the otherConsumer is for a stream in a
    // different namespace, so it will still be on the initial event.
    DequeueResult<StreamEvent> result1 = otherConsumer.poll(1, 1, TimeUnit.SECONDS);
    Assert.assertEquals("Testing 0", Charsets.UTF_8.decode(result1.iterator().next().getBody()).toString());
    otherContext.finish();
    otherContext.start();
    result0 = consumer.poll(1, 1, TimeUnit.SECONDS);
    result1 = otherConsumer.poll(1, 1, TimeUnit.SECONDS);
    Assert.assertEquals("Testing 3", Charsets.UTF_8.decode(result0.iterator().next().getBody()).toString());
    Assert.assertEquals("Testing 1", Charsets.UTF_8.decode(result1.iterator().next().getBody()).toString());
    // Commit both
    context.finish();
    otherContext.finish();
    consumer.close();
    otherConsumer.close();
}
Also used : StreamId(co.cask.cdap.proto.id.StreamId) TransactionContext(org.apache.tephra.TransactionContext) StreamEvent(co.cask.cdap.api.flow.flowlet.StreamEvent) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) Test(org.junit.Test)

Aggregations

ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig): 33
QueueConsumer (co.cask.cdap.data2.queue.QueueConsumer): 17
TransactionContext (org.apache.tephra.TransactionContext): 14
Test (org.junit.Test): 14
QueueName (co.cask.cdap.common.queue.QueueName): 12
ConsumerGroupConfig (co.cask.cdap.data2.queue.ConsumerGroupConfig): 12
QueueEntry (co.cask.cdap.data2.queue.QueueEntry): 8
Transaction (org.apache.tephra.Transaction): 8
StreamEvent (co.cask.cdap.api.flow.flowlet.StreamEvent): 7
QueueProducer (co.cask.cdap.data2.queue.QueueProducer): 7
QueueEntryRow (co.cask.cdap.data2.transaction.queue.QueueEntryRow): 7
StreamId (co.cask.cdap.proto.id.StreamId): 7
Cell (org.apache.hadoop.hbase.Cell): 7
Filter (org.apache.hadoop.hbase.filter.Filter): 7
FilterList (org.apache.hadoop.hbase.filter.FilterList): 7
IOException (java.io.IOException): 6
TransactionFailureException (org.apache.tephra.TransactionFailureException): 6
TransactionExecutor (org.apache.tephra.TransactionExecutor): 5
DequeueResult (co.cask.cdap.data2.queue.DequeueResult): 4
DequeueStrategy (co.cask.cdap.data2.queue.DequeueStrategy): 3