Example 6 with ConsumerConfig

Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.

The class LevelDBQueueClientFactory, method createConsumer.

@Override
public QueueConsumer createConsumer(QueueName queueName, ConsumerConfig consumerConfig, int numGroups) throws IOException {
    LevelDBQueueAdmin admin = ensureTableExists(queueName);
    LevelDBTableCore core = new LevelDBTableCore(admin.getActualTableName(queueName), service);
    // only the first consumer instance of each group runs eviction, and only if the number of consumer groups is known (> 0).
    QueueEvictor evictor = (numGroups <= 0 || consumerConfig.getInstanceId() != 0) ? QueueEvictor.NOOP : createEvictor(queueName, numGroups, core);
    return new LevelDBQueueConsumer(cConf, core, getQueueLock(queueName.toString()), consumerConfig, queueName, evictor);
}
Also used : LevelDBTableCore(co.cask.cdap.data2.dataset2.lib.table.leveldb.LevelDBTableCore) QueueEvictor(co.cask.cdap.data2.transaction.queue.QueueEvictor)
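
For reference, a minimal call-site sketch, modeled on the QueueTest examples further down. It assumes an already configured factory (the tests call theirs queueClientFactory) and uses placeholder app/flow/flowlet names; it is not taken verbatim from the project. Per the code above, only instance 0 of a group, and only when a positive group count is passed in, gets a real evictor; every other consumer gets QueueEvictor.NOOP.

// Minimal sketch, assuming a configured factory ("queueClientFactory") and
// placeholder queue coordinates; not verbatim from the project.
QueueName queueName = QueueName.fromFlowlet(
    NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "out");
// (groupId, instanceId, groupSize, strategy, hashKey), mirroring the tests below
ConsumerConfig config = new ConsumerConfig(0, 0, 1, DequeueStrategy.FIFO, null);
// With numGroups = 1 and instanceId = 0 this consumer gets the real evictor;
// any other instance, or numGroups <= 0, would get QueueEvictor.NOOP.
try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, config, 1)) {
    // dequeue calls go inside a transaction, as in the QueueTest examples below
}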

Example 7 with ConsumerConfig

Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.

The class DequeueFilter, method filterRowCells.

@Override
public void filterRowCells(List<Cell> cells) {
    byte[] dataBytes = null;
    byte[] metaBytes = null;
    byte[] stateBytes = null;
    // the list is very short, so it is OK to loop through it to find the columns
    for (Cell cell : cells) {
        if (CellUtil.matchingQualifier(cell, QueueEntryRow.DATA_COLUMN)) {
            dataBytes = CellUtil.cloneValue(cell);
        } else if (CellUtil.matchingQualifier(cell, QueueEntryRow.META_COLUMN)) {
            metaBytes = CellUtil.cloneValue(cell);
        } else if (CellUtil.matchingQualifier(cell, stateColumnName)) {
            stateBytes = CellUtil.cloneValue(cell);
        }
    }
    if (dataBytes == null || metaBytes == null) {
        skipRow = true;
        return;
    }
    QueueEntryRow.CanConsume canConsume = QueueEntryRow.canConsume(consumerConfig, transaction, writePointer, counter, metaBytes, stateBytes);
    // Only skip the row when canConsume == NO, so that in case of NO_INCLUDING_ALL_OLDER, the client
    // can still see the row and move the scan start row.
    skipRow = canConsume == QueueEntryRow.CanConsume.NO;
}
Also used : QueueEntryRow(co.cask.cdap.data2.transaction.queue.QueueEntryRow) Cell(org.apache.hadoop.hbase.Cell)
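
For context, a cell-level filter like this one is attached to an HBase Scan, usually inside a FilterList (both Filter and FilterList appear in the Aggregations list at the bottom of this page). Below is a hypothetical wiring sketch; the dequeue filter instance is assumed to have been built elsewhere, since its constructor is not part of this snippet.

// Hypothetical helper, not taken from the project: attach an already-constructed
// dequeue filter to a scan over the queue table. MUST_PASS_ALL means every filter
// in the list must accept a cell for it to be returned.
static Scan newQueueScan(Filter dequeueFilter) {
    Scan scan = new Scan();
    scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, dequeueFilter));
    return scan;
}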

Example 8 with ConsumerConfig

Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.

The class QueueTest, method testQueueAbortRetrySkip.

@Test(timeout = TIMEOUT_MS)
public void testQueueAbortRetrySkip() throws Exception {
    QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "queuefailure");
    configureGroups(queueName, ImmutableList.of(new ConsumerGroupConfig(0L, 1, DequeueStrategy.FIFO, null), new ConsumerGroupConfig(1L, 1, DequeueStrategy.HASH, "key")));
    List<ConsumerConfig> consumerConfigs = ImmutableList.of(new ConsumerConfig(0, 0, 1, DequeueStrategy.FIFO, null), new ConsumerConfig(1, 0, 1, DequeueStrategy.HASH, "key"));
    createEnqueueRunnable(queueName, 5, 1, null).run();
    try (QueueConsumer fifoConsumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(0), 2);
        QueueConsumer hashConsumer = queueClientFactory.createConsumer(queueName, consumerConfigs.get(1), 2)) {
        TransactionContext txContext = createTxContext(fifoConsumer, hashConsumer);
        txContext.start();
        Assert.assertEquals(0, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(0, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        // Abort the consumer transaction
        txContext.abort();
        // Dequeue again in a new transaction, should see the same entries
        txContext.start();
        Assert.assertEquals(0, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(0, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        txContext.finish();
        // Dequeue again; now it should get the next entry
        txContext.start();
        Assert.assertEquals(1, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(1, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        txContext.finish();
        // Dequeue a result and abort.
        txContext.start();
        DequeueResult<byte[]> fifoResult = fifoConsumer.dequeue();
        DequeueResult<byte[]> hashResult = hashConsumer.dequeue();
        Assert.assertEquals(2, Bytes.toInt(fifoResult.iterator().next()));
        Assert.assertEquals(2, Bytes.toInt(hashResult.iterator().next()));
        txContext.abort();
        // Now skip the result with a new transaction.
        txContext.start();
        fifoResult.reclaim();
        hashResult.reclaim();
        txContext.finish();
        // Dequeue again, it should see a new entry
        txContext.start();
        Assert.assertEquals(3, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(3, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        txContext.finish();
        // Dequeue again, it should see a new entry
        txContext.start();
        Assert.assertEquals(4, Bytes.toInt(fifoConsumer.dequeue().iterator().next()));
        Assert.assertEquals(4, Bytes.toInt(hashConsumer.dequeue().iterator().next()));
        txContext.finish();
    }
    verifyQueueIsEmpty(queueName, consumerConfigs);
}
Also used : QueueConsumer(co.cask.cdap.data2.queue.QueueConsumer) TransactionContext(org.apache.tephra.TransactionContext) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) QueueName(co.cask.cdap.common.queue.QueueName) ConsumerGroupConfig(co.cask.cdap.data2.queue.ConsumerGroupConfig) Test(org.junit.Test)
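
The skip step in the middle of this test is the easiest part to miss: a DequeueResult obtained in an aborted transaction can be reclaimed in a fresh transaction, which marks the entry as consumed without processing it again. A condensed sketch of just that pattern, assuming a consumer and TransactionContext set up as in the test (names shortened here):

txContext.start();
DequeueResult<byte[]> result = consumer.dequeue();
// the entry cannot be handled, so abort; it remains available to other transactions
txContext.abort();

txContext.start();
// reclaim the previously dequeued entry so it counts as consumed (i.e. skipped)
result.reclaim();
txContext.finish();
// the next dequeue returns the entry after the reclaimed one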

Example 9 with ConsumerConfig

Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.

The class QueueTest, method testRollback.

@Test(timeout = TIMEOUT_MS)
public void testRollback() throws Exception {
    QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "queuerollback");
    ConsumerConfig consumerConfig = new ConsumerConfig(0, 0, 1, DequeueStrategy.FIFO, null);
    configureGroups(queueName, ImmutableList.of(consumerConfig));
    try (QueueProducer producer = queueClientFactory.createProducer(queueName);
        QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
        TransactionContext txContext = createTxContext(producer, consumer, new TransactionAware() {

            boolean canCommit = false;

            @Override
            public void startTx(Transaction tx) {
            }

            @Override
            public void updateTx(Transaction tx) {
            }

            @Override
            public Collection<byte[]> getTxChanges() {
                return ImmutableList.of();
            }

            @Override
            public boolean commitTx() throws Exception {
                // Flip-flop between commit success/failure.
                boolean res = canCommit;
                canCommit = !canCommit;
                return res;
            }

            @Override
            public void postTxCommit() {
            }

            @Override
            public boolean rollbackTx() throws Exception {
                return true;
            }

            @Override
            public String getTransactionAwareName() {
                return "test";
            }
        });
        // First, try to enqueue; the commit should fail
        txContext.start();
        try {
            producer.enqueue(new QueueEntry(Bytes.toBytes(1)));
            txContext.finish();
            // If we reach here, it's wrong, as an exception should have been thrown.
            Assert.assertTrue(false);
        } catch (TransactionFailureException e) {
            txContext.abort();
        }
        // Try to enqueue again. Within the same transaction, dequeue should be empty.
        txContext.start();
        producer.enqueue(new QueueEntry(Bytes.toBytes(1)));
        Assert.assertTrue(consumer.dequeue().isEmpty());
        txContext.finish();
        // This time the enqueue has been committed, so the dequeue should see the item
        txContext.start();
        try {
            Assert.assertEquals(1, Bytes.toInt(consumer.dequeue().iterator().next()));
            txContext.finish();
            // If we reach here, it's wrong, as an exception should have been thrown.
            Assert.assertTrue(false);
        } catch (TransactionFailureException e) {
            txContext.abort();
        }
        // Dequeue again; since the last tx was rolled back, this dequeue should see the item again.
        txContext.start();
        Assert.assertEquals(1, Bytes.toInt(consumer.dequeue().iterator().next()));
        txContext.finish();
    }
}
Also used : QueueEntry(co.cask.cdap.data2.queue.QueueEntry) TransactionFailureException(org.apache.tephra.TransactionFailureException) QueueConsumer(co.cask.cdap.data2.queue.QueueConsumer) Transaction(org.apache.tephra.Transaction) QueueProducer(co.cask.cdap.data2.queue.QueueProducer) TransactionContext(org.apache.tephra.TransactionContext) TransactionAware(org.apache.tephra.TransactionAware) Collection(java.util.Collection) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) QueueName(co.cask.cdap.common.queue.QueueName) Test(org.junit.Test)
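
Stripped of the flip-flopping TransactionAware, the visibility rule this test relies on is simple: an enqueue is not visible to a dequeue in the same, still-uncommitted transaction, and a rolled-back dequeue leaves the entry available to the next transaction. A condensed sketch, assuming the producer, consumer and TransactionContext from the test:

txContext.start();
producer.enqueue(new QueueEntry(Bytes.toBytes(1)));
// same transaction: the enqueued entry is not visible yet
Assert.assertTrue(consumer.dequeue().isEmpty());
txContext.finish();

txContext.start();
// a later transaction sees the committed entry
Assert.assertEquals(1, Bytes.toInt(consumer.dequeue().iterator().next()));
txContext.finish();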

Example 10 with ConsumerConfig

Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.

The class QueueTest, method testConcurrentEnqueue.

@Category(SlowTests.class)
@Test
public void testConcurrentEnqueue() throws Exception {
    // This test uses multiple producers that write with a delay after their transaction has started.
    // It verifies that the consumer advances the startKey correctly.
    final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "concurrent");
    configureGroups(queueName, ImmutableList.of(new ConsumerGroupConfig(0, 1, DequeueStrategy.FIFO, null)));
    final CyclicBarrier barrier = new CyclicBarrier(4);
    ConsumerConfig consumerConfig = new ConsumerConfig(0, 0, 1, DequeueStrategy.FIFO, null);
    // Start three producers that enqueue concurrently. For each entry: start a TX, sleep, enqueue, commit.
    ExecutorService executor = Executors.newFixedThreadPool(3);
    final int entryCount = 50;
    for (int i = 0; i < 3; i++) {
        final QueueProducer producer = queueClientFactory.createProducer(queueName);
        final int producerId = i + 1;
        executor.execute(new Runnable() {

            @Override
            public void run() {
                try {
                    barrier.await();
                    for (int i = 0; i < entryCount; i++) {
                        TransactionContext txContext = createTxContext(producer);
                        txContext.start();
                        // Sleep at different rates so the consumer's scan has a higher chance of seeing
                        // the transaction but not the entry (as it has not yet been written)
                        TimeUnit.MILLISECONDS.sleep(producerId * 50);
                        producer.enqueue(new QueueEntry(Bytes.toBytes(i)));
                        txContext.finish();
                    }
                } catch (Exception e) {
                    LOG.error(e.getMessage(), e);
                } finally {
                    Closeables.closeQuietly(producer);
                }
            }
        });
    }
    // sum(0..entryCount-1) * 3
    int expectedSum = entryCount * (entryCount - 1) / 2 * 3;
    try (QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
        // Trigger the start of the producers
        barrier.await();
        int dequeueSum = 0;
        int noProgress = 0;
        while (dequeueSum != expectedSum && noProgress < 200) {
            TransactionContext txContext = createTxContext(consumer);
            txContext.start();
            DequeueResult<byte[]> result = consumer.dequeue();
            if (!result.isEmpty()) {
                noProgress = 0;
                int value = Bytes.toInt(result.iterator().next());
                dequeueSum += value;
            } else {
                noProgress++;
                TimeUnit.MILLISECONDS.sleep(10);
            }
            txContext.finish();
        }
        Assert.assertEquals(expectedSum, dequeueSum);
    }
}
Also used : QueueEntry(co.cask.cdap.data2.queue.QueueEntry) TransactionFailureException(org.apache.tephra.TransactionFailureException) CyclicBarrier(java.util.concurrent.CyclicBarrier) QueueConsumer(co.cask.cdap.data2.queue.QueueConsumer) QueueProducer(co.cask.cdap.data2.queue.QueueProducer) TransactionContext(org.apache.tephra.TransactionContext) ExecutorService(java.util.concurrent.ExecutorService) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) QueueName(co.cask.cdap.common.queue.QueueName) ConsumerGroupConfig(co.cask.cdap.data2.queue.ConsumerGroupConfig) Category(org.junit.experimental.categories.Category) Test(org.junit.Test)
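
As a quick sanity check of the expected total, using only the constants from the test: each of the three producers enqueues the values 0 through entryCount - 1, so the consumer should accumulate three times the sum of that range.

int entryCount = 50;
// sum(0..entryCount-1) = entryCount * (entryCount - 1) / 2 = 50 * 49 / 2 = 1225
// three producers write the same range, so the expected total is 1225 * 3 = 3675
int expectedSum = entryCount * (entryCount - 1) / 2 * 3;   // 3675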

Aggregations

ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig) 33
QueueConsumer (co.cask.cdap.data2.queue.QueueConsumer) 17
TransactionContext (org.apache.tephra.TransactionContext) 14
Test (org.junit.Test) 14
QueueName (co.cask.cdap.common.queue.QueueName) 12
ConsumerGroupConfig (co.cask.cdap.data2.queue.ConsumerGroupConfig) 12
QueueEntry (co.cask.cdap.data2.queue.QueueEntry) 8
Transaction (org.apache.tephra.Transaction) 8
StreamEvent (co.cask.cdap.api.flow.flowlet.StreamEvent) 7
QueueProducer (co.cask.cdap.data2.queue.QueueProducer) 7
QueueEntryRow (co.cask.cdap.data2.transaction.queue.QueueEntryRow) 7
StreamId (co.cask.cdap.proto.id.StreamId) 7
Cell (org.apache.hadoop.hbase.Cell) 7
Filter (org.apache.hadoop.hbase.filter.Filter) 7
FilterList (org.apache.hadoop.hbase.filter.FilterList) 7
IOException (java.io.IOException) 6
TransactionFailureException (org.apache.tephra.TransactionFailureException) 6
TransactionExecutor (org.apache.tephra.TransactionExecutor) 5
DequeueResult (co.cask.cdap.data2.queue.DequeueResult) 4
DequeueStrategy (co.cask.cdap.data2.queue.DequeueStrategy) 3