
Example 11 with ConsumerGroupConfig

use of co.cask.cdap.data2.queue.ConsumerGroupConfig in project cdap by caskdata.

the class HBaseQueueTest method configTest.

@Test
public void configTest() throws Exception {
    final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "configure");
    queueAdmin.create(queueName);
    final List<ConsumerGroupConfig> groupConfigs = ImmutableList.of(new ConsumerGroupConfig(1L, 1, DequeueStrategy.FIFO, null), new ConsumerGroupConfig(2L, 2, DequeueStrategy.FIFO, null), new ConsumerGroupConfig(3L, 3, DequeueStrategy.FIFO, null));
    try (HBaseConsumerStateStore stateStore = ((HBaseQueueAdmin) queueAdmin).getConsumerStateStore(queueName)) {
        TransactionExecutor txExecutor = Transactions.createTransactionExecutor(executorFactory, stateStore);
        // Intentionally set a row state for group 2, instance 0. This is for testing the config upgrade path.
        txExecutor.execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                stateStore.updateState(2L, 0, QueueEntryRow.getQueueEntryRowKey(queueName, 10L, 0));
            }
        });
        // Set the group info
        configureGroups(queueName, groupConfigs);
        txExecutor.execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                for (ConsumerGroupConfig groupConfig : groupConfigs) {
                    long groupId = groupConfig.getGroupId();
                    List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(groupId);
                    Assert.assertEquals(1, queueBarriers.size());
                    for (int instanceId = 0; instanceId < groupConfig.getGroupSize(); instanceId++) {
                        HBaseConsumerState state = stateStore.getState(groupId, instanceId);
                        if (groupId == 2L && instanceId == 0) {
                            // For group 2, instance 0, the start row shouldn't be changed.
                            // The end row should be the same as the first barrier's start row.
                            Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(), QueueEntryRow.getQueueEntryRowKey(queueName, 10L, 0)));
                            Assert.assertEquals(0, Bytes.compareTo(state.getNextBarrier(), queueBarriers.get(0).getStartRow()));
                        } else {
                            // For the other groups, the start row should be the same as the first barrier's start row
                            Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(), queueBarriers.get(0).getStartRow()));
                        }
                    }
                }
            }
        });
        txExecutor.execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                // Check that all consumers have processed up to the barrier boundary
                for (long groupId = 1L; groupId <= 3L; groupId++) {
                    List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(groupId);
                    boolean allConsumed = stateStore.isAllConsumed(groupId, queueBarriers.get(0).getStartRow());
                    // For group 2, instance 0 has not consumed up to the boundary yet
                    Assert.assertTrue((groupId == 2L) != allConsumed);
                    if (groupId == 2L) {
                        // Mark group 2, instance 0 as having completed the barrier.
                        stateStore.completed(groupId, 0);
                    }
                }
            }
        });
        txExecutor.execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                // After group 2, instance 0 completed the current barrier, all consumers in group 2 should be able to
                // proceed
                List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(2L);
                byte[] startRow = stateStore.getState(2L, 0).getStartRow();
                Assert.assertEquals(0, Bytes.compareTo(startRow, queueBarriers.get(0).getStartRow()));
                Assert.assertTrue(stateStore.isAllConsumed(2L, startRow));
            }
        });
        // Add instance to group 2
        txExecutor.execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                stateStore.configureInstances(2L, 3);
            }
        });
        txExecutor.execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(2L);
                Assert.assertEquals(2, queueBarriers.size());
                // For existing instances, the start row shouldn't change.
                for (int instanceId = 0; instanceId < 2; instanceId++) {
                    HBaseConsumerState state = stateStore.getState(2L, instanceId);
                    Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(), queueBarriers.get(0).getStartRow()));
                    Assert.assertEquals(0, Bytes.compareTo(state.getNextBarrier(), queueBarriers.get(1).getStartRow()));
                    // Complete the existing instance
                    stateStore.completed(2L, instanceId);
                }
                // For the new instance, the start row should be the same as the new barrier's start row
                HBaseConsumerState state = stateStore.getState(2L, 2);
                Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(), queueBarriers.get(1).getStartRow()));
                Assert.assertNull(state.getNextBarrier());
                // All instances should have consumed up to the start of the last barrier
                Assert.assertTrue(stateStore.isAllConsumed(2L, queueBarriers.get(1).getStartRow()));
            }
        });
        // Reduce instances of group 2 through group reconfiguration, remove groups 1 and 3, add group 4.
        configureGroups(queueName, ImmutableList.of(new ConsumerGroupConfig(2L, 1, DequeueStrategy.FIFO, null), new ConsumerGroupConfig(4L, 1, DequeueStrategy.FIFO, null)));
        txExecutor.execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                // States and barrier info for removed groups should be gone
                try {
                    // There should be no barrier info for group 1
                    List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(1L);
                    Assert.assertTrue(queueBarriers.isEmpty());
                    stateStore.getState(1L, 0);
                    Assert.fail("Not expected to get state for group 1");
                } catch (Exception e) {
                // Expected
                }
                try {
                    // There should be no barrier info for group 3
                    List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(3L);
                    Assert.assertTrue(queueBarriers.isEmpty());
                    stateStore.getState(3L, 0);
                    Assert.fail("Not expected to get state for group 3");
                } catch (Exception e) {
                // Expected
                }
                // For group 2, there should be two barrier infos left (size = 3 and size = 1),
                // since all consumers have passed the first barrier (groupSize = 2)
                List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(2L);
                Assert.assertEquals(2, queueBarriers.size());
                // Make all consumers in group 2 (3 of them before the reconfiguration) consume everything
                for (int instanceId = 0; instanceId < 3; instanceId++) {
                    stateStore.completed(2L, instanceId);
                }
                // The remaining consumer should start consuming from the latest barrier
                HBaseConsumerState state = stateStore.getState(2L, 0);
                Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(), queueBarriers.get(1).getStartRow()));
                Assert.assertNull(state.getNextBarrier());
                // Retrieving state for removed instances should throw an exception
                for (int i = 1; i < 3; i++) {
                    try {
                        stateStore.getState(2L, i);
                        Assert.fail("Not expected to get state for group 2, instance " + i);
                    } catch (Exception e) {
                    // Expected
                    }
                }
            }
        });
    } finally {
        queueAdmin.dropAllInNamespace(NamespaceId.DEFAULT);
    }
}
Also used : TransactionExecutor(org.apache.tephra.TransactionExecutor) ArrayList(java.util.ArrayList) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) QueueName(co.cask.cdap.common.queue.QueueName) ConsumerGroupConfig(co.cask.cdap.data2.queue.ConsumerGroupConfig) IOException(java.io.IOException) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) Test(org.junit.Test) QueueTest(co.cask.cdap.data2.transaction.queue.QueueTest)
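
The configureGroups helper that the test above calls is not shown in this snippet. A minimal sketch of what it presumably does, under the assumption that the test's QueueAdmin exposes a configureGroups method accepting the queue name and group configs; the helper below is hypothetical and reconstructed only for illustration:

// Hypothetical test helper; the real one in HBaseQueueTest may differ.
// It forwards the consumer group configuration to the queue admin, which writes the
// barrier info rows that the assertions above inspect.
private void configureGroups(QueueName queueName,
                             Iterable<? extends ConsumerGroupConfig> groupConfigs) throws Exception {
    queueAdmin.configureGroups(queueName, groupConfigs);
}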

Example 12 with ConsumerGroupConfig

use of co.cask.cdap.data2.queue.ConsumerGroupConfig in project cdap by caskdata.

the class HBaseQueueDebugger method scanQueue.

private void scanQueue(TransactionExecutor txExecutor, HBaseConsumerStateStore stateStore, QueueName queueName, QueueBarrier start, @Nullable QueueBarrier end, final QueueStatistics outStats) throws Exception {
    final byte[] queueRowPrefix = QueueEntryRow.getQueueRowPrefix(queueName);
    ConsumerGroupConfig groupConfig = start.getGroupConfig();
    printProgress("Got consumer group config: %s\n", groupConfig);
    HBaseQueueAdmin admin = queueClientFactory.getQueueAdmin();
    TableId tableId = admin.getDataTableId(queueName, QueueConstants.QueueType.SHARDED_QUEUE);
    HTable hTable = queueClientFactory.createHTable(tableId);
    printProgress("Looking at HBase table: %s\n", Bytes.toString(hTable.getTableName()));
    final byte[] stateColumnName = Bytes.add(QueueEntryRow.STATE_COLUMN_PREFIX, Bytes.toBytes(groupConfig.getGroupId()));
    int distributorBuckets = queueClientFactory.getDistributorBuckets(hTable.getTableDescriptor());
    ShardedHBaseQueueStrategy queueStrategy = new ShardedHBaseQueueStrategy(tableUtil, distributorBuckets);
    ScanBuilder scan = tableUtil.buildScan();
    scan.setStartRow(start.getStartRow());
    if (end != null) {
        scan.setStopRow(end.getStartRow());
    } else {
        scan.setStopRow(QueueEntryRow.getQueueEntryRowKey(queueName, Long.MAX_VALUE, Integer.MAX_VALUE));
    }
    // Need to include the meta column for rows that don't have a state yet.
    scan.addColumn(QueueEntryRow.COLUMN_FAMILY, QueueEntryRow.META_COLUMN);
    scan.addColumn(QueueEntryRow.COLUMN_FAMILY, stateColumnName);
    // Don't use the block cache for the debug tool; we don't want old blocks to get cached
    scan.setCacheBlocks(false);
    scan.setMaxVersions(1);
    printProgress("Scanning section with scan: %s\n", scan.toString());
    List<Integer> instanceIds = Lists.newArrayList();
    if (groupConfig.getDequeueStrategy() == DequeueStrategy.FIFO) {
        instanceIds.add(0);
    } else {
        for (int instanceId = 0; instanceId < groupConfig.getGroupSize(); instanceId++) {
            instanceIds.add(instanceId);
        }
    }
    final int rowsCache = Integer.parseInt(System.getProperty(PROP_ROWS_CACHE, "100000"));
    for (final int instanceId : instanceIds) {
        printProgress("Processing instance %d", instanceId);
        ConsumerConfig consConfig = new ConsumerConfig(groupConfig, instanceId);
        final QueueScanner scanner = queueStrategy.createScanner(consConfig, hTable, scan.build(), rowsCache);
        try {
            txExecutor.execute(new TransactionExecutor.Procedure<HBaseConsumerStateStore>() {

                @Override
                public void apply(HBaseConsumerStateStore input) throws Exception {
                    ImmutablePair<byte[], Map<byte[], byte[]>> result;
                    while ((result = scanner.next()) != null) {
                        byte[] rowKey = result.getFirst();
                        Map<byte[], byte[]> columns = result.getSecond();
                        visitRow(outStats, input.getTransaction(), rowKey, columns.get(stateColumnName), queueRowPrefix.length);
                        if (showProgress() && outStats.getTotal() % rowsCache == 0) {
                            System.out.printf("\rProcessing instance %d: %s", instanceId, outStats.getReport(showTxTimestampOnly()));
                        }
                    }
                }
            }, stateStore);
        } catch (TransactionFailureException e) {
            // Ignore TransactionNotInProgressException, as it's caused by the short TX timeout on commit
            if (!(Throwables.getRootCause(e) instanceof TransactionNotInProgressException)) {
                throw Throwables.propagate(e);
            }
        }
        printProgress("\rProcessing instance %d: %s\n", instanceId, outStats.getReport(showTxTimestampOnly()));
    }
}
Also used : TableId(co.cask.cdap.data2.util.TableId) ShardedHBaseQueueStrategy(co.cask.cdap.data2.transaction.queue.hbase.ShardedHBaseQueueStrategy) ScanBuilder(co.cask.cdap.data2.util.hbase.ScanBuilder) TransactionExecutor(org.apache.tephra.TransactionExecutor) TransactionNotInProgressException(org.apache.tephra.TransactionNotInProgressException) HTable(org.apache.hadoop.hbase.client.HTable) TransactionNotInProgressException(org.apache.tephra.TransactionNotInProgressException) TransactionFailureException(org.apache.tephra.TransactionFailureException) NotFoundException(co.cask.cdap.common.NotFoundException) HBaseConsumerStateStore(co.cask.cdap.data2.transaction.queue.hbase.HBaseConsumerStateStore) TransactionFailureException(org.apache.tephra.TransactionFailureException) ImmutablePair(co.cask.cdap.common.utils.ImmutablePair) HBaseQueueAdmin(co.cask.cdap.data2.transaction.queue.hbase.HBaseQueueAdmin) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) QueueScanner(co.cask.cdap.data2.transaction.queue.QueueScanner) ConsumerGroupConfig(co.cask.cdap.data2.queue.ConsumerGroupConfig) Map(java.util.Map)

Example 13 with ConsumerGroupConfig

use of co.cask.cdap.data2.queue.ConsumerGroupConfig in project cdap by caskdata.

the class FlowUtils method createConsumerGroupConfig.

/**
 * Creates a {@link ConsumerGroupConfig} by inspecting the given process method.
 */
public static ConsumerGroupConfig createConsumerGroupConfig(long groupId, int groupSize, Method processMethod) {
    // Determine input queue partition type
    HashPartition hashPartition = processMethod.getAnnotation(HashPartition.class);
    RoundRobin roundRobin = processMethod.getAnnotation(RoundRobin.class);
    DequeueStrategy strategy = DequeueStrategy.FIFO;
    String hashKey = null;
    Preconditions.checkArgument(!(hashPartition != null && roundRobin != null), "Only one strategy allowed for process() method: %s", processMethod.getName());
    if (hashPartition != null) {
        strategy = DequeueStrategy.HASH;
        hashKey = hashPartition.value();
        Preconditions.checkArgument(!hashKey.isEmpty(), "Partition key cannot be empty: %s", processMethod.getName());
    } else if (roundRobin != null) {
        strategy = DequeueStrategy.ROUND_ROBIN;
    }
    return new ConsumerGroupConfig(groupId, groupSize, strategy, hashKey);
}
Also used : DequeueStrategy(co.cask.cdap.data2.queue.DequeueStrategy) RoundRobin(co.cask.cdap.api.annotation.RoundRobin) HashPartition(co.cask.cdap.api.annotation.HashPartition) ConsumerGroupConfig(co.cask.cdap.data2.queue.ConsumerGroupConfig)
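
A short usage sketch of createConsumerGroupConfig. The flowlet class, method name, hash key, group id, and group size below are made up for illustration, and the FlowUtils package in the import is an assumption (it is not shown in this snippet):

import java.lang.reflect.Method;

import co.cask.cdap.api.annotation.HashPartition;
import co.cask.cdap.data2.queue.ConsumerGroupConfig;
import co.cask.cdap.data2.queue.DequeueStrategy;
// Assumed package for FlowUtils; adjust to where it lives in your CDAP version.
import co.cask.cdap.internal.app.runtime.flow.FlowUtils;

// Hypothetical flowlet-style class used only to show how the annotation drives the strategy.
class ExampleFlowlet {
    @HashPartition("deviceId")
    public void process(String event) {
        // ... process the event
    }
}

class CreateGroupConfigExample {
    public static void main(String[] args) throws Exception {
        Method processMethod = ExampleFlowlet.class.getMethod("process", String.class);
        // Arbitrary group id and group size for the sketch.
        ConsumerGroupConfig config = FlowUtils.createConsumerGroupConfig(1L, 3, processMethod);
        // @HashPartition("deviceId") yields DequeueStrategy.HASH keyed on "deviceId";
        // @RoundRobin would yield ROUND_ROBIN, and no annotation falls back to FIFO.
        System.out.println(config.getDequeueStrategy() == DequeueStrategy.HASH);
    }
}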

Example 14 with ConsumerGroupConfig

use of co.cask.cdap.data2.queue.ConsumerGroupConfig in project cdap by caskdata.

the class HBaseConsumerStateStore method decodeBarrierInfo.

@Nullable
private QueueBarrier decodeBarrierInfo(byte[] rowKey, @Nullable byte[] groupInfo) {
    if (groupInfo == null) {
        return null;
    }
    ConsumerGroupConfig groupConfig = GSON.fromJson(new String(groupInfo, Charsets.UTF_8), ConsumerGroupConfig.class);
    byte[] startRow = Arrays.copyOfRange(rowKey, queueName.toBytes().length, rowKey.length);
    return new QueueBarrier(groupConfig, startRow);
}
Also used : ConsumerGroupConfig(co.cask.cdap.data2.queue.ConsumerGroupConfig) Nullable(javax.annotation.Nullable)
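
For context, a minimal sketch of the JSON encoding that decodeBarrierInfo reverses, assuming a plain Gson instance is sufficient for this POJO (the project's GSON field may be configured with extra type adapters):

import com.google.common.base.Charsets;
import com.google.gson.Gson;

import co.cask.cdap.data2.queue.ConsumerGroupConfig;
import co.cask.cdap.data2.queue.DequeueStrategy;

public class BarrierInfoRoundTrip {
    public static void main(String[] args) {
        Gson gson = new Gson();
        // The barrier row value is the consumer group config serialized as JSON in UTF-8 bytes.
        ConsumerGroupConfig original = new ConsumerGroupConfig(2L, 2, DequeueStrategy.FIFO, null);
        byte[] groupInfo = gson.toJson(original).getBytes(Charsets.UTF_8);

        // decodeBarrierInfo deserializes these bytes back into a ConsumerGroupConfig and pairs it
        // with the start row sliced out of the barrier row key.
        ConsumerGroupConfig decoded = gson.fromJson(new String(groupInfo, Charsets.UTF_8),
                                                    ConsumerGroupConfig.class);
        System.out.println(decoded.getGroupId() + " " + decoded.getGroupSize()
                           + " " + decoded.getDequeueStrategy());
    }
}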

Example 15 with ConsumerGroupConfig

use of co.cask.cdap.data2.queue.ConsumerGroupConfig in project cdap by caskdata.

the class HBaseQueueClientFactory method createProducer.

@Override
public QueueProducer createProducer(QueueName queueName, QueueMetrics queueMetrics) throws IOException {
    HBaseQueueAdmin admin = ensureTableExists(queueName);
    try {
        final List<ConsumerGroupConfig> groupConfigs = Lists.newArrayList();
        try (HBaseConsumerStateStore stateStore = admin.getConsumerStateStore(queueName)) {
            Transactions.createTransactionExecutor(txExecutorFactory, stateStore).execute(new Subroutine() {

                @Override
                public void apply() throws Exception {
                    stateStore.getLatestConsumerGroups(groupConfigs);
                }
            });
        }
        Preconditions.checkState(!groupConfigs.isEmpty(), "Missing consumer group information for queue %s", queueName);
        HTable hTable = createHTable(admin.getDataTableId(queueName, queueAdmin.getType()));
        int distributorBuckets = getDistributorBuckets(hTable.getTableDescriptor());
        return createProducer(hTable, queueName, queueMetrics, new ShardedHBaseQueueStrategy(hBaseTableUtil, distributorBuckets), groupConfigs);
    } catch (Exception e) {
        Throwables.propagateIfPossible(e);
        throw new IOException(e);
    }
}
Also used : Subroutine(org.apache.tephra.TransactionExecutor.Subroutine) IOException(java.io.IOException) HTable(org.apache.hadoop.hbase.client.HTable) ConsumerGroupConfig(co.cask.cdap.data2.queue.ConsumerGroupConfig) IOException(java.io.IOException)
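
A hedged sketch of the producer side that this factory method serves: enqueueing a single entry inside a Tephra transaction. The queueClientFactory and txClient fields, the no-metrics createProducer overload, the QueueEntry(byte[]) constructor, and the producer being TransactionAware are all assumptions made for this illustration:

// Sketch only: assumes queueClientFactory and txClient exist in the surrounding class,
// and that the HBase queue producer participates in transactions via TransactionAware.
private void enqueueOne(QueueName queueName, byte[] payload) throws Exception {
    try (QueueProducer producer = queueClientFactory.createProducer(queueName)) {
        TransactionContext txContext = new TransactionContext(txClient, (TransactionAware) producer);
        txContext.start();
        producer.enqueue(new QueueEntry(payload));
        txContext.finish();
    }
}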

Aggregations

ConsumerGroupConfig (co.cask.cdap.data2.queue.ConsumerGroupConfig) 23
ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig) 12
QueueName (co.cask.cdap.common.queue.QueueName) 11
QueueConsumer (co.cask.cdap.data2.queue.QueueConsumer) 10
TransactionContext (org.apache.tephra.TransactionContext) 8
Test (org.junit.Test) 8
QueueEntry (co.cask.cdap.data2.queue.QueueEntry) 6
QueueProducer (co.cask.cdap.data2.queue.QueueProducer) 5
IOException (java.io.IOException) 5
Map (java.util.Map) 5
TransactionExecutor (org.apache.tephra.TransactionExecutor) 5
TransactionFailureException (org.apache.tephra.TransactionFailureException) 4
DequeueResult (co.cask.cdap.data2.queue.DequeueResult) 3
QueueTest (co.cask.cdap.data2.transaction.queue.QueueTest) 3
TableNotFoundException (org.apache.hadoop.hbase.TableNotFoundException) 3
Put (co.cask.cdap.api.dataset.table.Put) 2
Row (co.cask.cdap.api.dataset.table.Row) 2
Scanner (co.cask.cdap.api.dataset.table.Scanner) 2
FlowletDefinition (co.cask.cdap.api.flow.FlowletDefinition) 2
QueueSpecification (co.cask.cdap.app.queue.QueueSpecification) 2