
Example 46 with Scanner

use of co.cask.cdap.api.dataset.table.Scanner in project cdap by caskdata.

the class HBaseConsumerStateStore method completed.

/**
 * Called by consumer to signal process completion up to the current barrier that the consumer is in.
 */
void completed(long groupId, int instanceId) {
    // Get the current consumer state to get the end barrier info
    ConsumerState consumerState = getConsumerState(groupId, instanceId);
    QueueBarrier nextBarrier = consumerState.getNextBarrier();
    if (nextBarrier == null) {
        // The next barrier shouldn't be null if this method is called
        throw new IllegalArgumentException(String.format("No end barrier information for consumer. Queue: %s, GroupId: %d, InstanceId: %d", queueName, groupId, instanceId));
    }
    byte[] stateColumn = getConsumerStateColumn(groupId, instanceId);
    // If the instance exists in the next barrier, set the start row to the barrier start
    if (instanceId < nextBarrier.getGroupConfig().getGroupSize()) {
        table.put(queueName.toBytes(), stateColumn, nextBarrier.getStartRow());
        return;
    }
    // find the next start barrier that this instance needs to consume from
    try (Scanner scanner = table.scan(Bytes.add(queueName.toBytes(), nextBarrier.getStartRow()), barrierScanEndRow)) {
        Row row;
        boolean found = false;
        while (!found && (row = scanner.next()) != null) {
            QueueBarrier queueBarrier = decodeBarrierInfo(row, groupId);
            if (queueBarrier == null || instanceId >= queueBarrier.getGroupConfig().getGroupSize()) {
                continue;
            }
            table.put(queueName.toBytes(), stateColumn, queueBarrier.getStartRow());
            found = true;
        }
        if (!found) {
            // Remove the state since this consumer instance is no longer active
            table.delete(queueName.toBytes(), stateColumn);
        }
    }
}
Also used : Scanner(co.cask.cdap.api.dataset.table.Scanner) QueueEntryRow(co.cask.cdap.data2.transaction.queue.QueueEntryRow) Row(co.cask.cdap.api.dataset.table.Row)
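
The loop in completed() follows a common pattern: scan a bounded key range, stop at the first row that satisfies a condition, and let try-with-resources close the Scanner. The sketch below is a hypothetical standalone helper (not part of CDAP; the name and Predicate parameter are illustrative) showing just that pattern with the Table/Scanner calls used above.

import co.cask.cdap.api.dataset.table.Row;
import co.cask.cdap.api.dataset.table.Scanner;
import co.cask.cdap.api.dataset.table.Table;
import java.util.function.Predicate;

// Hypothetical helper class, not part of the CDAP codebase.
final class ScanUtil {

    // Return the first row in [startRow, stopRow) accepted by the predicate, or null
    // if no row matches. try-with-resources closes the Scanner even on early return.
    static Row findFirstMatch(Table table, byte[] startRow, byte[] stopRow, Predicate<Row> accept) {
        try (Scanner scanner = table.scan(startRow, stopRow)) {
            Row row;
            while ((row = scanner.next()) != null) {
                if (accept.test(row)) {
                    return row;
                }
            }
            return null;
        }
    }
}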

Example 47 with Scanner

use of co.cask.cdap.api.dataset.table.Scanner in project cdap by caskdata.

the class HBaseQueueAdmin method deleteFlowConfigs.

private void deleteFlowConfigs(FlowId flowId) throws Exception {
    // It's a bit hacky here since we know how the HBaseConsumerStateStore works.
    // Maybe we need another Dataset that works across all queues.
    final QueueName prefixName = QueueName.from(URI.create(QueueName.prefixForFlow(flowId)));
    DatasetId stateStoreId = getStateStoreId(flowId.getNamespace());
    Map<String, String> args = ImmutableMap.of(HBaseQueueDatasetModule.PROPERTY_QUEUE_NAME, prefixName.toString());
    HBaseConsumerStateStore stateStore = datasetFramework.getDataset(stateStoreId, args, null);
    if (stateStore == null) {
        // If the state store doesn't exist, there is no queue, hence nothing to do.
        return;
    }
    try {
        final Table table = stateStore.getInternalTable();
        Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) table).execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                // Prefix name is "/" terminated ("queue:///namespace/app/flow/"), hence the scan is unique for the flow
                byte[] startRow = Bytes.toBytes(prefixName.toString());
                try (Scanner scanner = table.scan(startRow, Bytes.stopKeyForPrefix(startRow))) {
                    Row row = scanner.next();
                    while (row != null) {
                        table.delete(row.getRow());
                        row = scanner.next();
                    }
                }
            }
        });
    } finally {
        stateStore.close();
    }
}
Also used : Scanner(co.cask.cdap.api.dataset.table.Scanner) Table(co.cask.cdap.api.dataset.table.Table) HTable(org.apache.hadoop.hbase.client.HTable) TransactionExecutor(org.apache.tephra.TransactionExecutor) DatasetManagementException(co.cask.cdap.api.dataset.DatasetManagementException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) DatasetId(co.cask.cdap.proto.id.DatasetId) TransactionAware(org.apache.tephra.TransactionAware) QueueEntryRow(co.cask.cdap.data2.transaction.queue.QueueEntryRow) Row(co.cask.cdap.api.dataset.table.Row) QueueName(co.cask.cdap.common.queue.QueueName)
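
Inside its transaction, deleteFlowConfigs is essentially a range delete: scan every row in [startRow, stopKeyForPrefix(startRow)) and delete it by key. A minimal sketch of that inner loop, with a hypothetical helper name and the range passed in as explicit bounds, might look like this; as in the example, the caller is still expected to run it inside a TransactionExecutor.

import co.cask.cdap.api.dataset.table.Row;
import co.cask.cdap.api.dataset.table.Scanner;
import co.cask.cdap.api.dataset.table.Table;

// Hypothetical helper, not part of the CDAP codebase.
final class RangeDelete {

    // Delete every row whose key falls in [startRow, stopRow).
    // Must be invoked inside a transaction, as in deleteFlowConfigs above.
    static void deleteRange(Table table, byte[] startRow, byte[] stopRow) {
        try (Scanner scanner = table.scan(startRow, stopRow)) {
            Row row;
            while ((row = scanner.next()) != null) {
                // delete the whole row by its key
                table.delete(row.getRow());
            }
        }
    }
}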

Example 48 with Scanner

use of co.cask.cdap.api.dataset.table.Scanner in project cdap by caskdata.

the class TableTest method testMetrics.

private void testMetrics(boolean readless) throws Exception {
    final String tableName = "survive";
    DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
    DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
    admin.create();
    Table table = getTable(CONTEXT1, tableName, props);
    final Map<String, Long> metrics = Maps.newHashMap();
    ((MeteredDataset) table).setMetricsCollector(new MetricsCollector() {

        @Override
        public void increment(String metricName, long value) {
            Long old = metrics.get(metricName);
            metrics.put(metricName, old == null ? value : old + value);
        }

        @Override
        public void gauge(String metricName, long value) {
            metrics.put(metricName, value);
        }
    });
    // Note that we don't need to finish tx for metrics to be reported
    Transaction tx0 = txClient.startShort();
    ((TransactionAware) table).startTx(tx0);
    int writes = 0;
    int reads = 0;
    table.put(new Put(R1, C1, V1));
    verifyDatasetMetrics(metrics, ++writes, reads);
    table.compareAndSwap(R1, C1, V1, V2);
    verifyDatasetMetrics(metrics, ++writes, ++reads);
    // note: will not write anything as expected value will not match
    table.compareAndSwap(R1, C1, V1, V2);
    verifyDatasetMetrics(metrics, writes, ++reads);
    table.increment(new Increment(R2, C2, 1L));
    if (readless) {
        verifyDatasetMetrics(metrics, ++writes, reads);
    } else {
        verifyDatasetMetrics(metrics, ++writes, ++reads);
    }
    table.incrementAndGet(new Increment(R2, C2, 1L));
    verifyDatasetMetrics(metrics, ++writes, ++reads);
    table.get(new Get(R1, C1, V1));
    verifyDatasetMetrics(metrics, writes, ++reads);
    Scanner scanner = table.scan(new Scan(null, null));
    while (scanner.next() != null) {
        verifyDatasetMetrics(metrics, writes, ++reads);
    }
    table.delete(new Delete(R1, C1, V1));
    verifyDatasetMetrics(metrics, ++writes, reads);
    // drop table
    admin.drop();
}
Also used : MetricsCollector(co.cask.cdap.api.metrics.MetricsCollector) Delete(co.cask.cdap.api.dataset.table.Delete) Scanner(co.cask.cdap.api.dataset.table.Scanner) Table(co.cask.cdap.api.dataset.table.Table) HBaseTable(co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable) DatasetProperties(co.cask.cdap.api.dataset.DatasetProperties) DatasetAdmin(co.cask.cdap.api.dataset.DatasetAdmin) Put(co.cask.cdap.api.dataset.table.Put) Transaction(org.apache.tephra.Transaction) TransactionAware(org.apache.tephra.TransactionAware) Increment(co.cask.cdap.api.dataset.table.Increment) Get(co.cask.cdap.api.dataset.table.Get) MeteredDataset(co.cask.cdap.api.dataset.metrics.MeteredDataset) Scan(co.cask.cdap.api.dataset.table.Scan)
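
The anonymous MetricsCollector in testMetrics simply accumulates increment() calls per metric name so the test can compare totals after each table operation. Below is a hypothetical named equivalent (not CDAP code, and assuming, as the anonymous class above suggests, that MetricsCollector is an interface declaring just these two methods); Map.merge keeps the bookkeeping to one line and the class can be reused across tests.

import co.cask.cdap.api.metrics.MetricsCollector;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical named equivalent of the anonymous collector above, not part of CDAP.
final class AccumulatingMetricsCollector implements MetricsCollector {

    final Map<String, Long> totals = new ConcurrentHashMap<>();

    @Override
    public void increment(String metricName, long value) {
        // add to the running total for this metric, starting at the given value
        totals.merge(metricName, value, Long::sum);
    }

    @Override
    public void gauge(String metricName, long value) {
        // gauges record the latest value rather than a sum
        totals.put(metricName, value);
    }
}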

Example 49 with Scanner

use of co.cask.cdap.api.dataset.table.Scanner in project cdap by caskdata.

the class TableTest method verifyScanWithFuzzyRowFilter.

private static void verifyScanWithFuzzyRowFilter(Table table) {
    FuzzyRowFilter filter = new FuzzyRowFilter(ImmutableList.of(ImmutablePair.of(new byte[] { '*', 'b', '*', 'b' }, new byte[] { 0x01, 0x00, 0x01, 0x00 })));
    Scanner scanner = table.scan(new Scan(null, null, filter));
    int count = 0;
    while (true) {
        Row entry = scanner.next();
        if (entry == null) {
            break;
        }
        Assert.assertTrue(entry.getRow()[1] == 'b' && entry.getRow()[3] == 'b');
        Assert.assertEquals(1, entry.getColumns().size());
        Assert.assertTrue(entry.getColumns().containsKey(C1));
        Assert.assertArrayEquals(V1, entry.get(C1));
        count++;
    }
    Assert.assertEquals(9, count);
}
Also used : Scanner(co.cask.cdap.api.dataset.table.Scanner) Scan(co.cask.cdap.api.dataset.table.Scan) Row(co.cask.cdap.api.dataset.table.Row)
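
The mask bytes appear to follow the usual HBase FuzzyRowFilter convention: 0x00 marks a key position that must equal the pattern byte, and 0x01 marks a wildcard position, which is why the assertion above only checks row-key bytes 1 and 3. The sketch below is hypothetical (same imports as the test above, plus Guava's ImmutableList and CDAP's ImmutablePair, and it assumes the same 4-byte row keys): it fixes only the first key byte to 'a' and counts the matching rows.

// Hypothetical sketch, assuming the mask convention 0x00 = fixed, 0x01 = wildcard.
private static int countRowsStartingWithA(Table table) {
    FuzzyRowFilter filter = new FuzzyRowFilter(ImmutableList.of(ImmutablePair.of(
        // pattern: only byte 0 matters, the remaining bytes are placeholders
        new byte[] { 'a', 0x00, 0x00, 0x00 },
        // mask: byte 0 is fixed, bytes 1-3 are wildcards
        new byte[] { 0x00, 0x01, 0x01, 0x01 })));
    int count = 0;
    try (Scanner scanner = table.scan(new Scan(null, null, filter))) {
        while (scanner.next() != null) {
            count++;
        }
    }
    return count;
}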

Example 50 with Scanner

use of co.cask.cdap.api.dataset.table.Scanner in project cdap by caskdata.

the class TableTest method countRows.

private static int countRows(Table table) throws Exception {
    Scanner scanner = table.scan(null, null);
    int count = 0;
    while (scanner.next() != null) {
        count++;
    }
    return count;
}
Also used : Scanner(co.cask.cdap.api.dataset.table.Scanner)
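
countRows leaves its Scanner open when it returns. A variant that releases the scan with try-with-resources, shown here as a sketch rather than code from the CDAP test, keeps the same row counting while closing the Scanner even if next() throws.

// Hypothetical variant of countRows above that closes the Scanner when done.
private static int countRowsAndClose(Table table) {
    int count = 0;
    try (Scanner scanner = table.scan(null, null)) {
        while (scanner.next() != null) {
            count++;
        }
    }
    return count;
}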

Aggregations

Scanner (co.cask.cdap.api.dataset.table.Scanner): 78
Row (co.cask.cdap.api.dataset.table.Row): 67
Scan (co.cask.cdap.api.dataset.table.Scan): 14
ArrayList (java.util.ArrayList): 14
Test (org.junit.Test): 13
Table (co.cask.cdap.api.dataset.table.Table): 12
Map (java.util.Map): 11
DatasetId (co.cask.cdap.proto.id.DatasetId): 8
TransactionExecutor (org.apache.tephra.TransactionExecutor): 8
MDSKey (co.cask.cdap.data2.dataset2.lib.table.MDSKey): 6
QueueEntryRow (co.cask.cdap.data2.transaction.queue.QueueEntryRow): 6
IOException (java.io.IOException): 6
HashMap (java.util.HashMap): 6
Put (co.cask.cdap.api.dataset.table.Put): 5
ImmutableMap (com.google.common.collect.ImmutableMap): 5
SortedMap (java.util.SortedMap): 5
DatasetProperties (co.cask.cdap.api.dataset.DatasetProperties): 4
Get (co.cask.cdap.api.dataset.table.Get): 4
FuzzyRowFilter (co.cask.cdap.data2.dataset2.lib.table.FuzzyRowFilter): 4
ProgramSchedule (co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule): 4