Search in sources :

Example 1 with Increment

use of co.cask.cdap.api.dataset.table.Increment in project cdap by caskdata.

From the class TableTest, the method testMultiIncrementWithFlush:

/**
 * Exercises increments that are flushed mid-transaction via {@code commitTx()},
 * both with and without readless-increment support.
 *
 * <p>First writes an increment and invalidates its transaction, verifying the
 * excluded write is not visible. Then, within a single transaction, flushes one
 * increment, writes another, and (in readless mode) writes a third through a
 * second table instance sharing the same transaction. Finally verifies that a
 * new transaction sees the sum of all committed increments.
 *
 * @param readless whether the table is created with readless increment support
 * @throws Exception on any dataset or transaction failure
 */
private void testMultiIncrementWithFlush(boolean readless) throws Exception {
    final String tableName = "incrFlush";
    DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
    DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
    admin.create();
    Map<String, String> args = new HashMap<>();
    if (readless) {
        args.put(HBaseTable.SAFE_INCREMENTS, "true");
    }
    Table table = getTable(CONTEXT1, tableName, props, args);
    Transaction tx = txClient.startShort();
    try {
        ((TransactionAware) table).startTx(tx);
        // Write an increment, then flush it by calling commitTx.
        table.increment(new Increment(R1, C1, 10L));
        ((TransactionAware) table).commitTx();
    } finally {
        // invalidate the tx, leaving an excluded write in the table
        txClient.invalidate(tx.getTransactionId());
    }
    // validate the first write is not visible
    tx = txClient.startShort();
    try {
        ((TransactionAware) table).startTx(tx);
        Assert.assertEquals(null, table.get(new Get(R1, C1)).getLong(C1));
    } finally {
        txClient.commit(tx);
    }
    tx = txClient.startShort();
    try {
        ((TransactionAware) table).startTx(tx);
        // Write an increment, then flush it by calling commitTx.
        table.increment(new Increment(R1, C1, 1L));
        ((TransactionAware) table).commitTx();
        // Write another increment, from both table instances
        table.increment(new Increment(R1, C1, 1L));
        if (readless) {
            // a second instance sharing the same tx can also increment safely
            Table table2 = getTable(CONTEXT1, tableName, props, args);
            ((TransactionAware) table2).startTx(tx);
            table2.increment(new Increment(R1, C1, 1L));
            ((TransactionAware) table2).commitTx();
        }
        ((TransactionAware) table).commitTx();
    } finally {
        txClient.commit(tx);
    }
    // validate all increments are visible to a new tx
    tx = txClient.startShort();
    try {
        ((TransactionAware) table).startTx(tx);
        // Long.valueOf replaces the deprecated Long(long) constructor
        Assert.assertEquals(Long.valueOf(readless ? 3L : 2L), table.get(new Get(R1, C1)).getLong(C1));
    } finally {
        txClient.commit(tx);
    }
    // drop table
    admin.drop();
}
Also used : Table(co.cask.cdap.api.dataset.table.Table) HBaseTable(co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable) Transaction(org.apache.tephra.Transaction) HashMap(java.util.HashMap) TransactionAware(org.apache.tephra.TransactionAware) DatasetProperties(co.cask.cdap.api.dataset.DatasetProperties) Increment(co.cask.cdap.api.dataset.table.Increment) Get(co.cask.cdap.api.dataset.table.Get) DatasetAdmin(co.cask.cdap.api.dataset.DatasetAdmin)

Example 2 with Increment

use of co.cask.cdap.api.dataset.table.Increment in project cdap by caskdata.

From the class WordSplitter, the method process:

/**
 * Splits an incoming stream event into words. Each word (after stripping all
 * non-alphabetic characters) is emitted individually and collected into a list
 * that is emitted downstream, while aggregate statistics (total word length
 * and total word count) are recorded in the word stats table.
 */
@ProcessInput
public void process(StreamEvent event) {
    // Decode the event body as UTF-8, then tokenize on runs of whitespace.
    String text = Charset.forName("UTF-8").decode(event.getBody()).toString();
    String[] tokens = text.split("\\s+");
    List<String> cleanedWords = new ArrayList<>(tokens.length);
    long totalLength = 0;
    long totalWords = 0;
    for (String token : tokens) {
        // Keep only alphabetic characters; skip tokens that become empty.
        String cleaned = token.replaceAll("[^A-Za-z]", "");
        if (cleaned.isEmpty()) {
            continue;
        }
        wordOutput.emit(cleaned);
        cleanedWords.add(cleaned);
        totalLength += cleaned.length();
        totalWords++;
    }
    // Record aggregate statistics (word length, total words seen).
    this.wordStatsTable.increment(new Increment("totals").add("total_length", totalLength).add("total_words", totalWords));
    // Send the list of words to the associater.
    wordListOutput.emit(cleanedWords);
}
Also used : Increment(co.cask.cdap.api.dataset.table.Increment) ArrayList(java.util.ArrayList) ProcessInput(co.cask.cdap.api.annotation.ProcessInput)

Example 3 with Increment

use of co.cask.cdap.api.dataset.table.Increment in project cdap by caskdata.

From the class MetricsTableOnTable, the method increment:

/**
 * Applies a batch of increments against the underlying table: one
 * {@link Increment} is built per row, carrying every column delta for that row.
 */
@Override
public void increment(NavigableMap<byte[], NavigableMap<byte[], Long>> updates) {
    for (Map.Entry<byte[], NavigableMap<byte[], Long>> entry : updates.entrySet()) {
        byte[] row = entry.getKey();
        NavigableMap<byte[], Long> columnDeltas = entry.getValue();
        Increment rowIncrement = new Increment(row);
        for (Map.Entry<byte[], Long> delta : columnDeltas.entrySet()) {
            rowIncrement.add(delta.getKey(), delta.getValue());
        }
        table.increment(rowIncrement);
    }
}
Also used : NavigableMap(java.util.NavigableMap) Increment(co.cask.cdap.api.dataset.table.Increment) Map(java.util.Map) NavigableMap(java.util.NavigableMap) SortedMap(java.util.SortedMap)

Example 4 with Increment

use of co.cask.cdap.api.dataset.table.Increment in project cdap by caskdata.

From the class TableTest, the method testMetrics:

/**
 * Verifies that the table reports dataset-level read/write metrics for each
 * operation type (put, compareAndSwap, increment, incrementAndGet, get, scan,
 * delete). A MetricsCollector is installed on the table and the accumulated
 * counts are checked against expected running totals after every call.
 *
 * @param readless whether the table is created with readless increment support;
 *                 a readless increment is counted as a write only, while a
 *                 regular increment is counted as a read plus a write
 * @throws Exception on any dataset or transaction failure
 */
private void testMetrics(boolean readless) throws Exception {
    final String tableName = "survive";
    DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
    DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
    admin.create();
    Table table = getTable(CONTEXT1, tableName, props);
    // Accumulates reported metric values by name, so expectations below can be
    // verified against running totals.
    final Map<String, Long> metrics = Maps.newHashMap();
    ((MeteredDataset) table).setMetricsCollector(new MetricsCollector() {

        @Override
        public void increment(String metricName, long value) {
            Long old = metrics.get(metricName);
            metrics.put(metricName, old == null ? value : old + value);
        }

        @Override
        public void gauge(String metricName, long value) {
            metrics.put(metricName, value);
        }
    });
    // Note that we don't need to finish tx for metrics to be reported
    Transaction tx0 = txClient.startShort();
    ((TransactionAware) table).startTx(tx0);
    // expected running counts of write and read operations
    int writes = 0;
    int reads = 0;
    table.put(new Put(R1, C1, V1));
    verifyDatasetMetrics(metrics, ++writes, reads);
    // compareAndSwap counts one read (the compare) and one write (the swap)
    table.compareAndSwap(R1, C1, V1, V2);
    verifyDatasetMetrics(metrics, ++writes, ++reads);
    // note: will not write anything as expected value will not match
    table.compareAndSwap(R1, C1, V1, V2);
    verifyDatasetMetrics(metrics, writes, ++reads);
    table.increment(new Increment(R2, C2, 1L));
    if (readless) {
        // a readless increment counts as a pure write
        verifyDatasetMetrics(metrics, ++writes, reads);
    } else {
        // a regular increment reads the current value before writing
        verifyDatasetMetrics(metrics, ++writes, ++reads);
    }
    // incrementAndGet returns the result, so it counts a read and a write
    table.incrementAndGet(new Increment(R2, C2, 1L));
    verifyDatasetMetrics(metrics, ++writes, ++reads);
    table.get(new Get(R1, C1, V1));
    verifyDatasetMetrics(metrics, writes, ++reads);
    // each row returned by the scanner counts as one read
    Scanner scanner = table.scan(new Scan(null, null));
    while (scanner.next() != null) {
        verifyDatasetMetrics(metrics, writes, ++reads);
    }
    table.delete(new Delete(R1, C1, V1));
    verifyDatasetMetrics(metrics, ++writes, reads);
    // drop table
    admin.drop();
}
Also used : MetricsCollector(co.cask.cdap.api.metrics.MetricsCollector) Delete(co.cask.cdap.api.dataset.table.Delete) Scanner(co.cask.cdap.api.dataset.table.Scanner) Table(co.cask.cdap.api.dataset.table.Table) HBaseTable(co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable) DatasetProperties(co.cask.cdap.api.dataset.DatasetProperties) DatasetAdmin(co.cask.cdap.api.dataset.DatasetAdmin) Put(co.cask.cdap.api.dataset.table.Put) Transaction(org.apache.tephra.Transaction) TransactionAware(org.apache.tephra.TransactionAware) Increment(co.cask.cdap.api.dataset.table.Increment) Get(co.cask.cdap.api.dataset.table.Get) MeteredDataset(co.cask.cdap.api.dataset.metrics.MeteredDataset) Scan(co.cask.cdap.api.dataset.table.Scan)

Example 5 with Increment

use of co.cask.cdap.api.dataset.table.Increment in project cdap by caskdata.

From the class MetricsTableOnTable, the method increment (single-row overload):

/**
 * Applies all column deltas for a single row as one {@link Increment} against
 * the underlying table.
 */
@Override
public void increment(byte[] row, Map<byte[], Long> increments) {
    Increment rowIncrement = new Increment(row);
    for (Map.Entry<byte[], Long> delta : increments.entrySet()) {
        byte[] column = delta.getKey();
        Long amount = delta.getValue();
        rowIncrement.add(column, amount);
    }
    table.increment(rowIncrement);
}
Also used : Increment(co.cask.cdap.api.dataset.table.Increment) Map(java.util.Map) NavigableMap(java.util.NavigableMap) SortedMap(java.util.SortedMap)

Aggregations

Increment (co.cask.cdap.api.dataset.table.Increment)6 Table (co.cask.cdap.api.dataset.table.Table)3 DatasetAdmin (co.cask.cdap.api.dataset.DatasetAdmin)2 DatasetProperties (co.cask.cdap.api.dataset.DatasetProperties)2 Get (co.cask.cdap.api.dataset.table.Get)2 HBaseTable (co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable)2 Map (java.util.Map)2 NavigableMap (java.util.NavigableMap)2 SortedMap (java.util.SortedMap)2 Transaction (org.apache.tephra.Transaction)2 TransactionAware (org.apache.tephra.TransactionAware)2 TxRunnable (co.cask.cdap.api.TxRunnable)1 ProcessInput (co.cask.cdap.api.annotation.ProcessInput)1 DatasetContext (co.cask.cdap.api.data.DatasetContext)1 MeteredDataset (co.cask.cdap.api.dataset.metrics.MeteredDataset)1 Delete (co.cask.cdap.api.dataset.table.Delete)1 Put (co.cask.cdap.api.dataset.table.Put)1 Scan (co.cask.cdap.api.dataset.table.Scan)1 Scanner (co.cask.cdap.api.dataset.table.Scanner)1 MetricsCollector (co.cask.cdap.api.metrics.MetricsCollector)1