Example 6 with Increment

Use of co.cask.cdap.api.dataset.table.Increment in project cdap by caskdata.

From the class TableTest, method testMetrics:

private void testMetrics(boolean readless) throws Exception {
    final String tableName = "survive";
    DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
    DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
    admin.create();
    Table table = getTable(CONTEXT1, tableName, props);
    final Map<String, Long> metrics = Maps.newHashMap();
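    // Collect emitted metrics in memory so the test can assert exact read/write counts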
    ((MeteredDataset) table).setMetricsCollector(new MetricsCollector() {

        @Override
        public void increment(String metricName, long value) {
            Long old = metrics.get(metricName);
            metrics.put(metricName, old == null ? value : old + value);
        }

        @Override
        public void gauge(String metricName, long value) {
            metrics.put(metricName, value);
        }
    });
    // Note that we don't need to finish tx for metrics to be reported
    Transaction tx0 = txClient.startShort();
    ((TransactionAware) table).startTx(tx0);
    int writes = 0;
    int reads = 0;
    table.put(new Put(R1, C1, V1));
    verifyDatasetMetrics(metrics, ++writes, reads);
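    // compareAndSwap always reads the current value, and writes only when it
    // matches the expected value, so a successful swap costs one read and one write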
    table.compareAndSwap(R1, C1, V1, V2);
    verifyDatasetMetrics(metrics, ++writes, ++reads);
    // note: will not write anything as expected value will not match
    table.compareAndSwap(R1, C1, V1, V2);
    verifyDatasetMetrics(metrics, writes, ++reads);
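    // With readless increments enabled, increment() writes a delta that is
    // resolved at read time, so only the write count moves; without readless
    // support the current value must be read first, costing a read as well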
    table.increment(new Increment(R2, C2, 1L));
    if (readless) {
        verifyDatasetMetrics(metrics, ++writes, reads);
    } else {
        verifyDatasetMetrics(metrics, ++writes, ++reads);
    }
    table.incrementAndGet(new Increment(R2, C2, 1L));
    verifyDatasetMetrics(metrics, ++writes, ++reads);
    table.get(new Get(R1, C1, V1));
    verifyDatasetMetrics(metrics, writes, ++reads);
    Scanner scanner = table.scan(new Scan(null, null));
    while (scanner.next() != null) {
        verifyDatasetMetrics(metrics, writes, ++reads);
    }
    // close the scanner to release its resources
    scanner.close();
    table.delete(new Delete(R1, C1, V1));
    verifyDatasetMetrics(metrics, ++writes, reads);
    // drop table
    admin.drop();
}
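
The verifyDatasetMetrics helper is not part of this snippet. A minimal sketch of what it might look like, assuming org.junit.Assert and java.util.Map are imported, and that reads and writes are reported under the hypothetical metric names "dataset.store.reads" and "dataset.store.writes" (the real test asserts against whatever constants CDAP actually emits):

private void verifyDatasetMetrics(Map<String, Long> metrics, long expectedWrites, long expectedReads) {
    // Metric names here are assumptions for illustration only
    Assert.assertEquals(expectedWrites, count(metrics, "dataset.store.writes"));
    Assert.assertEquals(expectedReads, count(metrics, "dataset.store.reads"));
}

private long count(Map<String, Long> metrics, String name) {
    // Treat an absent metric as zero, matching the collector's lazy aggregation
    Long value = metrics.get(name);
    return value == null ? 0L : value;
}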
Also used:
- MetricsCollector (co.cask.cdap.api.metrics.MetricsCollector)
- Delete (co.cask.cdap.api.dataset.table.Delete)
- Scanner (co.cask.cdap.api.dataset.table.Scanner)
- Table (co.cask.cdap.api.dataset.table.Table)
- HBaseTable (co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable)
- DatasetProperties (co.cask.cdap.api.dataset.DatasetProperties)
- DatasetAdmin (co.cask.cdap.api.dataset.DatasetAdmin)
- Put (co.cask.cdap.api.dataset.table.Put)
- Transaction (org.apache.tephra.Transaction)
- TransactionAware (org.apache.tephra.TransactionAware)
- Increment (co.cask.cdap.api.dataset.table.Increment)
- Get (co.cask.cdap.api.dataset.table.Get)
- MeteredDataset (co.cask.cdap.api.dataset.metrics.MeteredDataset)
- Scan (co.cask.cdap.api.dataset.table.Scan)

Aggregations

- Increment (co.cask.cdap.api.dataset.table.Increment): 6
- Table (co.cask.cdap.api.dataset.table.Table): 3
- DatasetAdmin (co.cask.cdap.api.dataset.DatasetAdmin): 2
- DatasetProperties (co.cask.cdap.api.dataset.DatasetProperties): 2
- Get (co.cask.cdap.api.dataset.table.Get): 2
- HBaseTable (co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable): 2
- Map (java.util.Map): 2
- NavigableMap (java.util.NavigableMap): 2
- SortedMap (java.util.SortedMap): 2
- Transaction (org.apache.tephra.Transaction): 2
- TransactionAware (org.apache.tephra.TransactionAware): 2
- TxRunnable (co.cask.cdap.api.TxRunnable): 1
- ProcessInput (co.cask.cdap.api.annotation.ProcessInput): 1
- DatasetContext (co.cask.cdap.api.data.DatasetContext): 1
- MeteredDataset (co.cask.cdap.api.dataset.metrics.MeteredDataset): 1
- Delete (co.cask.cdap.api.dataset.table.Delete): 1
- Put (co.cask.cdap.api.dataset.table.Put): 1
- Scan (co.cask.cdap.api.dataset.table.Scan): 1
- Scanner (co.cask.cdap.api.dataset.table.Scanner): 1
- MetricsCollector (co.cask.cdap.api.metrics.MetricsCollector): 1
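
The aggregation list above shows Increment also appearing alongside TxRunnable and DatasetContext, i.e. inside transaction bodies in application code rather than in tests. A minimal sketch of that pattern, assuming a Table dataset named "counters" has been created elsewhere (the dataset name and worker class are hypothetical, for illustration only):

import co.cask.cdap.api.TxRunnable;
import co.cask.cdap.api.data.DatasetContext;
import co.cask.cdap.api.dataset.table.Increment;
import co.cask.cdap.api.dataset.table.Table;
import co.cask.cdap.api.worker.AbstractWorker;

public class CounterWorker extends AbstractWorker {

    @Override
    public void run() {
        // execute() runs the body in its own short transaction
        getContext().execute(new TxRunnable() {
            @Override
            public void run(DatasetContext context) throws Exception {
                // "counters" is a hypothetical dataset name
                Table table = context.getDataset("counters");
                // Add 1 to the "hits" column of row "page1"; with readless
                // increments enabled this is a write-only delta
                table.increment(new Increment("page1", "hits", 1L));
            }
        });
    }
}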