Use of io.cdap.cdap.api.dataset.table.Increment in project cdap by caskdata.
In class TableTest, the method testMetrics:
private void testMetrics(boolean readless) throws Exception {
  final String tableName = "survive";
  DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
  admin.create();
  try (Table table = getTable(CONTEXT1, tableName, props)) {
    final Map<String, Long> metrics = Maps.newHashMap();
    ((MeteredDataset) table).setMetricsCollector(new MetricsCollector() {
      @Override
      public void increment(String metricName, long value) {
        Long old = metrics.get(metricName);
        metrics.put(metricName, old == null ? value : old + value);
      }

      @Override
      public void gauge(String metricName, long value) {
        metrics.put(metricName, value);
      }
    });
    // Note that we don't need to finish the tx for metrics to be reported
    Transaction tx0 = txClient.startShort();
    ((TransactionAware) table).startTx(tx0);
    int writes = 0;
    int reads = 0;
    table.put(new Put(R1, C1, V1));
    verifyDatasetMetrics(metrics, ++writes, reads);
    table.compareAndSwap(R1, C1, V1, V2);
    verifyDatasetMetrics(metrics, ++writes, ++reads);
    // note: will not write anything, as the expected value will not match
    table.compareAndSwap(R1, C1, V1, V2);
    verifyDatasetMetrics(metrics, writes, ++reads);
    table.increment(new Increment(R2, C2, 1L));
    if (readless) {
      verifyDatasetMetrics(metrics, ++writes, reads);
    } else {
      verifyDatasetMetrics(metrics, ++writes, ++reads);
    }
    table.incrementAndGet(new Increment(R2, C2, 1L));
    verifyDatasetMetrics(metrics, ++writes, ++reads);
    table.get(new Get(R1, C1, V1));
    verifyDatasetMetrics(metrics, writes, ++reads);
    Scanner scanner = table.scan(new Scan(null, null));
    while (scanner.next() != null) {
      verifyDatasetMetrics(metrics, writes, ++reads);
    }
    table.delete(new Delete(R1, C1, V1));
    verifyDatasetMetrics(metrics, ++writes, reads);
  } finally {
    // drop the table
    admin.drop();
  }
}
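The verifyDatasetMetrics helper is defined elsewhere in TableTest and not shown here. A minimal sketch of what such a helper could look like, assuming the table reports its operations under metric names like "store.writes", "store.reads", and "store.ops" (those names, and the getLongOrZero helper, are assumptions for illustration):

// Hypothetical sketch; the metric names are assumed, not taken from the snippet above.
private void verifyDatasetMetrics(Map<String, Long> metrics, long writes, long reads) {
  Assert.assertEquals(writes, getLongOrZero(metrics, "store.writes"));
  Assert.assertEquals(reads, getLongOrZero(metrics, "store.reads"));
  Assert.assertEquals(writes + reads, getLongOrZero(metrics, "store.ops"));
}

// Treats an absent metric as zero: nothing is reported before the first operation.
private long getLongOrZero(Map<String, Long> metrics, String name) {
  Long value = metrics.get(name);
  return value == null ? 0L : value;
}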
Use of io.cdap.cdap.api.dataset.table.Increment in project cdap by caskdata.
In class CharCountProgram, the method run:
@Override
public void run(final JavaSparkExecutionContext sec) throws Exception {
  JavaSparkContext sc = new JavaSparkContext();
  // Verify the codec is being set
  Preconditions.checkArgument(
    "org.apache.spark.io.LZFCompressionCodec".equals(sc.getConf().get("spark.io.compression.codec")));
  // read the dataset
  JavaPairRDD<byte[], String> inputData = sec.fromDataset("keys");
  // create a new RDD with the same key but with a new value, which is the length of the string
  final JavaPairRDD<byte[], byte[]> stringLengths =
    inputData.mapToPair(new PairFunction<Tuple2<byte[], String>, byte[], byte[]>() {
      @Override
      public Tuple2<byte[], byte[]> call(Tuple2<byte[], String> stringTuple2) throws Exception {
        return new Tuple2<>(stringTuple2._1(), Bytes.toBytes(stringTuple2._2().length()));
      }
    });
  // write a total count to a table (this emits a metric we can validate in the test case)
  sec.execute(new TxRunnable() {
    @Override
    public void run(DatasetContext context) throws Exception {
      long count = stringLengths.count();
      Table totals = context.getDataset("totals");
      totals.increment(new Increment("total").add("total", count));
      // write the character counts to a dataset
      sec.saveAsDataset(stringLengths, "count");
    }
  });
}
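A counter written this way can be read back through the same Table API in a later transaction. A minimal sketch, assuming another sec.execute(...) block against the same "totals" dataset (the read-back itself is not part of the original program):

// Sketch: reading back the counter incremented above.
sec.execute(new TxRunnable() {
  @Override
  public void run(DatasetContext context) throws Exception {
    Table totals = context.getDataset("totals");
    // after the Increment, row "total" / column "total" holds an 8-byte encoded long
    Long total = totals.get(new Get("total", "total")).getLong("total");
    System.out.println("total characters: " + total);
  }
});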
Use of io.cdap.cdap.api.dataset.table.Increment in project cdap by caskdata.
In class TableTest, the method testMultiIncrementWithFlush:
private void testMultiIncrementWithFlush(boolean readless) throws Exception {
  final String tableName = "incrFlush";
  DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
  admin.create();
  Map<String, String> args = new HashMap<>();
  if (readless) {
    args.put(HBaseTable.SAFE_INCREMENTS, "true");
  }
  try (Table table = getTable(CONTEXT1, tableName, props, args);
       Table table2 = getTable(CONTEXT1, tableName, props, args)) {
    Transaction tx = txClient.startShort();
    try {
      ((TransactionAware) table).startTx(tx);
      // Write an increment, then flush it by calling commitTx.
      table.increment(new Increment(R1, C1, 10L));
      ((TransactionAware) table).commitTx();
    } finally {
      // invalidate the tx, leaving an excluded write in the table
      txClient.invalidate(tx.getTransactionId());
    }
    // validate the first write is not visible
    tx = txClient.startShort();
    try {
      ((TransactionAware) table).startTx(tx);
      Assert.assertEquals(null, table.get(new Get(R1, C1)).getLong(C1));
    } finally {
      txClient.commitOrThrow(tx);
    }
    tx = txClient.startShort();
    try {
      ((TransactionAware) table).startTx(tx);
      // Write an increment, then flush it by calling commitTx.
      table.increment(new Increment(R1, C1, 1L));
      ((TransactionAware) table).commitTx();
      // Write another increment, from both table instances
      table.increment(new Increment(R1, C1, 1L));
      if (readless) {
        ((TransactionAware) table2).startTx(tx);
        table2.increment(new Increment(R1, C1, 1L));
        ((TransactionAware) table2).commitTx();
      }
      ((TransactionAware) table).commitTx();
    } finally {
      txClient.commitOrThrow(tx);
    }
    // validate all increments are visible to a new tx
    tx = txClient.startShort();
    try {
      ((TransactionAware) table).startTx(tx);
      Assert.assertEquals(new Long(readless ? 3L : 2L), table.get(new Get(R1, C1)).getLong(C1));
    } finally {
      txClient.commitOrThrow(tx);
    }
  } finally {
    // drop the table
    admin.drop();
  }
}
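The readless flag exercised by this test maps to the same TableProperties setting an application would use when creating the dataset. A minimal sketch, assuming the standard AbstractApplication API (the CounterApp class and the dataset name "counters" are illustrative):

// Sketch: creating a Table with readless increments enabled at configure time.
public class CounterApp extends AbstractApplication {
  @Override
  public void configure() {
    createDataset("counters", Table.class,
                  TableProperties.builder()
                    .setReadlessIncrementSupport(true)  // increments need no read before write
                    .build());
  }
}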
Use of io.cdap.cdap.api.dataset.table.Increment in project cdap by caskdata.
In class MetricsTableOnTable, the method increment:
@Override
public void increment(byte[] row, Map<byte[], Long> increments) {
  Increment increment = new Increment(row);
  for (Map.Entry<byte[], Long> columnUpdate : increments.entrySet()) {
    increment.add(columnUpdate.getKey(), columnUpdate.getValue());
  }
  table.increment(increment);
}
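A caller hands this adapter a plain column-to-delta map, and all updates for the row are folded into one Increment so the underlying Table applies them in a single operation. A hedged usage sketch (the metricsTable instance, row key, and column names are illustrative):

// Sketch: incrementing two columns of one row through the adapter.
Map<byte[], Long> deltas = new TreeMap<>(Bytes.BYTES_COMPARATOR);
deltas.put(Bytes.toBytes("requests"), 1L);
deltas.put(Bytes.toBytes("errors"), 2L);
metricsTable.increment(Bytes.toBytes("2017-01-01T00:00"), deltas);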
Use of io.cdap.cdap.api.dataset.table.Increment in project cdap by caskdata.
In class MetricsTableOnTable, an overload of the method increment:
@Override
public void increment(NavigableMap<byte[], NavigableMap<byte[], Long>> updates) {
  for (Map.Entry<byte[], NavigableMap<byte[], Long>> rowUpdate : updates.entrySet()) {
    Increment increment = new Increment(rowUpdate.getKey());
    for (Map.Entry<byte[], Long> columnUpdate : rowUpdate.getValue().entrySet()) {
      increment.add(columnUpdate.getKey(), columnUpdate.getValue());
    }
    table.increment(increment);
  }
}
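The batch overload takes a nested row-to-(column-to-delta) map and issues one Increment per row. A sketch of building such an update, again with illustrative names and assuming the same metricsTable instance as above:

// Sketch: batching increments for two rows into a single call.
NavigableMap<byte[], NavigableMap<byte[], Long>> updates = new TreeMap<>(Bytes.BYTES_COMPARATOR);
NavigableMap<byte[], Long> row1 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
row1.put(Bytes.toBytes("requests"), 5L);
updates.put(Bytes.toBytes("2017-01-01T00:00"), row1);
NavigableMap<byte[], Long> row2 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
row2.put(Bytes.toBytes("requests"), 2L);
updates.put(Bytes.toBytes("2017-01-01T00:01"), row2);
metricsTable.increment(updates);  // one Increment issued per row internally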