Use of co.cask.cdap.api.dataset.table.Increment in project cdap by caskdata.
The class TableTest, method testMultiIncrementWithFlush.
private void testMultiIncrementWithFlush(boolean readless) throws Exception {
  final String tableName = "incrFlush";
  DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
  admin.create();
  Map<String, String> args = new HashMap<>();
  if (readless) {
    args.put(HBaseTable.SAFE_INCREMENTS, "true");
  }
  Table table = getTable(CONTEXT1, tableName, props, args);
  Transaction tx = txClient.startShort();
  try {
    ((TransactionAware) table).startTx(tx);
    // Write an increment, then flush it by calling commitTx.
    table.increment(new Increment(R1, C1, 10L));
    ((TransactionAware) table).commitTx();
  } finally {
    // Invalidate the tx, leaving an excluded write in the table.
    txClient.invalidate(tx.getTransactionId());
  }
  // Validate that the invalidated write is not visible.
  tx = txClient.startShort();
  try {
    ((TransactionAware) table).startTx(tx);
    Assert.assertNull(table.get(new Get(R1, C1)).getLong(C1));
  } finally {
    txClient.commit(tx);
  }
  tx = txClient.startShort();
  try {
    ((TransactionAware) table).startTx(tx);
    // Write an increment, then flush it by calling commitTx.
    table.increment(new Increment(R1, C1, 1L));
    ((TransactionAware) table).commitTx();
    // Write another increment, from both table instances.
    table.increment(new Increment(R1, C1, 1L));
    if (readless) {
      Table table2 = getTable(CONTEXT1, tableName, props, args);
      ((TransactionAware) table2).startTx(tx);
      table2.increment(new Increment(R1, C1, 1L));
      ((TransactionAware) table2).commitTx();
    }
    ((TransactionAware) table).commitTx();
  } finally {
    txClient.commit(tx);
  }
  // Validate that all committed increments are visible to a new tx.
  tx = txClient.startShort();
  try {
    ((TransactionAware) table).startTx(tx);
    Assert.assertEquals(Long.valueOf(readless ? 3L : 2L), table.get(new Get(R1, C1)).getLong(C1));
  } finally {
    txClient.commit(tx);
  }
  // Drop the table.
  admin.drop();
}
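Taken out of the test harness, the same pattern reads as follows. A minimal sketch, assuming a Table created with readless increment support is already available; the class and helper names are hypothetical, and incrementAndGet is used as in the test above:

import co.cask.cdap.api.dataset.DatasetProperties;
import co.cask.cdap.api.dataset.table.Increment;
import co.cask.cdap.api.dataset.table.Table;
import co.cask.cdap.api.dataset.table.TableProperties;

public final class ReadlessCounterSketch {

  // Properties for creating a table with readless increment support,
  // exactly as the test above builds them.
  static DatasetProperties readlessProps() {
    return TableProperties.builder().setReadlessIncrementSupport(true).build();
  }

  // A readless increment is a pure write: no current value is fetched.
  static void bump(Table table, byte[] row, byte[] column, long delta) {
    table.increment(new Increment(row, column, delta));
  }

  // When the new value is needed, incrementAndGet reads it back (one read, one write).
  static long bumpAndRead(Table table, byte[] row, byte[] column, long delta) {
    Long value = table.incrementAndGet(new Increment(row, column, delta)).getLong(column);
    return value == null ? 0L : value;
  }

  private ReadlessCounterSketch() { }
}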
Use of co.cask.cdap.api.dataset.table.Increment in project cdap by caskdata.
The class WordSplitter, method process.
@ProcessInput
public void process(StreamEvent event) {
  // The input is a String; split it on whitespace.
  String inputString = StandardCharsets.UTF_8.decode(event.getBody()).toString();
  String[] words = inputString.split("\\s+");
  List<String> wordList = new ArrayList<>(words.length);
  long sumOfLengths = 0;
  long wordCount = 0;
  // Strip all non-alphabetic characters from each word.
  for (String word : words) {
    word = word.replaceAll("[^A-Za-z]", "");
    if (!word.isEmpty()) {
      // Emit every word that remains.
      wordOutput.emit(word);
      wordList.add(word);
      sumOfLengths += word.length();
      wordCount++;
    }
  }
  // Update aggregate word statistics (total length, total words seen).
  this.wordStatsTable.increment(new Increment("totals")
    .add("total_length", sumOfLengths)
    .add("total_words", wordCount));
  // Send the list of words to the associator.
  wordListOutput.emit(wordList);
}
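The increment at the end batches two counters into a single row update. A minimal sketch of just that builder pattern, assuming a Table instance is injected as wordStatsTable; the class and method names are hypothetical:

import co.cask.cdap.api.dataset.table.Increment;
import co.cask.cdap.api.dataset.table.Table;

public final class WordStatsSketch {

  // Accumulate several column deltas for one row, then apply them
  // in a single increment() call.
  static void recordWordStats(Table wordStatsTable, long sumOfLengths, long wordCount) {
    wordStatsTable.increment(new Increment("totals")
      .add("total_length", sumOfLengths)
      .add("total_words", wordCount));
  }

  private WordStatsSketch() { }
}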
Use of co.cask.cdap.api.dataset.table.Increment in project cdap by caskdata.
The class MetricsTableOnTable, method increment.
@Override
public void increment(NavigableMap<byte[], NavigableMap<byte[], Long>> updates) {
  // Translate each row's column -> delta map into a single Increment operation.
  for (Map.Entry<byte[], NavigableMap<byte[], Long>> rowUpdate : updates.entrySet()) {
    Increment increment = new Increment(rowUpdate.getKey());
    for (Map.Entry<byte[], Long> columnUpdate : rowUpdate.getValue().entrySet()) {
      increment.add(columnUpdate.getKey(), columnUpdate.getValue());
    }
    table.increment(increment);
  }
}
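A sketch of how a caller might assemble the nested map this method expects, assuming Bytes.BYTES_COMPARATOR from co.cask.cdap.api.common.Bytes for byte[] ordering; the helper class and names are illustrative only:

import java.util.NavigableMap;
import java.util.TreeMap;
import co.cask.cdap.api.common.Bytes;

public final class MultiRowIncrementSketch {

  // Build a row -> (column -> delta) map; byte[] keys need an explicit comparator.
  static NavigableMap<byte[], NavigableMap<byte[], Long>> singleRowUpdate(String row, String column, long delta) {
    NavigableMap<byte[], Long> columns = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    columns.put(Bytes.toBytes(column), delta);
    NavigableMap<byte[], NavigableMap<byte[], Long>> updates = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    updates.put(Bytes.toBytes(row), columns);
    return updates;
  }

  private MultiRowIncrementSketch() { }
}

A caller would then pass the result straight through, e.g. metricsTable.increment(singleRowUpdate("host-1", "requests", 1L)).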
Use of co.cask.cdap.api.dataset.table.Increment in project cdap by caskdata.
The class TableTest, method testMetrics.
private void testMetrics(boolean readless) throws Exception {
  final String tableName = "survive";
  DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
  admin.create();
  Table table = getTable(CONTEXT1, tableName, props);
  final Map<String, Long> metrics = Maps.newHashMap();
  ((MeteredDataset) table).setMetricsCollector(new MetricsCollector() {
    @Override
    public void increment(String metricName, long value) {
      Long old = metrics.get(metricName);
      metrics.put(metricName, old == null ? value : old + value);
    }

    @Override
    public void gauge(String metricName, long value) {
      metrics.put(metricName, value);
    }
  });
  // Note that we don't need to finish the tx for metrics to be reported.
  Transaction tx0 = txClient.startShort();
  ((TransactionAware) table).startTx(tx0);
  int writes = 0;
  int reads = 0;
  table.put(new Put(R1, C1, V1));
  verifyDatasetMetrics(metrics, ++writes, reads);
  table.compareAndSwap(R1, C1, V1, V2);
  verifyDatasetMetrics(metrics, ++writes, ++reads);
  // Note: will not write anything, as the expected value no longer matches.
  table.compareAndSwap(R1, C1, V1, V2);
  verifyDatasetMetrics(metrics, writes, ++reads);
  table.increment(new Increment(R2, C2, 1L));
  if (readless) {
    // A readless increment is a pure write.
    verifyDatasetMetrics(metrics, ++writes, reads);
  } else {
    // Otherwise the increment must read the current value first.
    verifyDatasetMetrics(metrics, ++writes, ++reads);
  }
  table.incrementAndGet(new Increment(R2, C2, 1L));
  verifyDatasetMetrics(metrics, ++writes, ++reads);
  table.get(new Get(R1, C1, V1));
  verifyDatasetMetrics(metrics, writes, ++reads);
  Scanner scanner = table.scan(new Scan(null, null));
  while (scanner.next() != null) {
    verifyDatasetMetrics(metrics, writes, ++reads);
  }
  table.delete(new Delete(R1, C1, V1));
  verifyDatasetMetrics(metrics, ++writes, reads);
  // Drop the table.
  admin.drop();
}
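The anonymous collector in the test simply sums increments and overwrites gauges. A standalone, thread-safe sketch of the same idea; the import path for MetricsCollector is an assumption, though the two-method shape matches what the test implements:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
// Assumed import path for the interface implemented anonymously in the test above.
import co.cask.cdap.api.metrics.MetricsCollector;

public class InMemoryMetricsCollector implements MetricsCollector {

  private final Map<String, Long> metrics = new ConcurrentHashMap<>();

  @Override
  public void increment(String metricName, long value) {
    // Sum successive increments per metric name.
    metrics.merge(metricName, value, Long::sum);
  }

  @Override
  public void gauge(String metricName, long value) {
    // A gauge overwrites the previous value.
    metrics.put(metricName, value);
  }

  public Long get(String metricName) {
    return metrics.get(metricName);
  }
}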
Use of co.cask.cdap.api.dataset.table.Increment in project cdap by caskdata.
The class MetricsTableOnTable, method increment.
@Override
public void increment(byte[] row, Map<byte[], Long> increments) {
  // Batch all column deltas for this row into one Increment operation.
  Increment increment = new Increment(row);
  for (Map.Entry<byte[], Long> columnUpdate : increments.entrySet()) {
    increment.add(columnUpdate.getKey(), columnUpdate.getValue());
  }
  table.increment(increment);
}
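A minimal sketch of a caller for this single-row variant, assuming a MetricsTableOnTable instance is at hand; the helper class, row, and column names are illustrative only:

import java.util.Map;
import java.util.TreeMap;
import co.cask.cdap.api.common.Bytes;

public final class SingleRowIncrementSketch {

  // Collect several column deltas for one row and apply them in one call.
  static void recordRequest(MetricsTableOnTable metricsTable, boolean failed) {
    Map<byte[], Long> deltas = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    deltas.put(Bytes.toBytes("total"), 1L);
    if (failed) {
      deltas.put(Bytes.toBytes("errors"), 1L);
    }
    metricsTable.increment(Bytes.toBytes("requests"), deltas);
  }

  private SingleRowIncrementSketch() { }
}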