use of co.cask.cdap.api.dataset.table.Scanner in project cdap by caskdata.
the class HBaseConsumerStateStore method completed.
/**
 * Called by a consumer to signal processing completion up to the current barrier that the consumer is in.
 */
void completed(long groupId, int instanceId) {
  // Get the current consumer state to get the end barrier info
  ConsumerState consumerState = getConsumerState(groupId, instanceId);
  QueueBarrier nextBarrier = consumerState.getNextBarrier();
  if (nextBarrier == null) {
    // End row shouldn't be null if this method is called
    throw new IllegalArgumentException(String.format(
      "No end barrier information for consumer. Queue: %s, GroupId: %d, InstanceId: %d",
      queueName, groupId, instanceId));
  }
  byte[] stateColumn = getConsumerStateColumn(groupId, instanceId);
  // If the instance exists in the next barrier, set the start row to the barrier start
  if (instanceId < nextBarrier.getGroupConfig().getGroupSize()) {
    table.put(queueName.toBytes(), stateColumn, nextBarrier.getStartRow());
    return;
  }
  // Find the next start barrier that this instance needs to consume from
  try (Scanner scanner = table.scan(Bytes.add(queueName.toBytes(), nextBarrier.getStartRow()), barrierScanEndRow)) {
    Row row;
    boolean found = false;
    while (!found && (row = scanner.next()) != null) {
      QueueBarrier queueBarrier = decodeBarrierInfo(row, groupId);
      if (queueBarrier == null || instanceId >= queueBarrier.getGroupConfig().getGroupSize()) {
        continue;
      }
      table.put(queueName.toBytes(), stateColumn, queueBarrier.getStartRow());
      found = true;
    }
    if (!found) {
      // Remove the state since this consumer instance is no longer active
      table.delete(queueName.toBytes(), stateColumn);
    }
  }
}
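The same Scanner idiom recurs in every snippet on this page: scan a key range, call next() until it returns null, and close the scanner. Below is a minimal, self-contained sketch of that pattern; the class name, method name, and key values are placeholders, and the import paths are the usual CDAP API packages assumed here, not taken from the snippet above.

import co.cask.cdap.api.common.Bytes;
import co.cask.cdap.api.dataset.table.Row;
import co.cask.cdap.api.dataset.table.Scanner;
import co.cask.cdap.api.dataset.table.Table;

public final class ScanRangeSketch {

  // Counts the rows whose keys fall in [startKey, stopKey).
  static int countRowsInRange(Table table, String startKey, String stopKey) {
    int count = 0;
    // Scanner is closeable, so try-with-resources releases the underlying scanner even on failure.
    try (Scanner scanner = table.scan(Bytes.toBytes(startKey), Bytes.toBytes(stopKey))) {
      Row row;
      while ((row = scanner.next()) != null) {
        // Each Row carries the row key (row.getRow()) and a column -> value map (row.getColumns()).
        count++;
      }
    }
    return count;
  }
}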
use of co.cask.cdap.api.dataset.table.Scanner in project cdap by caskdata.
the class HBaseQueueAdmin method deleteFlowConfigs.
private void deleteFlowConfigs(FlowId flowId) throws Exception {
  // It's a bit hacky here since we know how the HBaseConsumerStateStore works.
  // Maybe we need another Dataset that works across all queues.
  final QueueName prefixName = QueueName.from(URI.create(QueueName.prefixForFlow(flowId)));
  DatasetId stateStoreId = getStateStoreId(flowId.getNamespace());
  Map<String, String> args = ImmutableMap.of(HBaseQueueDatasetModule.PROPERTY_QUEUE_NAME, prefixName.toString());
  HBaseConsumerStateStore stateStore = datasetFramework.getDataset(stateStoreId, args, null);
  if (stateStore == null) {
    // If the state store doesn't exist, there is no queue and hence nothing to do
    return;
  }
  try {
    final Table table = stateStore.getInternalTable();
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) table)
      .execute(new TransactionExecutor.Subroutine() {
        @Override
        public void apply() throws Exception {
          // Prefix name is "/" terminated ("queue:///namespace/app/flow/"), hence the scan is unique for the flow
          byte[] startRow = Bytes.toBytes(prefixName.toString());
          try (Scanner scanner = table.scan(startRow, Bytes.stopKeyForPrefix(startRow))) {
            Row row = scanner.next();
            while (row != null) {
              table.delete(row.getRow());
              row = scanner.next();
            }
          }
        }
      });
  } finally {
    stateStore.close();
  }
}
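The core of the subroutine above is a prefix scan followed by a per-row delete. Here is a hedged sketch of that step in isolation; the helper name and parameters are illustrative, and in the snippet above the same loop runs inside a TransactionExecutor so the scan and the deletes share one transaction.

// Sketch only: delete every row whose key starts with the given prefix.
// Bytes.stopKeyForPrefix(startRow) yields the exclusive stop key just past the prefix range.
private static void deleteRowsWithPrefix(Table table, String prefix) {
  byte[] startRow = Bytes.toBytes(prefix);
  try (Scanner scanner = table.scan(startRow, Bytes.stopKeyForPrefix(startRow))) {
    Row row;
    while ((row = scanner.next()) != null) {
      table.delete(row.getRow());
    }
  }
}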
use of co.cask.cdap.api.dataset.table.Scanner in project cdap by caskdata.
the class TableTest method testMetrics.
private void testMetrics(boolean readless) throws Exception {
  final String tableName = "survive";
  DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
  admin.create();
  Table table = getTable(CONTEXT1, tableName, props);
  final Map<String, Long> metrics = Maps.newHashMap();
  ((MeteredDataset) table).setMetricsCollector(new MetricsCollector() {
    @Override
    public void increment(String metricName, long value) {
      Long old = metrics.get(metricName);
      metrics.put(metricName, old == null ? value : old + value);
    }

    @Override
    public void gauge(String metricName, long value) {
      metrics.put(metricName, value);
    }
  });
  // Note that we don't need to finish tx for metrics to be reported
  Transaction tx0 = txClient.startShort();
  ((TransactionAware) table).startTx(tx0);
  int writes = 0;
  int reads = 0;
  table.put(new Put(R1, C1, V1));
  verifyDatasetMetrics(metrics, ++writes, reads);
  table.compareAndSwap(R1, C1, V1, V2);
  verifyDatasetMetrics(metrics, ++writes, ++reads);
  // note: will not write anything as expected value will not match
  table.compareAndSwap(R1, C1, V1, V2);
  verifyDatasetMetrics(metrics, writes, ++reads);
  table.increment(new Increment(R2, C2, 1L));
  if (readless) {
    verifyDatasetMetrics(metrics, ++writes, reads);
  } else {
    verifyDatasetMetrics(metrics, ++writes, ++reads);
  }
  table.incrementAndGet(new Increment(R2, C2, 1L));
  verifyDatasetMetrics(metrics, ++writes, ++reads);
  table.get(new Get(R1, C1, V1));
  verifyDatasetMetrics(metrics, writes, ++reads);
  Scanner scanner = table.scan(new Scan(null, null));
  while (scanner.next() != null) {
    verifyDatasetMetrics(metrics, writes, ++reads);
  }
  table.delete(new Delete(R1, C1, V1));
  verifyDatasetMetrics(metrics, ++writes, reads);
  // drop table
  admin.drop();
}
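verifyDatasetMetrics is a helper defined elsewhere in TableTest. A hypothetical sketch of what it checks is shown below, assuming the collector reports separate write and read counters; the metric key names are placeholders for illustration, not confirmed CDAP constants, and imports of java.util.Map and org.junit.Assert are assumed.

// Hypothetical helper: the metric names here are assumptions, not the project's constants.
private static void verifyDatasetMetrics(Map<String, Long> metrics, long expectedWrites, long expectedReads) {
  Assert.assertEquals(expectedWrites, metrics.getOrDefault("dataset.store.writes", 0L).longValue());
  Assert.assertEquals(expectedReads, metrics.getOrDefault("dataset.store.reads", 0L).longValue());
}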
use of co.cask.cdap.api.dataset.table.Scanner in project cdap by caskdata.
the class TableTest method verifyScanWithFuzzyRowFilter.
private static void verifyScanWithFuzzyRowFilter(Table table) {
  FuzzyRowFilter filter = new FuzzyRowFilter(
    ImmutableList.of(ImmutablePair.of(new byte[] { '*', 'b', '*', 'b' }, new byte[] { 0x01, 0x00, 0x01, 0x00 })));
  Scanner scanner = table.scan(new Scan(null, null, filter));
  int count = 0;
  while (true) {
    Row entry = scanner.next();
    if (entry == null) {
      break;
    }
    Assert.assertTrue(entry.getRow()[1] == 'b' && entry.getRow()[3] == 'b');
    Assert.assertEquals(1, entry.getColumns().size());
    Assert.assertTrue(entry.getColumns().containsKey(C1));
    Assert.assertArrayEquals(V1, entry.get(C1));
    count++;
  }
  Assert.assertEquals(9, count);
}
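The fuzzy pair above appears to follow the HBase-style mask convention (assumed here): a 0x00 mask byte fixes the corresponding key byte, while 0x01 marks it as a wildcard, so the filter keeps only 4-byte row keys whose second and fourth bytes are 'b', which is exactly what the assertions verify. For comparison, a pair built with the same (assumed) convention that would match any 4-byte key starting with 'a'; the values are illustrative only.

// Mask semantics assumed: 0x00 = byte must equal the pattern byte, 0x01 = any byte matches.
FuzzyRowFilter startsWithA = new FuzzyRowFilter(
    ImmutableList.of(ImmutablePair.of(new byte[] { 'a', 0, 0, 0 },
                                      new byte[] { 0x00, 0x01, 0x01, 0x01 })));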
use of co.cask.cdap.api.dataset.table.Scanner in project cdap by caskdata.
the class TableTest method countRows.
private static int countRows(Table table) throws Exception {
  Scanner scanner = table.scan(null, null);
  int count = 0;
  while (scanner.next() != null) {
    count++;
  }
  return count;
}
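countRows leaves its Scanner open. Since the scanner is closeable in this API (the first snippet on this page uses it in try-with-resources), a variant that releases it deterministically could look like the sketch below; this is an illustration, not the project's code.

private static int countRowsClosing(Table table) {
  int count = 0;
  try (Scanner scanner = table.scan(null, null)) {
    while (scanner.next() != null) {
      count++;
    }
  }
  return count;
}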