use of io.cdap.cdap.data2.util.hbase.ScanBuilder in project cdap by caskdata.
the class HBaseQueueDebugger method scanQueue.
private void scanQueue(TransactionExecutor txExecutor, HBaseConsumerStateStore stateStore, QueueName queueName, QueueBarrier start, @Nullable QueueBarrier end, final QueueStatistics outStats) throws Exception {
  final byte[] queueRowPrefix = QueueEntryRow.getQueueRowPrefix(queueName);
  ConsumerGroupConfig groupConfig = start.getGroupConfig();
  printProgress("Got consumer group config: %s\n", groupConfig);
  HBaseQueueAdmin admin = queueClientFactory.getQueueAdmin();
  TableId tableId = admin.getDataTableId(queueName, QueueConstants.QueueType.SHARDED_QUEUE);
  HTable hTable = queueClientFactory.createHTable(tableId);
  printProgress("Looking at HBase table: %s\n", Bytes.toString(hTable.getTableName()));
  final byte[] stateColumnName = Bytes.add(QueueEntryRow.STATE_COLUMN_PREFIX, Bytes.toBytes(groupConfig.getGroupId()));
  int distributorBuckets = queueClientFactory.getDistributorBuckets(hTable.getTableDescriptor());
  ShardedHBaseQueueStrategy queueStrategy = new ShardedHBaseQueueStrategy(tableUtil, distributorBuckets);
  ScanBuilder scan = tableUtil.buildScan();
  scan.setStartRow(start.getStartRow());
  if (end != null) {
    scan.setStopRow(end.getStartRow());
  } else {
    scan.setStopRow(QueueEntryRow.getQueueEntryRowKey(queueName, Long.MAX_VALUE, Integer.MAX_VALUE));
  }
  // Need to include the meta column for rows that don't have state yet.
  scan.addColumn(QueueEntryRow.COLUMN_FAMILY, QueueEntryRow.META_COLUMN);
  scan.addColumn(QueueEntryRow.COLUMN_FAMILY, stateColumnName);
  // Don't use the block cache for the debug tool; we don't want old blocks to get cached.
  scan.setCacheBlocks(false);
  scan.setMaxVersions(1);
  printProgress("Scanning section with scan: %s\n", scan.toString());
  List<Integer> instanceIds = Lists.newArrayList();
  if (groupConfig.getDequeueStrategy() == DequeueStrategy.FIFO) {
    instanceIds.add(0);
  } else {
    for (int instanceId = 0; instanceId < groupConfig.getGroupSize(); instanceId++) {
      instanceIds.add(instanceId);
    }
  }
  final int rowsCache = Integer.parseInt(System.getProperty(PROP_ROWS_CACHE, "100000"));
  for (final int instanceId : instanceIds) {
    printProgress("Processing instance %d", instanceId);
    ConsumerConfig consConfig = new ConsumerConfig(groupConfig, instanceId);
    final QueueScanner scanner = queueStrategy.createScanner(consConfig, hTable, scan.build(), rowsCache);
    try {
      txExecutor.execute(new TransactionExecutor.Procedure<HBaseConsumerStateStore>() {
        @Override
        public void apply(HBaseConsumerStateStore input) throws Exception {
          ImmutablePair<byte[], Map<byte[], byte[]>> result;
          while ((result = scanner.next()) != null) {
            byte[] rowKey = result.getFirst();
            Map<byte[], byte[]> columns = result.getSecond();
            visitRow(outStats, input.getTransaction(), rowKey, columns.get(stateColumnName), queueRowPrefix.length);
            if (showProgress() && outStats.getTotal() % rowsCache == 0) {
              System.out.printf("\rProcessing instance %d: %s", instanceId, outStats.getReport(showTxTimestampOnly()));
            }
          }
        }
      }, stateStore);
    } catch (TransactionFailureException e) {
      // Ignore "transaction not in progress" exceptions; they are caused by the short TX timeout on commit.
      if (!(Throwables.getRootCause(e) instanceof TransactionNotInProgressException)) {
        throw Throwables.propagate(e);
      }
    }
    printProgress("\rProcessing instance %d: %s\n", instanceId, outStats.getReport(showTxTimestampOnly()));
  }
}
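For reference, the ScanBuilder calls above reduce to a small reusable pattern. Below is a minimal sketch of just that pattern; the method name and parameters are hypothetical (not CDAP code), and it assumes tableUtil is the same HBaseTableUtil used above.
private Scan buildBoundedScan(HBaseTableUtil tableUtil, byte[] startRow, byte[] stopRow, byte[] family, byte[] qualifier) {
  ScanBuilder scan = tableUtil.buildScan();
  scan.setStartRow(startRow);         // inclusive lower bound of the row range
  scan.setStopRow(stopRow);           // exclusive upper bound of the row range
  scan.addColumn(family, qualifier);  // fetch only the columns that are needed
  scan.setCacheBlocks(false);         // keep a one-off scan out of the block cache
  scan.setMaxVersions(1);             // only the latest version of each cell
  return scan.build();                // materialize the org.apache.hadoop.hbase.client.Scan
}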
use of io.cdap.cdap.data2.util.hbase.ScanBuilder in project cdap by caskdata.
the class ReplicationStatusTool method getMapFromTable.
private static Map<String, Long> getMapFromTable(String rowType) throws IOException {
  HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
  HTable hTable = tableUtil.createHTable(hConf, getReplicationStateTableId(tableUtil));
  // Scan the table to collect entries from all regions.
  ScanBuilder scan = getScanBuilder(tableUtil, rowType);
  Result result;
  HashMap<String, Long> timeMap = new HashMap<>();
  try (ResultScanner resultScanner = hTable.getScanner(scan.build())) {
    while ((result = resultScanner.next()) != null) {
      ReplicationStatusKey key = new ReplicationStatusKey(result.getRow());
      String region = key.getRegionName();
      Long timestamp = getTimeFromResult(result, rowType);
      // Keep only the newest timestamp seen for each region.
      if (timeMap.get(region) == null || timestamp > timeMap.get(region)) {
        timeMap.put(region, timestamp);
      }
    }
  } catch (Exception e) {
    LOG.error("Error while reading table.", e);
    throw Throwables.propagate(e);
  } finally {
    hTable.close();
  }
  return timeMap;
}
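The getScanBuilder helper is not included in this snippet. A plausible reconstruction, assuming the row-type qualifier lives in TIME_FAMILY as in dumpReplicationStateTable below, would be:
private static ScanBuilder getScanBuilder(HBaseTableUtil tableUtil, String rowType) {
  ScanBuilder scan = tableUtil.buildScan();
  // Hypothetical sketch: fetch only the cells whose qualifier matches the requested row type.
  scan.addColumn(Bytes.toBytes(ReplicationConstants.ReplicationStatusTool.TIME_FAMILY), Bytes.toBytes(rowType));
  return scan;
}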
use of io.cdap.cdap.data2.util.hbase.ScanBuilder in project cdap by caskdata.
the class HBaseTable method setFilterIfNeeded.
private void setFilterIfNeeded(ScanBuilder scan, @Nullable Filter filter) {
  if (filter == null) {
    return;
  }
  if (filter instanceof FuzzyRowFilter) {
    FuzzyRowFilter fuzzyRowFilter = (FuzzyRowFilter) filter;
    List<Pair<byte[], byte[]>> fuzzyPairs = Lists.newArrayListWithExpectedSize(fuzzyRowFilter.getFuzzyKeysData().size());
    for (ImmutablePair<byte[], byte[]> pair : fuzzyRowFilter.getFuzzyKeysData()) {
      fuzzyPairs.add(Pair.newPair(pair.getFirst(), pair.getSecond()));
    }
    scan.setFilter(new org.apache.hadoop.hbase.filter.FuzzyRowFilter(fuzzyPairs));
  } else {
    throw new IllegalArgumentException("Unsupported filter: " + filter);
  }
}
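A short usage sketch with hypothetical keys (not CDAP code): HBase's FuzzyRowFilter takes pairs of (row key, fuzzy info), where a fuzzy-info byte of 0 pins the byte at that position and 1 lets it match anything, which is what makes the filter useful for salted row keys.
private void setSaltedKeyFilter(ScanBuilder scan) {
  byte[] rowKey = { 0, 0, 'e', 'v', 't' };  // hypothetical key; the first 2 bytes are an unknown salt
  byte[] fuzzyInfo = { 1, 1, 0, 0, 0 };     // vary the salt bytes, pin the 'evt' suffix
  scan.setFilter(new org.apache.hadoop.hbase.filter.FuzzyRowFilter(Lists.newArrayList(Pair.newPair(rowKey, fuzzyInfo))));
}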
use of io.cdap.cdap.data2.util.hbase.ScanBuilder in project cdap by caskdata.
the class ReplicationStatusTool method dumpReplicationStateTable.
private static void dumpReplicationStateTable() throws Exception {
  System.out.println("\nThese are all the HBase regions on the cluster:");
  HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
  ScanBuilder scan = tableUtil.buildScan();
  scan.addColumn(Bytes.toBytes(ReplicationConstants.ReplicationStatusTool.TIME_FAMILY), Bytes.toBytes(ReplicationConstants.ReplicationStatusTool.WRITE_TIME_ROW_TYPE));
  scan.addColumn(Bytes.toBytes(ReplicationConstants.ReplicationStatusTool.TIME_FAMILY), Bytes.toBytes(ReplicationConstants.ReplicationStatusTool.REPLICATE_TIME_ROW_TYPE));
  try (Table table = tableUtil.createTable(hConf, getReplicationStateTableId(tableUtil));
       ResultScanner resultScanner = table.getScanner(scan.build())) {
    Result result;
    while ((result = resultScanner.next()) != null) {
      ReplicationStatusKey key = new ReplicationStatusKey(result.getRow());
      String rowType = key.getRowType();
      String region = key.getRegionName();
      UUID rsID = key.getRsID();
      Long writeTime = getTimeFromResult(result, ReplicationConstants.ReplicationStatusTool.WRITE_TIME_ROW_TYPE);
      Long replicateTime = getTimeFromResult(result, ReplicationConstants.ReplicationStatusTool.REPLICATE_TIME_ROW_TYPE);
      System.out.println("Key=>rowType:" + rowType + ":region:" + region + ":RSID:" + rsID + " writeTime:" + writeTime + ":replicateTime:" + replicateTime);
    }
  }
}
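As a follow-up, the writeTime and replicateTime values printed above can be combined into a per-region replication lag. A hypothetical helper (not part of the tool), assuming both timestamps are epoch milliseconds:
private static long replicationLagMillis(Long writeTime, Long replicateTime) {
  if (writeTime == null || replicateTime == null) {
    return -1L;  // lag is unknown when either timestamp is missing
  }
  return writeTime - replicateTime;  // time by which replication trails the latest write
}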
use of io.cdap.cdap.data2.util.hbase.ScanBuilder in project cdap by caskdata.
the class SaltedHBaseQueueStrategy method createScanner.
@Override
public QueueScanner createScanner(ConsumerConfig consumerConfig, HTable hTable, Scan scan, int numRows) throws IOException {
  // Roughly divide the caching size by the number of buckets, padded slightly so that
  // an extra RPC is not needed when the estimate is not exactly right.
  ScanBuilder distributedScan = tableUtil.buildScan(scan);
  int caching = (int) (1.1 * numRows / distributorBuckets);
  distributedScan.setCaching(caching);
  ResultScanner scanner = DistributedScanner.create(hTable, distributedScan.build(), rowKeyDistributor, scansExecutor);
  return new HBaseQueueScanner(scanner, numRows, rowKeyConverter);
}
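A worked example of the caching estimate, with hypothetical values:
int numRows = 100000;                                      // rows requested by the caller
int distributorBuckets = 8;                                // parallel bucket scanners
int exactShare = numRows / distributorBuckets;             // 12500 rows per bucket
int caching = (int) (1.1 * numRows / distributorBuckets);  // 13750 rows, padded by 10%
// The padding lets each bucket scanner fetch slightly more than its exact share in one
// round trip, avoiding an extra RPC when rows are unevenly distributed across buckets.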