Use of org.apache.hadoop.hbase.client.HTableInterface in project hive by apache.
Class HBaseReadWrite, method multiRead.
private void multiRead(String table, byte[] colFam, byte[] colName, byte[][] keys, ByteBuffer[] resultDest) throws IOException {
  assert keys.length == resultDest.length;
  @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(table);
  // Build one Get per key, restricted to the single requested column.
  List<Get> gets = new ArrayList<>(keys.length);
  for (byte[] key : keys) {
    Get g = new Get(key);
    g.addColumn(colFam, colName);
    gets.add(g);
  }
  // Batch fetch; results come back in the same order as the Gets.
  Result[] results = htab.get(gets);
  for (int i = 0; i < results.length; ++i) {
    Result r = results[i];
    if (r.isEmpty()) {
      resultDest[i] = null;
    } else {
      // Wrap the cell's backing array directly to avoid copying the value.
      Cell cell = r.getColumnLatestCell(colFam, colName);
      resultDest[i] = ByteBuffer.wrap(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
    }
  }
}
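For comparison, the same batch-Get pattern can be written against the non-deprecated Connection/Table API that replaces HTableInterface. The sketch below is self-contained and illustrative only; the table name "demo", column family "cf", qualifier "c", and row keys are hypothetical and do not come from the Hive code.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchGetSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    byte[] cf = Bytes.toBytes("cf");    // hypothetical column family
    byte[] col = Bytes.toBytes("c");    // hypothetical qualifier
    byte[][] keys = { Bytes.toBytes("k1"), Bytes.toBytes("k2") };
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("demo"))) {
      // One Get per key, restricted to the single column of interest.
      List<Get> gets = new ArrayList<>(keys.length);
      for (byte[] key : keys) {
        gets.add(new Get(key).addColumn(cf, col));
      }
      // One batch call; results come back in the same order as the Gets.
      Result[] results = table.get(gets);
      for (Result r : results) {
        System.out.println(r.isEmpty() ? "<no row>" : Bytes.toString(r.getValue(cf, col)));
      }
    }
  }
}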
Use of org.apache.hadoop.hbase.client.HTableInterface in project hive by apache.
Class HBaseReadWrite, method printTables.
/**
 * Print tables
 * @param regex to use to find the tables. Remember that dbname is in each
 *              table name.
 * @return tables as strings
 * @throws IOException
 * @throws TException
 */
List<String> printTables(String regex) throws IOException, TException {
  Filter filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex));
  @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(TABLE_TABLE);
  Scan scan = new Scan();
  scan.addColumn(CATALOG_CF, CATALOG_COL);
  scan.addFamily(STATS_CF);
  scan.setFilter(filter);
  Iterator<Result> iter = htab.getScanner(scan).iterator();
  if (!iter.hasNext())
    return noMatch(regex, "table");
  List<String> lines = new ArrayList<>();
  while (iter.hasNext()) {
    lines.add(printOneTable(iter.next()));
  }
  return lines;
}
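printTables pushes the regex match to the region servers with a RowFilter wrapping a RegexStringComparator. A minimal standalone sketch of that filter pattern, again using the newer Connection/Table API; the table name, column, and regex below are hypothetical.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class RegexScanSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // The regex is evaluated server side against each row key; non-matching rows are skipped.
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("c"));
    scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator("^default\\..*")));
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("demo"));
         ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        System.out.println(Bytes.toString(r.getRow()));
      }
    }
  }
}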
Use of org.apache.hadoop.hbase.client.HTableInterface in project hive by apache.
Class HBaseReadWrite, method printPartitions.
/**
 * Print partitions
 * @param partKey a partial partition key. This must match the beginning of the partition key.
 *                It can be just dbname.tablename, or dbname.table.pval... where pval are the
 *                partition values in order. They must be in the correct order and they must
 *                be literal values (no regular expressions)
 * @return partitions as strings
 * @throws IOException
 * @throws TException
 */
List<String> printPartitions(String partKey) throws IOException, TException {
  // First figure out the table and fetch it
  // Split on dot here rather than the standard separator because this will be passed in as a
  // regex, even though we aren't fully supporting regexes.
  String[] partKeyParts = partKey.split("\\.");
  if (partKeyParts.length < 2)
    return noMatch(partKey, "partition");
  List<String> partVals = partKeyParts.length == 2 ? Arrays.asList("*") : Arrays.asList(Arrays.copyOfRange(partKeyParts, 2, partKeyParts.length));
  PartitionScanInfo psi;
  try {
    psi = scanPartitionsInternal(partKeyParts[0], partKeyParts[1], partVals, -1);
  } catch (NoSuchObjectException e) {
    return noMatch(partKey, "partition");
  }
  @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(PART_TABLE);
  Scan scan = new Scan();
  scan.addColumn(CATALOG_CF, CATALOG_COL);
  scan.addFamily(STATS_CF);
  scan.setStartRow(psi.keyPrefix);
  scan.setStopRow(psi.endKeyPrefix);
  scan.setFilter(psi.filter);
  Iterator<Result> iter = htab.getScanner(scan).iterator();
  if (!iter.hasNext())
    return noMatch(partKey, "partition");
  List<String> lines = new ArrayList<>();
  while (iter.hasNext()) {
    lines.add(printOnePartition(iter.next()));
  }
  return lines;
}
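The partition scan is bounded by setStartRow/setStopRow, so only rows sharing the computed key prefix are touched. A minimal sketch of such a bounded range scan with the HBase 1.x Scan API used above (in 2.x, withStartRow/withStopRow replace these calls); the table name, family, and keys are hypothetical, and the stop row is chosen as the first key just past the prefix since '/' is the byte immediately after '.' in ASCII.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RangeScanSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));             // hypothetical family
    scan.setStartRow(Bytes.toBytes("default.t1."));  // inclusive lower bound: the key prefix itself
    scan.setStopRow(Bytes.toBytes("default.t1/"));   // exclusive upper bound: '/' is the byte after '.'
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("demo"));
         ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        System.out.println(Bytes.toString(r.getRow()));
      }
    }
  }
}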
Use of org.apache.hadoop.hbase.client.HTableInterface in project hive by apache.
Class HBaseReadWrite, method scan.
private Iterator<Result> scan(String table, byte[] keyStart, byte[] keyEnd, byte[] colFam, byte[] colName, Filter filter) throws IOException {
  HTableInterface htab = conn.getHBaseTable(table);
  Scan s = new Scan();
  if (keyStart != null) {
    s.setStartRow(keyStart);
  }
  if (keyEnd != null) {
    s.setStopRow(keyEnd);
  }
  if (colFam != null && colName != null) {
    s.addColumn(colFam, colName);
  }
  if (filter != null) {
    s.setFilter(filter);
  }
  ResultScanner scanner = htab.getScanner(s);
  return scanner.iterator();
}
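Because this helper hands back only the Iterator, callers never see the underlying ResultScanner and cannot close it explicitly. One common variant returns the ResultScanner itself so the caller can hold it in a try-with-resources block; a short sketch of that shape against the newer Table API, with a method name and signature of our own choosing rather than Hive's.
import java.io.IOException;

import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.Filter;

public class ScanHelperSketch {
  // Generic scan helper: every argument except the table may be null, in which
  // case the corresponding restriction is simply not applied.
  static ResultScanner openScanner(Table table, byte[] keyStart, byte[] keyEnd,
                                   byte[] colFam, byte[] colName, Filter filter) throws IOException {
    Scan s = new Scan();
    if (keyStart != null) {
      s.setStartRow(keyStart);
    }
    if (keyEnd != null) {
      s.setStopRow(keyEnd);
    }
    if (colFam != null && colName != null) {
      s.addColumn(colFam, colName);
    }
    if (filter != null) {
      s.setFilter(filter);
    }
    // Returning the ResultScanner (not just its iterator) lets the caller close it.
    return table.getScanner(s);
  }
}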
Use of org.apache.hadoop.hbase.client.HTableInterface in project hive by apache.
Class HBaseReadWrite, method storeFileMetadata.
@Override
public void storeFileMetadata(long fileId, ByteBuffer metadata, ByteBuffer[] addedCols, ByteBuffer[] addedVals) throws IOException, InterruptedException {
  @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(FILE_METADATA_TABLE);
  Put p = new Put(HBaseUtils.makeLongKey(fileId));
  p.addColumn(CATALOG_CF, ByteBuffer.wrap(CATALOG_COL), HConstants.LATEST_TIMESTAMP, metadata);
  // addedCols and addedVals must either both be null or be parallel arrays of equal length.
  assert (addedCols == null && addedVals == null) || (addedCols.length == addedVals.length);
  if (addedCols != null) {
    for (int i = 0; i < addedCols.length; ++i) {
      p.addColumn(STATS_CF, addedCols[i], HConstants.LATEST_TIMESTAMP, addedVals[i]);
    }
  }
  htab.put(p);
  conn.flush(htab);
}
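A single Put may carry cells for several column families, which is what storeFileMetadata relies on to write the metadata cell and the optional stats cells in one per-row atomic mutation. A minimal sketch of such a multi-family Put with the newer Connection/Table API; all table, family, qualifier, and value names here are hypothetical.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiFamilyPutSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    byte[] catalogCf = Bytes.toBytes("cf");     // hypothetical families and qualifiers
    byte[] statsCf = Bytes.toBytes("stats");
    byte[][] extraCols = { Bytes.toBytes("colA"), Bytes.toBytes("colB") };
    byte[][] extraVals = { Bytes.toBytes("1"), Bytes.toBytes("2") };
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("demo"))) {
      // A single Put may hold cells for several families; HBase applies it atomically per row.
      Put p = new Put(Bytes.toBytes(12345L));
      p.addColumn(catalogCf, Bytes.toBytes("c"), Bytes.toBytes("metadata blob"));
      for (int i = 0; i < extraCols.length; i++) {
        p.addColumn(statsCf, extraCols[i], extraVals[i]);
      }
      table.put(p);
    }
  }
}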