use of org.apache.hadoop.hbase.client.HTableInterface in project hive by apache.
In the class HBaseReadWrite, method multiModify.
/**
 * Apply a batch of row mutations to one table in a single round trip.  Each key gets a
 * Delete when its corresponding value is null (or when {@code values} itself is null),
 * otherwise a Put of the value under (colFam, colName).
 * @param table name of the table to modify
 * @param keys row keys, one action per key
 * @param colFam column family puts are written into
 * @param colName column qualifier puts are written into
 * @param values values aligned with {@code keys}; may be null, meaning delete every key
 * @throws IOException if the underlying HBase batch call fails
 * @throws InterruptedException if the batch call is interrupted
 */
private void multiModify(String table, byte[][] keys, byte[] colFam, byte[] colName, List<ByteBuffer> values) throws IOException, InterruptedException {
  assert values == null || keys.length == values.size();
  // HBase APIs are weird. To supply bytebuffer value, you have to also have bytebuffer
  // column name, but not column family. So there. Perhaps we should add these to constants too.
  ByteBuffer qualifier = ByteBuffer.wrap(colName);
  @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(table);
  List<Row> actions = new ArrayList<>(keys.length);
  for (int i = 0; i < keys.length; i++) {
    ByteBuffer val = (values == null) ? null : values.get(i);
    if (val == null) {
      // Null value means drop the whole row.
      actions.add(new Delete(keys[i]));
    } else {
      Put put = new Put(keys[i]);
      put.addColumn(colFam, qualifier, HConstants.LATEST_TIMESTAMP, val);
      actions.add(put);
    }
  }
  Object[] outcomes = new Object[keys.length];
  htab.batch(actions, outcomes);
  // TODO: should we check results array? we don't care about partial results
  conn.flush(htab);
}
use of org.apache.hadoop.hbase.client.HTableInterface in project hive by apache.
In the class HBaseReadWrite, method read.
/**
 * Fetch a single row from a table, restricted to the requested columns.
 * @param table name of the table to read from
 * @param key row key to fetch
 * @param colFam column family the requested columns live in
 * @param colNames column qualifiers to fetch
 * @return the Result for the row; may be empty if the row does not exist
 * @throws IOException if the HBase get fails
 */
private Result read(String table, byte[] key, byte[] colFam, byte[][] colNames) throws IOException {
  // Match the sibling methods: HTableInterface is deprecated, suppress the warning here
  // too instead of letting this one method emit it.
  @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(table);
  Get g = new Get(key);
  for (byte[] colName : colNames) g.addColumn(colFam, colName);
  return htab.get(g);
}
use of org.apache.hadoop.hbase.client.HTableInterface in project hive by apache.
In the class HBaseReadWrite, method multiRead.
/**
 * Fetch one (colFam, colName) cell from many rows in a single multi-get.  On return,
 * {@code resultDest[i]} holds the value for {@code keys[i]}, or null when that row was
 * not found.
 * @param table name of the table to read from
 * @param colFam column family of the cell to fetch
 * @param colName column qualifier of the cell to fetch
 * @param keys row keys to fetch
 * @param resultDest output array, same length as {@code keys}
 * @throws IOException if the HBase multi-get fails
 */
private void multiRead(String table, byte[] colFam, byte[] colName, byte[][] keys, ByteBuffer[] resultDest) throws IOException {
  assert keys.length == resultDest.length;
  @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(table);
  List<Get> gets = new ArrayList<>(keys.length);
  for (byte[] key : keys) {
    Get get = new Get(key);
    get.addColumn(colFam, colName);
    gets.add(get);
  }
  Result[] fetched = htab.get(gets);
  for (int i = 0; i < fetched.length; i++) {
    if (fetched[i].isEmpty()) {
      resultDest[i] = null;
      continue;
    }
    Cell cell = fetched[i].getColumnLatestCell(colFam, colName);
    // Wrap the cell's backing array in place rather than copying the value out.
    resultDest[i] = ByteBuffer.wrap(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
  }
}
use of org.apache.hadoop.hbase.client.HTableInterface in project hive by apache.
In the class HBaseReadWrite, method printTables.
/**
 * Print tables
 * @param regex to use to find the tables. Remember that dbname is in each
 *              table name.
 * @return tables as strings
 * @throws IOException
 * @throws TException
 */
List<String> printTables(String regex) throws IOException, TException {
  Filter filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex));
  @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(TABLE_TABLE);
  Scan scan = new Scan();
  scan.addColumn(CATALOG_CF, CATALOG_COL);
  scan.addFamily(STATS_CF);
  scan.setFilter(filter);
  // The scanner holds server-side resources; close it when we're done (the previous
  // version leaked it).
  try (ResultScanner scanner = htab.getScanner(scan)) {
    Iterator<Result> iter = scanner.iterator();
    if (!iter.hasNext()) {
      return noMatch(regex, "table");
    }
    List<String> lines = new ArrayList<>();
    while (iter.hasNext()) {
      lines.add(printOneTable(iter.next()));
    }
    return lines;
  }
}
use of org.apache.hadoop.hbase.client.HTableInterface in project hive by apache.
In the class HBaseReadWrite, method printPartitions.
/**
 * Print partitions
 * @param partKey a partial partition key. This must match the beginnings of the partition key.
 *                It can be just dbname.tablename, or dbname.table.pval... where pval are the
 *                partition values in order. They must be in the correct order and they must
 *                be literal values (no regular expressions)
 * @return partitions as strings
 * @throws IOException
 * @throws TException
 */
List<String> printPartitions(String partKey) throws IOException, TException {
  // First figure out the table and fetch it
  // Split on dot here rather than the standard separator because this will be passed in as a
  // regex, even though we aren't fully supporting regex's.
  String[] partKeyParts = partKey.split("\\.");
  if (partKeyParts.length < 2) {
    return noMatch(partKey, "partition");
  }
  // With only db.table given, match every partition value ("*"); otherwise use the
  // literal values after db.table.
  List<String> partVals = partKeyParts.length == 2 ? Arrays.asList("*") : Arrays.asList(Arrays.copyOfRange(partKeyParts, 2, partKeyParts.length));
  PartitionScanInfo psi;
  try {
    psi = scanPartitionsInternal(partKeyParts[0], partKeyParts[1], partVals, -1);
  } catch (NoSuchObjectException e) {
    return noMatch(partKey, "partition");
  }
  @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(PART_TABLE);
  Scan scan = new Scan();
  scan.addColumn(CATALOG_CF, CATALOG_COL);
  scan.addFamily(STATS_CF);
  scan.setStartRow(psi.keyPrefix);
  scan.setStopRow(psi.endKeyPrefix);
  scan.setFilter(psi.filter);
  // The scanner holds server-side resources; close it when we're done (the previous
  // version leaked it).
  try (ResultScanner scanner = htab.getScanner(scan)) {
    Iterator<Result> iter = scanner.iterator();
    if (!iter.hasNext()) {
      return noMatch(partKey, "partition");
    }
    List<String> lines = new ArrayList<>();
    while (iter.hasNext()) {
      lines.add(printOnePartition(iter.next()));
    }
    return lines;
  }
}
Aggregations