use of org.apache.hadoop.hbase.client.Put in project hive by apache.
the class HBaseReadWrite method store.
/**********************************************************************************************
* General access methods
*********************************************************************************************/
private void store(String table, byte[] key, byte[] colFam, byte[] colName, byte[] obj)
    throws IOException {
  HTableInterface htab = conn.getHBaseTable(table);
  Put p = new Put(key);
  p.add(colFam, colName, obj);
  htab.put(p);
  conn.flush(htab);
}
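For reference, a minimal sketch of the same single-cell write against the current (non-deprecated) HBase client API, where Connection/Table replace HTableInterface and addColumn replaces the deprecated Put.add. The class name here is invented for the sketch, and the connection handling is simplified; it is not how HBaseReadWrite manages its tables.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class SingleCellWriteSketch {
  // Writes one cell (colFam:colName = obj) under the given row key.
  static void store(String tableName, byte[] key, byte[] colFam, byte[] colName, byte[] obj)
      throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // A real application would create the Connection once and share it; it is heavyweight.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf(tableName))) {
      Put p = new Put(key);               // row key
      p.addColumn(colFam, colName, obj);  // family, qualifier, value
      table.put(p);                       // sent immediately; no explicit flush step
    }
  }
}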
use of org.apache.hadoop.hbase.client.Put in project hive by apache.
the class HBaseReadWrite method storeFileMetadata.
/**
 * @param fileIds file ID list.
 * @param metadataBuffers Serialized file metadata, one buffer per file ID.
 * @param addedCols The column names for additional columns created by the file-format-specific
 *                  metadata handler, to be stored in the cache.
 * @param addedVals The values for addedCols; one value per file ID per added column.
 */
@Override
public void storeFileMetadata(List<Long> fileIds, List<ByteBuffer> metadataBuffers,
    ByteBuffer[] addedCols, ByteBuffer[][] addedVals) throws IOException, InterruptedException {
  byte[][] keys = new byte[fileIds.size()][];
  for (int i = 0; i < fileIds.size(); ++i) {
    keys[i] = HBaseUtils.makeLongKey(fileIds.get(i));
  }
  // HBase APIs are weird. To supply a ByteBuffer value you also have to supply a ByteBuffer
  // column name, but not a ByteBuffer column family. So there. Perhaps we should add these
  // to constants too.
  ByteBuffer colNameBuf = ByteBuffer.wrap(CATALOG_COL);
  @SuppressWarnings("deprecation")
  HTableInterface htab = conn.getHBaseTable(FILE_METADATA_TABLE);
  List<Row> actions = new ArrayList<>(keys.length);
  for (int keyIx = 0; keyIx < keys.length; ++keyIx) {
    ByteBuffer value = (metadataBuffers != null) ? metadataBuffers.get(keyIx) : null;
    ByteBuffer[] av = addedVals == null ? null : addedVals[keyIx];
    if (value == null) {
      actions.add(new Delete(keys[keyIx]));
      assert av == null;
    } else {
      Put p = new Put(keys[keyIx]);
      p.addColumn(CATALOG_CF, colNameBuf, HConstants.LATEST_TIMESTAMP, value);
      if (av != null) {
        assert av.length == addedCols.length;
        for (int colIx = 0; colIx < addedCols.length; ++colIx) {
          p.addColumn(STATS_CF, addedCols[colIx], HConstants.LATEST_TIMESTAMP, av[colIx]);
        }
      }
      actions.add(p);
    }
  }
  Object[] results = new Object[keys.length];
  htab.batch(actions, results);
  // TODO: should we check the results array? We don't care about partial results.
  conn.flush(htab);
}
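The same mixed Put/Delete batch pattern, sketched as a standalone method against the modern Table API: a null value turns an entry into a Delete, anything else becomes a Put, and everything goes to the server in one batch call. The family and qualifier are placeholders; imports are as in the sketch after store, plus java.util.ArrayList, java.util.List, org.apache.hadoop.hbase.client.Delete, org.apache.hadoop.hbase.client.Row, and org.apache.hadoop.hbase.util.Bytes.
// Upserts rows that have a value and deletes rows whose value is null, in one batch round trip.
static void mixedBatch(Table table, byte[][] keys, byte[][] values)
    throws IOException, InterruptedException {
  List<Row> actions = new ArrayList<>(keys.length);
  for (int i = 0; i < keys.length; ++i) {
    if (values[i] == null) {
      actions.add(new Delete(keys[i]));                                 // null => drop the row
    } else {
      Put p = new Put(keys[i]);
      p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("c"), values[i]);  // placeholder family/qualifier
      actions.add(p);
    }
  }
  Object[] results = new Object[actions.size()];
  table.batch(actions, results);  // a null in results[i] means that action failed even after retries
}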
use of org.apache.hadoop.hbase.client.Put in project hive by apache.
the class HBaseReadWrite method decrementStorageDescriptorRefCount.
/**
 * Lower the reference count on the storage descriptor by one. If it goes to zero, then it
 * will be deleted.
 * @param sd Storage descriptor
 * @throws IOException
 */
void decrementStorageDescriptorRefCount(StorageDescriptor sd) throws IOException {
  byte[] key = HBaseUtils.hashStorageDescriptor(sd, md);
  byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL);
  if (serializedRefCnt == null) {
    // Someone deleted it before we got to it, no worries.
    return;
  }
  int refCnt = Integer.parseInt(new String(serializedRefCnt, HBaseUtils.ENCODING));
  HTableInterface htab = conn.getHBaseTable(SD_TABLE);
  if (--refCnt < 1) {
    Delete d = new Delete(key);
    // We don't use checkAndDelete here because it isn't compatible with the transaction
    // managers. If the transaction managers are doing their jobs then we should not need it
    // anyway.
    htab.delete(d);
    sdCache.remove(new ByteArrayWrapper(key));
  } else {
    Put p = new Put(key);
    p.add(CATALOG_CF, REF_COUNT_COL, Integer.toString(refCnt).getBytes(HBaseUtils.ENCODING));
    htab.put(p);
    conn.flush(htab);
  }
}
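For contrast only: HBase can also maintain a reference count atomically on the server with Table.incrementColumnValue. That is not what the method above does (it stores the count as a decimal string and relies on the transaction managers for safety), and the native counter requires the cell to hold an 8-byte long, so the following is a sketch of the alternative pattern with placeholder names, not a drop-in replacement.
// Atomically decrements a long counter cell and deletes the row once the count reaches zero.
static void decrementRefCount(Table table, byte[] key, byte[] colFam, byte[] refCountCol)
    throws IOException {
  long remaining = table.incrementColumnValue(key, colFam, refCountCol, -1L);  // server-side, atomic
  if (remaining < 1) {
    table.delete(new Delete(key));  // last reference released; remove the row
  }
}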
use of org.apache.hadoop.hbase.client.Put in project hive by apache.
the class HBaseReadWrite method multiModify.
private void multiModify(String table, byte[][] keys, byte[] colFam, byte[] colName,
    List<ByteBuffer> values) throws IOException, InterruptedException {
  assert values == null || keys.length == values.size();
  // HBase APIs are weird. To supply a ByteBuffer value you also have to supply a ByteBuffer
  // column name, but not a ByteBuffer column family. So there. Perhaps we should add these
  // to constants too.
  ByteBuffer colNameBuf = ByteBuffer.wrap(colName);
  @SuppressWarnings("deprecation")
  HTableInterface htab = conn.getHBaseTable(table);
  List<Row> actions = new ArrayList<>(keys.length);
  for (int i = 0; i < keys.length; ++i) {
    ByteBuffer value = (values != null) ? values.get(i) : null;
    if (value == null) {
      actions.add(new Delete(keys[i]));
    } else {
      Put p = new Put(keys[i]);
      p.addColumn(colFam, colNameBuf, HConstants.LATEST_TIMESTAMP, value);
      actions.add(p);
    }
  }
  Object[] results = new Object[keys.length];
  htab.batch(actions, results);
  // TODO: should we check the results array? We don't care about partial results.
  conn.flush(htab);
}
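A hypothetical call to multiModify from elsewhere in HBaseReadWrite, showing that a null entry in values deletes the corresponding key while non-null entries are written; serializedEntry stands in for an already-serialized byte[] payload and is not a real variable in the class.
// Hypothetical usage: write one entry and delete another in a single batch.
byte[][] keys = { HBaseUtils.makeLongKey(1L), HBaseUtils.makeLongKey(2L) };
List<ByteBuffer> values = new ArrayList<>();
values.add(ByteBuffer.wrap(serializedEntry));  // serializedEntry: hypothetical serialized payload
values.add(null);                              // a null value turns this key into a Delete
multiModify(FILE_METADATA_TABLE, keys, CATALOG_CF, CATALOG_COL, values);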
use of org.apache.hadoop.hbase.client.Put in project hive by apache.
the class HBaseReadWrite method storeFileMetadata.
@Override
public void storeFileMetadata(long fileId, ByteBuffer metadata, ByteBuffer[] addedCols,
    ByteBuffer[] addedVals) throws IOException, InterruptedException {
  @SuppressWarnings("deprecation")
  HTableInterface htab = conn.getHBaseTable(FILE_METADATA_TABLE);
  Put p = new Put(HBaseUtils.makeLongKey(fileId));
  p.addColumn(CATALOG_CF, ByteBuffer.wrap(CATALOG_COL), HConstants.LATEST_TIMESTAMP, metadata);
  assert (addedCols == null && addedVals == null) || (addedCols.length == addedVals.length);
  if (addedCols != null) {
    for (int i = 0; i < addedCols.length; ++i) {
      p.addColumn(STATS_CF, addedCols[i], HConstants.LATEST_TIMESTAMP, addedVals[i]);
    }
  }
  htab.put(p);
  conn.flush(htab);
}
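A hypothetical call to this single-file variant with no format-specific extra columns; metadataBytes stands in for the serialized file metadata and hbaseReadWrite for an existing HBaseReadWrite instance.
long fileId = 42L;
ByteBuffer metadata = ByteBuffer.wrap(metadataBytes);            // metadataBytes: hypothetical payload
hbaseReadWrite.storeFileMetadata(fileId, metadata, null, null);  // addedCols/addedVals may both be null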