
Example 1 with Put

Use of org.apache.hadoop.hbase.client.Put in project hive by apache.

From the class HBaseReadWrite, method store.

/**********************************************************************************************
   * General access methods
   *********************************************************************************************/
private void store(String table, byte[] key, byte[] colFam, byte[] colName, byte[] obj) throws IOException {
    HTableInterface htab = conn.getHBaseTable(table);
    Put p = new Put(key);
    p.add(colFam, colName, obj);
    htab.put(p);
    conn.flush(htab);
}
Also used : HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Put(org.apache.hadoop.hbase.client.Put)
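
For reference, a minimal sketch of the same single-cell write against the current HBase client API, where Connection/Table replace the deprecated HTableInterface and Put.addColumn replaces the Put.add(byte[], byte[], byte[]) overload used above. The table, family, qualifier, key, and value below are placeholders, not constants from HBaseReadWrite.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutStoreSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("example_table"))) {  // placeholder table name
            Put p = new Put(Bytes.toBytes("example-key"));
            // addColumn(family, qualifier, value) is the non-deprecated equivalent of
            // the p.add(colFam, colName, obj) call in the store() method above
            p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("col"), Bytes.toBytes("example-value"));
            table.put(p);
        }
    }
}

Unlike the HTableInterface path above, Table writes are not client-buffered by default, so there is no separate flush step corresponding to conn.flush(htab).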

Example 2 with Put

Use of org.apache.hadoop.hbase.client.Put in project hive by apache.

From the class HBaseReadWrite, method storeFileMetadata.

/**
   * @param fileIds file ID list.
   * @param metadataBuffers Serialized file metadata, one buffer per file ID.
   * @param addedCols The column names for additional columns created by file-format-specific
   *                  metadata handler, to be stored in the cache.
   * @param addedVals The values for addedCols; one value per file ID per added column.
   */
@Override
public void storeFileMetadata(List<Long> fileIds, List<ByteBuffer> metadataBuffers, ByteBuffer[] addedCols, ByteBuffer[][] addedVals) throws IOException, InterruptedException {
    byte[][] keys = new byte[fileIds.size()][];
    for (int i = 0; i < fileIds.size(); ++i) {
        keys[i] = HBaseUtils.makeLongKey(fileIds.get(i));
    }
    // HBase APIs are weird. To supply bytebuffer value, you have to also have bytebuffer
    // column name, but not column family. So there. Perhaps we should add these to constants too.
    ByteBuffer colNameBuf = ByteBuffer.wrap(CATALOG_COL);
    @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(FILE_METADATA_TABLE);
    List<Row> actions = new ArrayList<>(keys.length);
    for (int keyIx = 0; keyIx < keys.length; ++keyIx) {
        ByteBuffer value = (metadataBuffers != null) ? metadataBuffers.get(keyIx) : null;
        ByteBuffer[] av = addedVals == null ? null : addedVals[keyIx];
        if (value == null) {
            actions.add(new Delete(keys[keyIx]));
            assert av == null;
        } else {
            Put p = new Put(keys[keyIx]);
            p.addColumn(CATALOG_CF, colNameBuf, HConstants.LATEST_TIMESTAMP, value);
            if (av != null) {
                assert av.length == addedCols.length;
                for (int colIx = 0; colIx < addedCols.length; ++colIx) {
                    p.addColumn(STATS_CF, addedCols[colIx], HConstants.LATEST_TIMESTAMP, av[colIx]);
                }
            }
            actions.add(p);
        }
    }
    Object[] results = new Object[keys.length];
    htab.batch(actions, results);
    // TODO: should we check results array? we don't care about partial results
    conn.flush(htab);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) ArrayList(java.util.ArrayList) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ByteBuffer(java.nio.ByteBuffer) Put(org.apache.hadoop.hbase.client.Put) Row(org.apache.hadoop.hbase.client.Row)
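
The comment in the loop above is why colNameBuf exists: the Put.addColumn overload that accepts a ByteBuffer value also takes a ByteBuffer qualifier and an explicit timestamp, while the column family stays a plain byte[]. A minimal sketch of that call shape, with placeholder names (assuming java.nio.ByteBuffer and org.apache.hadoop.hbase.util.Bytes are imported):

byte[] family = Bytes.toBytes("cf");                            // family must remain byte[]
ByteBuffer qualifier = ByteBuffer.wrap(Bytes.toBytes("col"));   // qualifier wrapped as ByteBuffer
ByteBuffer value = ByteBuffer.wrap(Bytes.toBytes("payload"));   // value supplied as ByteBuffer
Put p = new Put(Bytes.toBytes("row-key"));
p.addColumn(family, qualifier, HConstants.LATEST_TIMESTAMP, value);  // ByteBuffer overload needs a timestamp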

Example 3 with Put

Use of org.apache.hadoop.hbase.client.Put in project hive by apache.

From the class HBaseReadWrite, method decrementStorageDescriptorRefCount.

/**
   * Lower the reference count on the storage descriptor by one.  If it goes to zero, then it
   * will be deleted.
   * @param sd Storage descriptor
   * @throws IOException
   */
void decrementStorageDescriptorRefCount(StorageDescriptor sd) throws IOException {
    byte[] key = HBaseUtils.hashStorageDescriptor(sd, md);
    byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL);
    if (serializedRefCnt == null) {
        // Someone deleted it before we got to it, no worries
        return;
    }
    int refCnt = Integer.parseInt(new String(serializedRefCnt, HBaseUtils.ENCODING));
    HTableInterface htab = conn.getHBaseTable(SD_TABLE);
    if (--refCnt < 1) {
        Delete d = new Delete(key);
        // We don't use checkAndDelete here because it isn't compatible with the transaction
        // managers.  If the transaction managers are doing their jobs then we should not need it
        // anyway.
        htab.delete(d);
        sdCache.remove(new ByteArrayWrapper(key));
    } else {
        Put p = new Put(key);
        p.add(CATALOG_CF, REF_COUNT_COL, Integer.toString(refCnt).getBytes(HBaseUtils.ENCODING));
        htab.put(p);
        conn.flush(htab);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Put(org.apache.hadoop.hbase.client.Put)
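
The comment above deliberately rules out checkAndDelete. Purely as an illustration of what that atomic alternative would look like under the same names from decrementStorageDescriptorRefCount (htab, key, CATALOG_CF, REF_COUNT_COL, serializedRefCnt), a hedged sketch:

// checkAndDelete removes the row only if the ref-count cell still holds the value this
// client read, so a concurrent writer makes it return false instead of being clobbered.
// HBaseReadWrite avoids it because it is not compatible with the transaction managers.
boolean deleted = htab.checkAndDelete(key, CATALOG_CF, REF_COUNT_COL, serializedRefCnt, new Delete(key));
if (!deleted) {
    // the count changed under us; a caller taking this route would re-read and retry
}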

Example 4 with Put

Use of org.apache.hadoop.hbase.client.Put in project hive by apache.

From the class HBaseReadWrite, method multiModify.

private void multiModify(String table, byte[][] keys, byte[] colFam, byte[] colName, List<ByteBuffer> values) throws IOException, InterruptedException {
    assert values == null || keys.length == values.size();
    // HBase APIs are weird. To supply bytebuffer value, you have to also have bytebuffer
    // column name, but not column family. So there. Perhaps we should add these to constants too.
    ByteBuffer colNameBuf = ByteBuffer.wrap(colName);
    @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(table);
    List<Row> actions = new ArrayList<>(keys.length);
    for (int i = 0; i < keys.length; ++i) {
        ByteBuffer value = (values != null) ? values.get(i) : null;
        if (value == null) {
            actions.add(new Delete(keys[i]));
        } else {
            Put p = new Put(keys[i]);
            p.addColumn(colFam, colNameBuf, HConstants.LATEST_TIMESTAMP, value);
            actions.add(p);
        }
    }
    Object[] results = new Object[keys.length];
    htab.batch(actions, results);
    // TODO: should we check results array? we don't care about partial results
    conn.flush(htab);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) ArrayList(java.util.ArrayList) Row(org.apache.hadoop.hbase.client.Row) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ByteBuffer(java.nio.ByteBuffer) Put(org.apache.hadoop.hbase.client.Put)
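
For the TODO above, one hedged way to act on the results array: in the batch contract, a slot left null means that action failed even after retries, so failed keys can be surfaced rather than ignored. Bytes.toStringBinary is from org.apache.hadoop.hbase.util.Bytes; throwing here is an assumption, not what HBaseReadWrite actually does.

for (int i = 0; i < results.length; ++i) {
    if (results[i] == null) {
        // null slot == the Put/Delete built for keys[i] did not succeed
        throw new IOException("batch action failed for key " + Bytes.toStringBinary(keys[i]));
    }
}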

Example 5 with Put

Use of org.apache.hadoop.hbase.client.Put in project hive by apache.

From the class HBaseReadWrite, method storeFileMetadata.

@Override
public void storeFileMetadata(long fileId, ByteBuffer metadata, ByteBuffer[] addedCols, ByteBuffer[] addedVals) throws IOException, InterruptedException {
    @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(FILE_METADATA_TABLE);
    Put p = new Put(HBaseUtils.makeLongKey(fileId));
    p.addColumn(CATALOG_CF, ByteBuffer.wrap(CATALOG_COL), HConstants.LATEST_TIMESTAMP, metadata);
    assert (addedCols == null && addedVals == null) || (addedCols.length == addedVals.length);
    if (addedCols != null) {
        for (int i = 0; i < addedCols.length; ++i) {
            p.addColumn(STATS_CF, addedCols[i], HConstants.LATEST_TIMESTAMP, addedVals[i]);
        }
    }
    htab.put(p);
    conn.flush(htab);
}
Also used : HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Put(org.apache.hadoop.hbase.client.Put)
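
A hedged caller-side sketch of this single-file variant; the file ID, serialized buffers, column name, and the hbaseReadWrite instance below are made-up values, included only to show that everything crosses the API as ByteBuffers (typically ByteBuffer.wrap over already-serialized byte[] data):

long fileId = 42L;                                                         // placeholder file ID
byte[] serializedFooter = Bytes.toBytes("footer-bytes");                   // stand-in for real serialized metadata
byte[] serializedStats = Bytes.toBytes("stats-bytes");                     // stand-in for handler-produced stats
ByteBuffer metadata = ByteBuffer.wrap(serializedFooter);
ByteBuffer[] addedCols = { ByteBuffer.wrap(Bytes.toBytes("extra_col")) };  // hypothetical extra column name
ByteBuffer[] addedVals = { ByteBuffer.wrap(serializedStats) };
hbaseReadWrite.storeFileMetadata(fileId, metadata, addedCols, addedVals);  // hbaseReadWrite: an HBaseReadWrite instance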

Aggregations

Put (org.apache.hadoop.hbase.client.Put) 1416
Test (org.junit.Test) 672
Table (org.apache.hadoop.hbase.client.Table) 489
ArrayList (java.util.ArrayList) 317
Result (org.apache.hadoop.hbase.client.Result) 279
TableName (org.apache.hadoop.hbase.TableName) 257
IOException (java.io.IOException) 241
Delete (org.apache.hadoop.hbase.client.Delete) 225
Scan (org.apache.hadoop.hbase.client.Scan) 222
Cell (org.apache.hadoop.hbase.Cell) 200
Get (org.apache.hadoop.hbase.client.Get) 196
Configuration (org.apache.hadoop.conf.Configuration) 148
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor) 139
Connection (org.apache.hadoop.hbase.client.Connection) 122
KeyValue (org.apache.hadoop.hbase.KeyValue) 112
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner) 110
Admin (org.apache.hadoop.hbase.client.Admin) 89
List (java.util.List) 83
Mutation (org.apache.hadoop.hbase.client.Mutation) 82
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) 80