
Example 1 with Row

Use of org.apache.hadoop.hbase.client.Row in project hive by apache.

The class HBaseReadWrite, method storeFileMetadata.

/**
   * @param fileIds file ID list.
   * @param metadataBuffers Serialized file metadatas, one per file ID.
   * @param addedCols The column names for additional columns created by file-format-specific
   *                  metadata handler, to be stored in the cache.
   * @param addedVals The values for addedCols; one value per file ID per added column.
   */
@Override
public void storeFileMetadata(List<Long> fileIds, List<ByteBuffer> metadataBuffers, ByteBuffer[] addedCols, ByteBuffer[][] addedVals) throws IOException, InterruptedException {
    byte[][] keys = new byte[fileIds.size()][];
    for (int i = 0; i < fileIds.size(); ++i) {
        keys[i] = HBaseUtils.makeLongKey(fileIds.get(i));
    }
    // HBase APIs are weird. To supply bytebuffer value, you have to also have bytebuffer
    // column name, but not column family. So there. Perhaps we should add these to constants too.
    ByteBuffer colNameBuf = ByteBuffer.wrap(CATALOG_COL);
    @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(FILE_METADATA_TABLE);
    List<Row> actions = new ArrayList<>(keys.length);
    for (int keyIx = 0; keyIx < keys.length; ++keyIx) {
        ByteBuffer value = (metadataBuffers != null) ? metadataBuffers.get(keyIx) : null;
        ByteBuffer[] av = addedVals == null ? null : addedVals[keyIx];
        if (value == null) {
            actions.add(new Delete(keys[keyIx]));
            assert av == null;
        } else {
            Put p = new Put(keys[keyIx]);
            p.addColumn(CATALOG_CF, colNameBuf, HConstants.LATEST_TIMESTAMP, value);
            if (av != null) {
                assert av.length == addedCols.length;
                for (int colIx = 0; colIx < addedCols.length; ++colIx) {
                    p.addColumn(STATS_CF, addedCols[colIx], HConstants.LATEST_TIMESTAMP, av[colIx]);
                }
            }
            actions.add(p);
        }
    }
    Object[] results = new Object[keys.length];
    htab.batch(actions, results);
    // TODO: should we check results array? we don't care about partial results
    conn.flush(htab);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) ArrayList(java.util.ArrayList) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ByteBuffer(java.nio.ByteBuffer) Put(org.apache.hadoop.hbase.client.Put) Row(org.apache.hadoop.hbase.client.Row)
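
For context, the same mixed Put/Delete batching can be written against the non-deprecated Table API that replaced HTableInterface. The sketch below is illustrative only: the table name, column family, and qualifier are hypothetical, not taken from the Hive source above.

// Minimal sketch, assuming an open Connection and an existing table "file_metadata" with family "c".
// A null value marks a key for deletion, mirroring the Hive logic above.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowBatchSketch {
    public static void storeOrDrop(Connection conn, byte[][] keys, byte[][] values)
            throws IOException, InterruptedException {
        try (Table table = conn.getTable(TableName.valueOf("file_metadata"))) {
            List<Row> actions = new ArrayList<>(keys.length);
            for (int i = 0; i < keys.length; i++) {
                if (values[i] == null) {
                    actions.add(new Delete(keys[i]));
                } else {
                    Put p = new Put(keys[i]);
                    p.addColumn(Bytes.toBytes("c"), Bytes.toBytes("data"), values[i]);
                    actions.add(p);
                }
            }
            // batch() fills results positionally: one entry (Result or exception) per action, in submission order.
            Object[] results = new Object[actions.size()];
            table.batch(actions, results);
        }
    }
}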

Example 2 with Row

Use of org.apache.hadoop.hbase.client.Row in project hive by apache.

The class HBaseReadWrite, method multiModify.

private void multiModify(String table, byte[][] keys, byte[] colFam, byte[] colName, List<ByteBuffer> values) throws IOException, InterruptedException {
    assert values == null || keys.length == values.size();
    // HBase APIs are weird. To supply bytebuffer value, you have to also have bytebuffer
    // column name, but not column family. So there. Perhaps we should add these to constants too.
    ByteBuffer colNameBuf = ByteBuffer.wrap(colName);
    @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(table);
    List<Row> actions = new ArrayList<>(keys.length);
    for (int i = 0; i < keys.length; ++i) {
        ByteBuffer value = (values != null) ? values.get(i) : null;
        if (value == null) {
            actions.add(new Delete(keys[i]));
        } else {
            Put p = new Put(keys[i]);
            p.addColumn(colFam, colNameBuf, HConstants.LATEST_TIMESTAMP, value);
            actions.add(p);
        }
    }
    Object[] results = new Object[keys.length];
    htab.batch(actions, results);
    // TODO: should we check results array? we don't care about partial results
    conn.flush(htab);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) ArrayList(java.util.ArrayList) Row(org.apache.hadoop.hbase.client.Row) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ByteBuffer(java.nio.ByteBuffer) Put(org.apache.hadoop.hbase.client.Put)
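
A hypothetical caller of multiModify, not taken from the Hive source: passing a null values list turns every key into a Delete, because the per-key value lookup then yields null for each row.

// Illustrative only; FILE_METADATA_TABLE, CATALOG_CF, and CATALOG_COL are the constants used in Example 1.
byte[][] keys = { HBaseUtils.makeLongKey(1L), HBaseUtils.makeLongKey(2L) };
multiModify(FILE_METADATA_TABLE, keys, CATALOG_CF, CATALOG_COL, null);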

Example 3 with Row

Use of org.apache.hadoop.hbase.client.Row in project hbase by apache.

The class RequestConverter, method buildRegionAction.

/**
   * Create a protocol buffer multi request for a list of actions.
   * Propagates Actions original index.
   *
   * @param regionName
   * @param actions
   * @return a multi request
   * @throws IOException
   */
public static RegionAction.Builder buildRegionAction(final byte[] regionName, final List<Action> actions, final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) throws IOException {
    ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null;
    for (Action action : actions) {
        Row row = action.getAction();
        actionBuilder.clear();
        actionBuilder.setIndex(action.getOriginalIndex());
        mutationBuilder.clear();
        if (row instanceof Get) {
            Get g = (Get) row;
            regionActionBuilder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g)));
        } else if (row instanceof Put) {
            regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.PUT, (Put) row, mutationBuilder)));
        } else if (row instanceof Delete) {
            regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.DELETE, (Delete) row, mutationBuilder)));
        } else if (row instanceof Append) {
            regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.APPEND, (Append) row, mutationBuilder, action.getNonce())));
        } else if (row instanceof Increment) {
            regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation((Increment) row, mutationBuilder, action.getNonce())));
        } else if (row instanceof RegionCoprocessorServiceExec) {
            RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
            // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value = org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations.unsafeWrap(exec.getRequest().toByteArray());
            if (cpBuilder == null) {
                cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder();
            } else {
                cpBuilder.clear();
            }
            regionActionBuilder.addAction(actionBuilder.setServiceCall(cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())).setServiceName(exec.getMethod().getService().getFullName()).setMethodName(exec.getMethod().getName()).setRequest(value)));
        } else if (row instanceof RowMutations) {
            throw new UnsupportedOperationException("No RowMutations in multi calls; use mutateRow");
        } else {
            throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
        }
    }
    return regionActionBuilder;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Action(org.apache.hadoop.hbase.client.Action) RegionAction(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Put(org.apache.hadoop.hbase.client.Put) RegionCoprocessorServiceExec(org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec) RowMutations(org.apache.hadoop.hbase.client.RowMutations) Append(org.apache.hadoop.hbase.client.Append) Get(org.apache.hadoop.hbase.client.Get) Increment(org.apache.hadoop.hbase.client.Increment) Row(org.apache.hadoop.hbase.client.Row)
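
The dispatch above is driven purely by the concrete Row subtype of each action. A compact way to see the same pattern outside the protobuf machinery is a helper that classifies a batch before it is submitted; this is an illustrative sketch, not part of RequestConverter.

// Illustrative sketch: the same instanceof dispatch over Row subtypes, here only counting action kinds.
import java.util.List;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;

final class ActionHistogram {
    static int[] classify(List<? extends Row> actions) {
        int gets = 0, puts = 0, deletes = 0, increments = 0, appends = 0, other = 0;
        for (Row row : actions) {
            if (row instanceof Get) gets++;
            else if (row instanceof Put) puts++;
            else if (row instanceof Delete) deletes++;
            else if (row instanceof Increment) increments++;
            else if (row instanceof Append) appends++;
            else other++; // e.g. RowMutations, which buildRegionAction above rejects for multi calls
        }
        return new int[] { gets, puts, deletes, increments, appends, other };
    }
}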

Example 4 with Row

Use of org.apache.hadoop.hbase.client.Row in project phoenix by apache.

The class DynamicFamilyIT, method initTableValues.

@SuppressWarnings("deprecation")
private static void initTableValues() throws Exception {
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
    HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(WEB_STATS_SCHEMA_NAME, WEB_STATS));
    try {
        // Insert rows using standard HBase mechanism with standard HBase "types"
        Put put;
        List<Row> mutations = new ArrayList<Row>();
        put = new Put(Bytes.toBytes("entry1"));
        put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
        put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID2_BYTES), PInteger.INSTANCE.toBytes(ENTRY1_CLICK_COUNT));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID1_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID2_LOGIN_TIME));
        mutations.add(put);
        put = new Put(Bytes.toBytes("entry2"));
        put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
        put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID3_BYTES), PInteger.INSTANCE.toBytes(ENTRY2_CLICK_COUNT));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID2_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID3_LOGIN_TIME));
        mutations.add(put);
        put = new Put(Bytes.toBytes("entry3"));
        put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
        put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID1_BYTES), PInteger.INSTANCE.toBytes(ENTRY3_CLICK_COUNT));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID1_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID2_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID3_LOGIN_TIME));
        mutations.add(put);
        hTable.batch(mutations);
    } finally {
        hTable.close();
    }
}
Also used : ArrayList(java.util.ArrayList) Row(org.apache.hadoop.hbase.client.Row) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) Put(org.apache.hadoop.hbase.client.Put)
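
The dynamic columns here are ordinary HBase qualifiers whose names are built at write time by concatenating a fixed prefix with a user id. A minimal, self-contained version of that trick using plain HBase utilities instead of Phoenix's ByteUtil might look like the sketch below; the family and prefix names are hypothetical.

// Illustrative sketch: write a "dynamic" column whose qualifier is prefix + userId.
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

final class DynamicColumnSketch {
    private static final byte[] CF = Bytes.toBytes("B");
    private static final byte[] LAST_LOGIN_PREFIX = Bytes.toBytes("LAST_LOGIN_TIME_");

    static Put lastLoginPut(byte[] rowKey, byte[] userId, long loginTimeMillis) {
        // Bytes.add concatenates prefix and user id into the qualifier,
        // the role ByteUtil.concat plays in the Phoenix test above.
        byte[] qualifier = Bytes.add(LAST_LOGIN_PREFIX, userId);
        Put put = new Put(rowKey);
        put.addColumn(CF, qualifier, Bytes.toBytes(loginTimeMillis));
        return put;
    }
}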

Example 5 with Row

Use of org.apache.hadoop.hbase.client.Row in project phoenix by apache.

The class NativeHBaseTypesIT, method initTableValues.

@SuppressWarnings("deprecation")
private String initTableValues() throws Exception {
    final String tableName = SchemaUtil.getTableName(generateUniqueName(), generateUniqueName());
    final byte[] tableBytes = tableName.getBytes();
    final byte[] familyName = Bytes.toBytes(SchemaUtil.normalizeIdentifier("1"));
    final byte[][] splits = new byte[][] { Bytes.toBytes(20), Bytes.toBytes(30) };
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).getAdmin();
    try {
        HTableDescriptor descriptor = new HTableDescriptor(tableBytes);
        HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);
        columnDescriptor.setKeepDeletedCells(true);
        descriptor.addFamily(columnDescriptor);
        admin.createTable(descriptor, splits);
    } finally {
        admin.close();
    }
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
    HTableInterface hTable = services.getTable(tableBytes);
    try {
        // Insert rows using standard HBase mechanism with standard HBase "types"
        List<Row> mutations = new ArrayList<Row>();
        byte[] family = Bytes.toBytes("1");
        byte[] uintCol = Bytes.toBytes("UINT_COL");
        byte[] ulongCol = Bytes.toBytes("ULONG_COL");
        byte[] key, bKey;
        Put put;
        bKey = key = ByteUtil.concat(Bytes.toBytes(20), Bytes.toBytes(200L), Bytes.toBytes("b"));
        put = new Put(key);
        put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(5000));
        put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(50000L));
        mutations.add(put);
        // FIXME: the version of the Delete constructor without the lock args was introduced
        // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
        // of the client.
        long ts = EnvironmentEdgeManager.currentTimeMillis();
        Delete del = new Delete(key, ts);
        mutations.add(del);
        put = new Put(key);
        put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(2000));
        put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(20000L));
        mutations.add(put);
        key = ByteUtil.concat(Bytes.toBytes(10), Bytes.toBytes(100L), Bytes.toBytes("a"));
        put = new Put(key);
        put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(5));
        put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(50L));
        mutations.add(put);
        put = new Put(key);
        put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(10));
        put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(100L));
        mutations.add(put);
        key = ByteUtil.concat(Bytes.toBytes(30), Bytes.toBytes(300L), Bytes.toBytes("c"));
        put = new Put(key);
        put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(3000));
        put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(30000L));
        mutations.add(put);
        key = ByteUtil.concat(Bytes.toBytes(40), Bytes.toBytes(400L), Bytes.toBytes("d"));
        put = new Put(key);
        put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(4000));
        put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(40000L));
        mutations.add(put);
        hTable.batch(mutations);
        Result r = hTable.get(new Get(bKey));
        assertFalse(r.isEmpty());
    } finally {
        hTable.close();
    }
    // Create Phoenix table after HBase table was created through the native APIs
    // The timestamp of the table creation must be later than the timestamp of the data
    String ddl = "create table " + tableName + "   (uint_key unsigned_int not null," + "    ulong_key unsigned_long not null," + "    string_key varchar not null,\n" + "    \"1\".uint_col unsigned_int," + "    \"1\".ulong_col unsigned_long" + "    CONSTRAINT pk PRIMARY KEY (uint_key, ulong_key, string_key))\n" + HColumnDescriptor.DATA_BLOCK_ENCODING + "='" + DataBlockEncoding.NONE + "'";
    try (Connection conn = DriverManager.getConnection(url)) {
        conn.createStatement().execute(ddl);
    }
    return tableName;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Result(org.apache.hadoop.hbase.client.Result) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) Get(org.apache.hadoop.hbase.client.Get) Row(org.apache.hadoop.hbase.client.Row) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices)
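
The test depends on Phoenix's unsigned types sharing their byte encoding with HBase's own Bytes utilities (for non-negative values), which is why rows written with Bytes.toBytes(int) and Bytes.toBytes(long) remain readable through the Phoenix table created afterwards. A tiny round-trip check of that encoding assumption:

// Minimal sketch: the raw encodings the test relies on are just Bytes.toBytes / Bytes.toInt / Bytes.toLong.
import org.apache.hadoop.hbase.util.Bytes;

final class NativeEncodingCheck {
    public static void main(String[] args) {
        byte[] uintBytes = Bytes.toBytes(5000);    // 4-byte big-endian int; matches UNSIGNED_INT for values >= 0
        byte[] ulongBytes = Bytes.toBytes(50000L); // 8-byte big-endian long; matches UNSIGNED_LONG for values >= 0
        System.out.println(Bytes.toInt(uintBytes) == 5000);     // true
        System.out.println(Bytes.toLong(ulongBytes) == 50000L); // true
    }
}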

Aggregations

Row (org.apache.hadoop.hbase.client.Row): 20
Put (org.apache.hadoop.hbase.client.Put): 16
ArrayList (java.util.ArrayList): 14
Delete (org.apache.hadoop.hbase.client.Delete): 12
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 7
IOException (java.io.IOException): 5
Append (org.apache.hadoop.hbase.client.Append): 4
Get (org.apache.hadoop.hbase.client.Get): 4
Increment (org.apache.hadoop.hbase.client.Increment): 4
Pair (org.apache.hadoop.hbase.util.Pair): 4
List (java.util.List): 3
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 3
Action (org.apache.hadoop.hbase.client.Action): 3
RegionCoprocessorServiceExec (org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec): 3
Result (org.apache.hadoop.hbase.client.Result): 3
RowMutations (org.apache.hadoop.hbase.client.RowMutations): 3
Table (org.apache.hadoop.hbase.client.Table): 3
RegionAction (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction): 3
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 3
ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices): 3