
Example 66 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in the Apache Hive project.

Class HBaseReadWrite, method printTable.

/**
   * Print out a table.
   * @param name The name for the table.  This must include dbname.tablename
   * @return string containing the table
   * @throws IOException
   * @throws TException
   */
String printTable(String name) throws IOException, TException {
    byte[] key = HBaseUtils.buildKey(name);
    @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(TABLE_TABLE);
    Get g = new Get(key);
    g.addColumn(CATALOG_CF, CATALOG_COL);
    g.addFamily(STATS_CF);
    Result result = htab.get(g);
    if (result.isEmpty())
        return noSuch(name, "table");
    return printOneTable(result);
}
Also used: Get (org.apache.hadoop.hbase.client.Get), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), Result (org.apache.hadoop.hbase.client.Result)
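For readers without the Hive metastore context: the method above is the standard HTableInterface point-read pattern (build a Get, fetch a Result, check isEmpty). Below is a minimal, self-contained sketch of the same pattern against a plain HBase 0.98/1.x client; the connection setup, table name, and column family/qualifier are hypothetical stand-ins for Hive's HBaseConnection wrapper and its CATALOG_CF/CATALOG_COL constants.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class SingleRowReadSketch {
    // Hypothetical column family and qualifier, standing in for Hive's CATALOG_CF / CATALOG_COL.
    private static final byte[] CF = Bytes.toBytes("c");
    private static final byte[] COL = Bytes.toBytes("cat");

    static String readOneRow(Configuration conf, String tableName, byte[] rowKey) throws IOException {
        HConnection connection = HConnectionManager.createConnection(conf);
        try {
            HTableInterface htab = connection.getTable(tableName);
            try {
                Get g = new Get(rowKey);
                g.addColumn(CF, COL);           // fetch one specific cell
                Result result = htab.get(g);    // single-row point read
                if (result.isEmpty()) return null;
                return Bytes.toString(result.getValue(CF, COL));
            } finally {
                htab.close();
            }
        } finally {
            connection.close();
        }
    }

    public static void main(String[] args) throws IOException {
        // Needs hbase-site.xml on the classpath and a running cluster with a "demo_table" table.
        System.out.println(readOneRow(HBaseConfiguration.create(), "demo_table", Bytes.toBytes("db1.tab1")));
    }
}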

Example 67 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in the Apache Hive project.

Class HBaseReadWrite, method getPrincipalDirectRoles.

/**
   * Find all roles directly participated in by a given principal.  This builds the role cache
   * because it assumes that subsequent calls may be made to find roles participated in indirectly.
   * @param name username or role name
   * @param type user or role
   * @return map of role name to grant info for all roles directly participated in.
   */
List<Role> getPrincipalDirectRoles(String name, PrincipalType type) throws IOException {
    buildRoleCache();
    Set<String> rolesFound = new HashSet<>();
    for (Map.Entry<String, HbaseMetastoreProto.RoleGrantInfoList> e : roleCache.entrySet()) {
        for (HbaseMetastoreProto.RoleGrantInfo giw : e.getValue().getGrantInfoList()) {
            if (HBaseUtils.convertPrincipalTypes(giw.getPrincipalType()) == type && giw.getPrincipalName().equals(name)) {
                rolesFound.add(e.getKey());
                break;
            }
        }
    }
    List<Role> directRoles = new ArrayList<>(rolesFound.size());
    List<Get> gets = new ArrayList<>();
    HTableInterface htab = conn.getHBaseTable(ROLE_TABLE);
    for (String roleFound : rolesFound) {
        byte[] key = HBaseUtils.buildKey(roleFound);
        Get g = new Get(key);
        g.addColumn(CATALOG_CF, CATALOG_COL);
        gets.add(g);
    }
    Result[] results = htab.get(gets);
    for (int i = 0; i < results.length; i++) {
        byte[] serialized = results[i].getValue(CATALOG_CF, CATALOG_COL);
        if (serialized != null) {
            directRoles.add(HBaseUtils.deserializeRole(results[i].getRow(), serialized));
        }
    }
    return directRoles;
}
Also used: ArrayList (java.util.ArrayList), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), Result (org.apache.hadoop.hbase.client.Result), Role (org.apache.hadoop.hive.metastore.api.Role), Get (org.apache.hadoop.hbase.client.Get), Map (java.util.Map), NavigableMap (java.util.NavigableMap), HashMap (java.util.HashMap), HashSet (java.util.HashSet)
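The interesting call above is the batched htab.get(gets): HTableInterface.get(List&lt;Get&gt;) performs all the reads in one round trip and returns a Result[] aligned with the input list, which is why the loop can index results[i] safely. A minimal sketch of that batch-read pattern, with hypothetical family/qualifier names:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

class BatchGetSketch {
    // Hypothetical column family and qualifier.
    private static final byte[] CF = Bytes.toBytes("c");
    private static final byte[] COL = Bytes.toBytes("cat");

    // Fetch one cell for each of the given row keys in a single round trip.
    static List<byte[]> fetchAll(HTableInterface htab, List<byte[]> rowKeys) throws IOException {
        List<Get> gets = new ArrayList<>(rowKeys.size());
        for (byte[] key : rowKeys) {
            Get g = new Get(key);
            g.addColumn(CF, COL);
            gets.add(g);
        }
        Result[] results = htab.get(gets);   // results[i] corresponds to gets.get(i)
        List<byte[]> values = new ArrayList<>(results.length);
        for (Result r : results) {
            byte[] v = r.getValue(CF, COL);
            if (v != null) {                 // skip rows that were missing or had no such cell
                values.add(v);
            }
        }
        return values;
    }
}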

Example 68 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in the Apache Hive project.

Class HBaseReadWrite, method replacePartitions.

void replacePartitions(List<Partition> oldParts, List<Partition> newParts, List<String> oldPartTypes) throws IOException {
    if (oldParts.size() != newParts.size()) {
        throw new RuntimeException("Number of old and new partitions must match.");
    }
    List<Put> puts = new ArrayList<>(newParts.size());
    for (int i = 0; i < newParts.size(); i++) {
        byte[] hash;
        byte[] oldHash = HBaseUtils.hashStorageDescriptor(oldParts.get(i).getSd(), md);
        byte[] newHash = HBaseUtils.hashStorageDescriptor(newParts.get(i).getSd(), md);
        if (Arrays.equals(oldHash, newHash)) {
            hash = oldHash;
        } else {
            decrementStorageDescriptorRefCount(oldParts.get(i).getSd());
            hash = putStorageDescriptor(newParts.get(i).getSd());
        }
        Partition newPart = newParts.get(i);
        byte[][] serialized = HBaseUtils.serializePartition(newPart,
            HBaseUtils.getPartitionKeyTypes(getTable(newPart.getDbName(), newPart.getTableName()).getPartitionKeys()),
            hash);
        Put p = new Put(serialized[0]);
        p.add(CATALOG_CF, CATALOG_COL, serialized[1]);
        puts.add(p);
        partCache.put(newParts.get(i).getDbName(), newParts.get(i).getTableName(), newParts.get(i));
        if (!newParts.get(i).getTableName().equals(oldParts.get(i).getTableName())) {
            // We need to remove the old record as well.
            deletePartition(oldParts.get(i).getDbName(), oldParts.get(i).getTableName(), oldPartTypes, oldParts.get(i).getValues(), false);
        }
    }
    HTableInterface htab = conn.getHBaseTable(PART_TABLE);
    htab.put(puts);
    conn.flush(htab);
}
Also used: Partition (org.apache.hadoop.hive.metastore.api.Partition), ArrayList (java.util.ArrayList), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), Put (org.apache.hadoop.hbase.client.Put)
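replacePartitions accumulates one Put per partition and writes them all with a single htab.put(puts) before flushing. A minimal sketch of that batched write pattern against a plain HTableInterface follows; the family/qualifier names are hypothetical, and flushCommits() is used on the assumption that it is roughly what Hive's conn.flush(htab) wrapper does.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

class BatchPutSketch {
    // Hypothetical column family and qualifier.
    private static final byte[] CF = Bytes.toBytes("c");
    private static final byte[] COL = Bytes.toBytes("cat");

    // Write every (rowKey -> serialized value) entry in one batched call, then flush.
    static void writeAll(HTableInterface htab, Map<String, byte[]> rows) throws IOException {
        List<Put> puts = new ArrayList<>(rows.size());
        for (Map.Entry<String, byte[]> e : rows.entrySet()) {
            Put p = new Put(Bytes.toBytes(e.getKey()));
            p.add(CF, COL, e.getValue());   // same pre-1.0 Put.add(family, qualifier, value) call the Hive code uses
            puts.add(p);
        }
        htab.put(puts);          // buffered client-side until flushed
        htab.flushCommits();     // explicit flush; assumed equivalent of Hive's conn.flush(htab)
    }
}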

Example 69 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in the Apache Hive project.

Class HBaseReadWrite, method putStorageDescriptor.

/**
   * Place the common parts of a storage descriptor into the cache and write the storage
   * descriptor out to HBase.  This should only be called if you are sure that the storage
   * descriptor needs to be added.  If you have changed a table or partition but not its storage
   * descriptor, do not call this method, as it will increment the reference count of the storage
   * descriptor.
   * @param storageDescriptor storage descriptor to store.
   * @return id of the entry in the cache, to be written in for the storage descriptor
   */
byte[] putStorageDescriptor(StorageDescriptor storageDescriptor) throws IOException {
    byte[] sd = HBaseUtils.serializeStorageDescriptor(storageDescriptor);
    byte[] key = HBaseUtils.hashStorageDescriptor(storageDescriptor, md);
    byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL);
    HTableInterface htab = conn.getHBaseTable(SD_TABLE);
    if (serializedRefCnt == null) {
        // We are the first to put it in the DB
        Put p = new Put(key);
        p.add(CATALOG_CF, CATALOG_COL, sd);
        p.add(CATALOG_CF, REF_COUNT_COL, "1".getBytes(HBaseUtils.ENCODING));
        htab.put(p);
        sdCache.put(new ByteArrayWrapper(key), storageDescriptor);
    } else {
        // Just increment the reference count
        int refCnt = Integer.parseInt(new String(serializedRefCnt, HBaseUtils.ENCODING)) + 1;
        Put p = new Put(key);
        p.add(CATALOG_CF, REF_COUNT_COL, Integer.toString(refCnt).getBytes(HBaseUtils.ENCODING));
        htab.put(p);
    }
    conn.flush(htab);
    return key;
}
Also used: HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), Put (org.apache.hadoop.hbase.client.Put)
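The reference counting above is a read-then-write sequence: fetch REF_COUNT_COL, then either insert the descriptor with a count of "1" or write back count + 1. A minimal sketch of just the increment step, with hypothetical family/qualifier names; like the original, it is not atomic under concurrent writers.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

class RefCountSketch {
    // Hypothetical column family and reference-count qualifier.
    private static final byte[] CF = Bytes.toBytes("c");
    private static final byte[] REF_COUNT = Bytes.toBytes("ref");

    // Read the current count for the row, write back count + 1 (or 1 if the row is new).
    static int incrementRefCount(HTableInterface htab, byte[] key) throws IOException {
        Get g = new Get(key);
        g.addColumn(CF, REF_COUNT);
        Result r = htab.get(g);
        byte[] current = r.getValue(CF, REF_COUNT);
        int next = (current == null) ? 1 : Integer.parseInt(Bytes.toString(current)) + 1;
        Put p = new Put(key);
        p.add(CF, REF_COUNT, Bytes.toBytes(Integer.toString(next)));  // count stored as a string, as in the Hive code
        htab.put(p);
        htab.flushCommits();
        return next;
    }
}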

Example 70 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in the Apache Hive project.

Class HBaseReadWrite, method printPartition.

/**
   * Print out a partition.
   * @param partKey The key for the partition.  This must include dbname.tablename._partkeys_
   *                where _partkeys_ is a dot separated list of partition values in the proper
   *                order.
   * @return string containing the partition
   * @throws IOException
   * @throws TException
   */
String printPartition(String partKey) throws IOException, TException {
    // First figure out the table and fetch it
    String[] partKeyParts = partKey.split(HBaseUtils.KEY_SEPARATOR_STR);
    if (partKeyParts.length < 3)
        return noSuch(partKey, "partition");
    Table table = getTable(partKeyParts[0], partKeyParts[1]);
    if (table == null)
        return noSuch(partKey, "partition");
    byte[] key = HBaseUtils.buildPartitionKey(partKeyParts[0], partKeyParts[1],
        HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys()),
        Arrays.asList(Arrays.copyOfRange(partKeyParts, 2, partKeyParts.length)));
    @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(PART_TABLE);
    Get g = new Get(key);
    g.addColumn(CATALOG_CF, CATALOG_COL);
    g.addFamily(STATS_CF);
    Result result = htab.get(g);
    if (result.isEmpty())
        return noSuch(partKey, "partition");
    return printOnePartition(result);
}
Also used: Table (org.apache.hadoop.hive.metastore.api.Table), Get (org.apache.hadoop.hbase.client.Get), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), Result (org.apache.hadoop.hbase.client.Result)
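Before it can issue the Get, printPartition has to split the dot-separated key into database, table, and partition values (at least three parts). A minimal sketch of that parsing step, assuming a literal '.' separator as the Javadoc describes (the real code uses HBaseUtils.KEY_SEPARATOR_STR) and a hypothetical holder class:

import java.util.Arrays;
import java.util.List;

class PartKeySketch {
    static class ParsedPartKey {
        final String dbName;
        final String tableName;
        final List<String> partVals;
        ParsedPartKey(String dbName, String tableName, List<String> partVals) {
            this.dbName = dbName;
            this.tableName = tableName;
            this.partVals = partVals;
        }
    }

    // Split "db.table.val1.val2..." into its parts; returns null when there is no partition value.
    static ParsedPartKey parse(String partKey) {
        String[] parts = partKey.split("\\.");   // assumes '.' is the key separator, per the Javadoc above
        if (parts.length < 3) {
            return null;
        }
        return new ParsedPartKey(parts[0], parts[1],
                Arrays.asList(Arrays.copyOfRange(parts, 2, parts.length)));
    }
}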

Aggregations

HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 117
Result (org.apache.hadoop.hbase.client.Result): 43
Put (org.apache.hadoop.hbase.client.Put): 41
IOException (java.io.IOException): 36
ArrayList (java.util.ArrayList): 26
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 23
Get (org.apache.hadoop.hbase.client.Get): 21
Scan (org.apache.hadoop.hbase.client.Scan): 21
Test (org.junit.Test): 20
SQLException (java.sql.SQLException): 19
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 17
Connection (java.sql.Connection): 15
HashMap (java.util.HashMap): 15
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 13
Delete (org.apache.hadoop.hbase.client.Delete): 12
Mutation (org.apache.hadoop.hbase.client.Mutation): 12
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException): 11
ResultSet (java.sql.ResultSet): 10
Configuration (org.apache.hadoop.conf.Configuration): 9
ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices): 9