Search in sources:

Example 1 with Operator

Use of org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator in project hive by apache.

From the class HBaseReadWrite, method scanPartitionsInternal:

/**
 * Builds the HBase scan parameters (row-key prefix plus an optional row filter) for
 * fetching partitions of a table whose values may be only partially specified.
 *
 * The leading, fully-specified partition values are folded into the key prefix so the
 * scan range is as tight as possible; values at or after the first wildcard are instead
 * expressed as a {@link PartitionKeyComparator}-based filter ({@code LIKE ".*"} for a
 * "*" value, an inclusive equality Range otherwise).
 *
 * @param dbName        database the table lives in
 * @param tableName     table whose partitions are being scanned
 * @param partVals      partition values in partition-key order; "*" is a wildcard, and
 *                      the list may be shorter than the table's full partition key list
 * @param maxPartitions cap on the number of partitions, passed through to the returned
 *                      PartitionScanInfo
 * @return scan info carrying the start key prefix, its end prefix, the partition cap,
 *         and the filter (null when every value was folded into the prefix)
 * @throws IOException           on HBase access failure
 * @throws NoSuchObjectException if the table does not exist
 */
private PartitionScanInfo scanPartitionsInternal(String dbName, String tableName, List<String> partVals, int maxPartitions) throws IOException, NoSuchObjectException {
    // First, build as much of the key as we can so that we make the scan as tight as possible.
    List<String> keyElements = new ArrayList<>();
    keyElements.add(dbName);
    keyElements.add(tableName);
    // Index of the first "*" wildcard in partVals, or -1 if none was seen.
    int firstStar = -1;
    for (int i = 0; i < partVals.size(); i++) {
        if ("*".equals(partVals.get(i))) {
            firstStar = i;
            break;
        } else {
            // NOTE(review): an empty value also appears to act as a wildcard ("means star");
            // it stops the key prefix here but does NOT set firstStar — confirm against callers.
            if (partVals.get(i).equals("")) {
                break;
            } else {
                keyElements.add(partVals.get(i));
            }
        }
    }
    byte[] keyPrefix;
    // We need to fetch the table to determine if the user fully specified the partitions or
    // not, as it affects how we build the key.
    Table table = getTable(dbName, tableName);
    if (table == null) {
        throw new NoSuchObjectException("Unable to find table " + dbName + "." + tableName);
    }
    // keyElements holds [dbName, tableName, val0, val1, ...]; the "- 2" and subList(2, ...)
    // offsets skip those two leading name elements so only partition values reach the key.
    keyPrefix = HBaseUtils.buildPartitionKey(dbName, tableName, HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys().subList(0, keyElements.size() - 2)), keyElements.subList(2, keyElements.size()));
    // Now, build a filter out of the remaining keys
    List<PartitionKeyComparator.Range> ranges = new ArrayList<PartitionKeyComparator.Range>();
    List<Operator> ops = new ArrayList<Operator>();
    // A filter is only needed when the values do not exactly specify every partition key.
    if (!(partVals.size() == table.getPartitionKeys().size() && firstStar == -1)) {
        // Start at the first wildcard (or 0 if none); keys already in the prefix need no filter.
        for (int i = Math.max(0, firstStar); i < table.getPartitionKeys().size() && i < partVals.size(); i++) {
            if ("*".equals(partVals.get(i))) {
                // Wildcard: a LIKE ".*" operator matches any value for this partition key.
                PartitionKeyComparator.Operator op = new PartitionKeyComparator.Operator(PartitionKeyComparator.Operator.Type.LIKE, table.getPartitionKeys().get(i).getName(), ".*");
                ops.add(op);
            } else {
                // Exact value: an inclusive range [val, val] acts as an equality check.
                PartitionKeyComparator.Range range = new PartitionKeyComparator.Range(table.getPartitionKeys().get(i).getName(), new PartitionKeyComparator.Mark(partVals.get(i), true), new PartitionKeyComparator.Mark(partVals.get(i), true));
                ranges.add(range);
            }
        }
    }
    Filter filter = null;
    if (!ranges.isEmpty() || !ops.isEmpty()) {
        // The comparator receives every partition key name/type (comma-joined) plus the
        // ranges and operators built above; EQUAL means "row matches the comparator".
        filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new PartitionKeyComparator(StringUtils.join(HBaseUtils.getPartitionNames(table.getPartitionKeys()), ","), StringUtils.join(HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys()), ","), ranges, ops));
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Scanning partitions with prefix <" + new String(keyPrefix) + "> and filter <" + filter + ">");
    }
    return new PartitionScanInfo(dbName, tableName, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), maxPartitions, filter);
}
Also used : Operator(org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator) Table(org.apache.hadoop.hive.metastore.api.Table) ArrayList(java.util.ArrayList) RowFilter(org.apache.hadoop.hbase.filter.RowFilter) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) Filter(org.apache.hadoop.hbase.filter.Filter) CompareFilter(org.apache.hadoop.hbase.filter.CompareFilter) BloomFilter(org.apache.hive.common.util.BloomFilter) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException)

Aggregations

ArrayList (java.util.ArrayList)1 CompareFilter (org.apache.hadoop.hbase.filter.CompareFilter)1 Filter (org.apache.hadoop.hbase.filter.Filter)1 FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter)1 RowFilter (org.apache.hadoop.hbase.filter.RowFilter)1 NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException)1 Table (org.apache.hadoop.hive.metastore.api.Table)1 Operator (org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator)1 BloomFilter (org.apache.hive.common.util.BloomFilter)1