
Example 1 with Partition

Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.

The class HBaseStore, method getPartitions.

@Override
public List<Partition> getPartitions(String dbName, String tableName, int max) throws MetaException, NoSuchObjectException {
    boolean commit = false;
    openTransaction();
    try {
        List<Partition> parts = getHBase().scanPartitionsInTable(HiveStringUtils.normalizeIdentifier(dbName), HiveStringUtils.normalizeIdentifier(tableName), max);
        commit = true;
        return parts;
    } catch (IOException e) {
        LOG.error("Unable to get partitions", e);
        throw new MetaException("Error scanning partitions");
    } finally {
        commitOrRoleBack(commit);
    }
}
Also used: Partition (org.apache.hadoop.hive.metastore.api.Partition), IOException (java.io.IOException), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)
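
All of the HBaseStore read paths above share this shape: a commit flag that is flipped only after the scan succeeds, and a finally block that commits or rolls back based on it (the helper really is spelled commitOrRoleBack in the Hive source). A minimal, self-contained sketch of the pattern, with hypothetical hooks standing in for the real store internals:

import java.io.IOException;
import java.util.List;

public abstract class TransactionalReadSketch<T> {
    // Hypothetical hooks standing in for HBaseStore's transaction machinery.
    protected abstract void openTransaction();
    protected abstract void commitOrRollback(boolean commit);
    protected abstract List<T> doScan() throws IOException;

    public List<T> readWithTransaction() throws IOException {
        // The flag stays false unless the scan completes.
        boolean commit = false;
        openTransaction();
        try {
            List<T> result = doScan();
            commit = true;  // reached only on success
            return result;
        } finally {
            // On any exception, commit is still false, so this rolls back.
            commitOrRollback(commit);
        }
    }
}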

Example 2 with Partition

Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.

The class HBaseStore, method listPartitionNames.

@Override
public List<String> listPartitionNames(String db_name, String tbl_name, short max_parts) throws MetaException {
    boolean commit = false;
    openTransaction();
    try {
        List<Partition> parts = getHBase().scanPartitionsInTable(HiveStringUtils.normalizeIdentifier(db_name), HiveStringUtils.normalizeIdentifier(tbl_name), max_parts);
        if (parts == null)
            return null;
        List<String> names = new ArrayList<String>(parts.size());
        Table table = getHBase().getTable(HiveStringUtils.normalizeIdentifier(db_name), HiveStringUtils.normalizeIdentifier(tbl_name));
        for (Partition p : parts) {
            names.add(buildExternalPartName(table, p));
        }
        commit = true;
        return names;
    } catch (IOException e) {
        LOG.error("Unable to get partitions", e);
        throw new MetaException("Error scanning partitions");
    } finally {
        commitOrRoleBack(commit);
    }
}
Also used: Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), ArrayList (java.util.ArrayList), IOException (java.io.IOException), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)
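
buildExternalPartName converts a Partition back into Hive's standard key1=val1/key2=val2 name using the table's partition columns. A simplified, self-contained sketch of that conversion; note the real Hive helper also escapes special characters in the values, which this skips:

import java.util.Arrays;
import java.util.List;

final class PartNameSketch {
    // Zip the table's partition column names with the partition's values
    // into key=value pairs joined by '/'.
    static String buildPartName(List<String> partCols, List<String> partVals) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < partCols.size(); i++) {
            if (i > 0) sb.append('/');
            sb.append(partCols.get(i)).append('=').append(partVals.get(i));
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        // Prints "ds=2024-01-01/hr=00".
        System.out.println(buildPartName(Arrays.asList("ds", "hr"),
                                         Arrays.asList("2024-01-01", "00")));
    }
}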

Example 3 with Partition

Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.

The class HBaseReadWrite, method getPartition.

private Partition getPartition(String dbName, String tableName, List<String> partVals, boolean populateCache) throws IOException {
    Partition cached = partCache.get(dbName, tableName, partVals);
    if (cached != null)
        return cached;
    byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()), partVals);
    byte[] serialized = read(PART_TABLE, key, CATALOG_CF, CATALOG_COL);
    if (serialized == null)
        return null;
    HBaseUtils.StorageDescriptorParts sdParts = HBaseUtils.deserializePartition(dbName, tableName, partVals, serialized);
    StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash);
    HBaseUtils.assembleStorageDescriptor(sd, sdParts);
    if (populateCache)
        partCache.put(dbName, tableName, sdParts.containingPartition);
    return sdParts.containingPartition;
}
Also used: Partition (org.apache.hadoop.hive.metastore.api.Partition), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor)
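
getPartition is a read-through cache: try partCache first, fall back to an HBase read plus deserialization on a miss, and optionally repopulate the cache with the result. The same pattern in a generic, self-contained sketch; the map and loader here stand in for partCache and the HBase read and are not Hive classes:

import java.io.IOException;
import java.util.concurrent.ConcurrentHashMap;

class ReadThroughCacheSketch<K, V> {
    interface Loader<K, V> {
        V load(K key) throws IOException;
    }

    private final ConcurrentHashMap<K, V> cache = new ConcurrentHashMap<>();
    private final Loader<K, V> loader;  // stands in for the HBase read + deserialize step

    ReadThroughCacheSketch(Loader<K, V> loader) {
        this.loader = loader;
    }

    V get(K key, boolean populateCache) throws IOException {
        V cached = cache.get(key);
        if (cached != null)
            return cached;            // cache hit
        V loaded = loader.load(key);  // miss: go to the backing store
        if (loaded == null)
            return null;              // absent from the store as well
        if (populateCache)
            cache.put(key, loaded);
        return loaded;
    }
}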

Example 4 with Partition

Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.

The class HBaseReadWrite, method putPartitions.

/**
   * Add a group of partitions.  This should only be used when all partitions are new.  It
   * blindly increments the ref count on the storage descriptor.
   * @param partitions list of partitions to add
   * @throws IOException
   */
void putPartitions(List<Partition> partitions) throws IOException {
    List<Put> puts = new ArrayList<>(partitions.size());
    for (Partition partition : partitions) {
        byte[] hash = putStorageDescriptor(partition.getSd());
        List<String> partTypes = HBaseUtils.getPartitionKeyTypes(getTable(partition.getDbName(), partition.getTableName()).getPartitionKeys());
        byte[][] serialized = HBaseUtils.serializePartition(partition, partTypes, hash);
        Put p = new Put(serialized[0]);
        p.add(CATALOG_CF, CATALOG_COL, serialized[1]);
        puts.add(p);
        partCache.put(partition.getDbName(), partition.getTableName(), partition);
    }
    HTableInterface htab = conn.getHBaseTable(PART_TABLE);
    htab.put(puts);
    conn.flush(htab);
}
Also used: Partition (org.apache.hadoop.hive.metastore.api.Partition), ArrayList (java.util.ArrayList), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), Put (org.apache.hadoop.hbase.client.Put)
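
A sketch of how a caller might drive putPartitions, building brand-new Thrift Partition objects for one batch. The names hbase, sd, db, and tbl are illustrative assumptions; the usual java.util imports and the Hive metastore API classes are taken as given, and since putPartitions is package-private the caller would have to live in the same package:

// Hypothetical caller. Assumes `sd` is an already-populated StorageDescriptor
// for the table and `hbase` is an HBaseReadWrite handle. Only valid when every
// partition in the batch is new, per the javadoc above.
void putTwoNewPartitions(HBaseReadWrite hbase, StorageDescriptor sd) throws IOException {
    List<Partition> newParts = new ArrayList<>();
    for (String ds : Arrays.asList("2024-01-01", "2024-01-02")) {
        Partition p = new Partition();
        p.setDbName("db");
        p.setTableName("tbl");
        p.setValues(Collections.singletonList(ds));  // one value per partition column
        p.setSd(sd);                                 // shared SD; its ref count is bumped once per partition
        newParts.add(p);
    }
    hbase.putPartitions(newParts);                   // one batched HBase put, then a flush
}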

Example 5 with Partition

Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.

The class HiveAlterHandler, method alterPartitions.

@Override
public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String dbname, final String name, final List<Partition> new_parts, EnvironmentContext environmentContext, HMSHandler handler) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
    List<Partition> oldParts = new ArrayList<Partition>();
    List<List<String>> partValsList = new ArrayList<List<String>>();
    List<MetaStoreEventListener> transactionalListeners = null;
    if (handler != null) {
        transactionalListeners = handler.getTransactionalListeners();
    }
    Table tbl = msdb.getTable(dbname, name);
    if (tbl == null) {
        throw new InvalidObjectException("Unable to alter partitions because table or database does not exist.");
    }
    boolean success = false;
    try {
        msdb.openTransaction();
        for (Partition tmpPart : new_parts) {
            // Set DDL time to now if not specified
            if (tmpPart.getParameters() == null || tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME) == null || Integer.parseInt(tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
                tmpPart.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System.currentTimeMillis() / 1000));
            }
            Partition oldTmpPart = msdb.getPartition(dbname, name, tmpPart.getValues());
            oldParts.add(oldTmpPart);
            partValsList.add(tmpPart.getValues());
            if (MetaStoreUtils.requireCalStats(hiveConf, oldTmpPart, tmpPart, tbl, environmentContext)) {
                // Check if stats are same, no need to update
                if (MetaStoreUtils.isFastStatsSame(oldTmpPart, tmpPart)) {
                    MetaStoreUtils.updateBasicState(environmentContext, tmpPart.getParameters());
                } else {
                    MetaStoreUtils.updatePartitionStatsFast(tmpPart, wh, false, true, environmentContext);
                }
            }
            updatePartColumnStats(msdb, dbname, name, oldTmpPart.getValues(), tmpPart);
        }
        msdb.alterPartitions(dbname, name, partValsList, new_parts);
        Iterator<Partition> oldPartsIt = oldParts.iterator();
        for (Partition newPart : new_parts) {
            Partition oldPart;
            if (oldPartsIt.hasNext()) {
                oldPart = oldPartsIt.next();
            } else {
                throw new InvalidOperationException("Missing old partition corresponding to new partition " + "when invoking MetaStoreEventListener for alterPartitions event.");
            }
            if (transactionalListeners != null && transactionalListeners.size() > 0) {
                AlterPartitionEvent alterPartitionEvent = new AlterPartitionEvent(oldPart, newPart, tbl, true, handler);
                for (MetaStoreEventListener transactionalListener : transactionalListeners) {
                    transactionalListener.onAlterPartition(alterPartitionEvent);
                }
            }
        }
        success = msdb.commitTransaction();
    } catch (InvalidObjectException | NoSuchObjectException e) {
        throw new InvalidOperationException("Alter partition operation failed: " + e);
    } finally {
        if (!success) {
            msdb.rollbackTransaction();
        }
    }
    return oldParts;
}
Also used: Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), AlterPartitionEvent (org.apache.hadoop.hive.metastore.events.AlterPartitionEvent), ArrayList (java.util.ArrayList), InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException), List (java.util.List), InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException), NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException)
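
The first thing the loop above does to each incoming partition is default its DDL time. That check, isolated into a standalone sketch that uses a plain map in place of the Thrift parameter bag (in Hive, hive_metastoreConstants.DDL_TIME is the "transient_lastDdlTime" parameter):

import java.util.HashMap;
import java.util.Map;

final class DdlTimeSketch {
    // Stamp the DDL time if it is missing or zero, mirroring the condition in
    // alterPartitions. Returns the (possibly newly created) parameter map.
    static Map<String, String> stampDdlTimeIfUnset(Map<String, String> params, String ddlTimeKey) {
        if (params == null) {
            // Thrift's putToParameters creates the map on demand; mirror that here.
            params = new HashMap<>();
        }
        String current = params.get(ddlTimeKey);
        if (current == null || Integer.parseInt(current) == 0) {
            // Seconds since the epoch, matching System.currentTimeMillis() / 1000.
            params.put(ddlTimeKey, Long.toString(System.currentTimeMillis() / 1000));
        }
        return params;
    }
}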

Aggregations

Partition (org.apache.hadoop.hive.metastore.api.Partition): 736
Test (org.junit.Test): 430
Table (org.apache.hadoop.hive.metastore.api.Table): 314
ArrayList (java.util.ArrayList): 305
MetastoreCheckinTest (org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest): 254
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 133
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 116
List (java.util.List): 109
Path (org.apache.hadoop.fs.Path): 109
StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor): 107
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 89
SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo): 71
HashMap (java.util.HashMap): 64
PartitionBuilder (org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder): 63
TException (org.apache.thrift.TException): 63
IOException (java.io.IOException): 61
Database (org.apache.hadoop.hive.metastore.api.Database): 55
PartitionSpecProxy (org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy): 53
FileSystem (org.apache.hadoop.fs.FileSystem): 40
ColumnStatisticsObj (org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj): 40