Example 66 with Partition

Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.

From the class TestGetPartitions, method testGetPartition.

/**
 * Testing getPartition(String,String,String) ->
 *         get_partition_by_name(String,String,String).
 * @throws Exception
 */
@Test
public void testGetPartition() throws Exception {
    createTable3PartCols1Part(client);
    Partition partition = client.getPartition(DB_NAME, TABLE_NAME, "yyyy=1997/mm=05/dd=16");
    assertNotNull(partition);
    assertEquals(Lists.newArrayList("1997", "05", "16"), partition.getValues());
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Test(org.junit.Test) MetastoreCheckinTest(org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)
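
The createTable3PartCols1Part helper is defined elsewhere in the test class and is not shown above. A minimal sketch of what such a helper has to do, using only the Thrift objects that appear in these examples; the column, format, and serde choices below are illustrative assumptions, and DB_NAME/TABLE_NAME are the test class constants:

import java.util.HashMap;
import java.util.List;
import com.google.common.collect.Lists;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

static void createTable3PartCols1Part(IMetaStoreClient client) throws Exception {
    // Three string partition columns: yyyy, mm, dd.
    List<FieldSchema> partCols = Lists.newArrayList(
        new FieldSchema("yyyy", "string", null),
        new FieldSchema("mm", "string", null),
        new FieldSchema("dd", "string", null));

    // Bare-bones storage descriptor; input/output format and serde are
    // placeholder choices, not necessarily what the real test uses.
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(Lists.newArrayList(new FieldSchema("id", "int", null)));
    sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
    sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
    sd.setSerdeInfo(new SerDeInfo("test",
        "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", new HashMap<>()));

    Table table = new Table();
    table.setDbName(DB_NAME);
    table.setTableName(TABLE_NAME);
    table.setSd(sd);
    table.setPartitionKeys(partCols);
    client.createTable(table);

    // One partition whose values match the name looked up by the test:
    // yyyy=1997/mm=05/dd=16. The metastore derives the partition location
    // from the table location when none is set on the StorageDescriptor.
    Partition part = new Partition();
    part.setDbName(DB_NAME);
    part.setTableName(TABLE_NAME);
    part.setSd(new StorageDescriptor(sd));
    part.setValues(Lists.newArrayList("1997", "05", "16"));
    client.add_partition(part);
}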

Example 67 with Partition

Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.

From the class FileOutputCommitterContainer, method constructPartition.

/**
 * Generate the partition metadata object to be added to the metastore.
 * @param context The job context.
 * @param jobInfo The OutputJobInfo.
 * @param partLocnRoot The table-equivalent location root of the partition
 *                     (temporary dir if dynamic partition, table dir if static).
 * @param dynPartPath The path of the dynamic partition that was created.
 * @param partKVs The key-value pairs that form the partition.
 * @param outputSchema The output schema for the partition.
 * @param params The parameters to store inside the partition.
 * @param table The Table metadata object under which this Partition will reside.
 * @param fs FileSystem object to operate on the underlying filesystem.
 * @param grpName Group name that owns the table dir.
 * @param perms FsPermission to use as the default permission of the table dir.
 * @return The constructed Partition metadata object.
 * @throws java.io.IOException
 */
private Partition constructPartition(JobContext context, OutputJobInfo jobInfo,
        String partLocnRoot, String dynPartPath, Map<String, String> partKVs,
        HCatSchema outputSchema, Map<String, String> params, Table table,
        FileSystem fs, String grpName, FsPermission perms) throws IOException {
    Partition partition = new Partition();
    partition.setDbName(table.getDbName());
    partition.setTableName(table.getTableName());
    partition.setSd(new StorageDescriptor(table.getTTable().getSd()));
    List<FieldSchema> fields = new ArrayList<FieldSchema>();
    for (HCatFieldSchema fieldSchema : outputSchema.getFields()) {
        fields.add(HCatSchemaUtils.getFieldSchema(fieldSchema));
    }
    partition.getSd().setCols(fields);
    partition.setValues(FileOutputFormatContainer.getPartitionValueList(table, partKVs));
    partition.setParameters(params);
    // Sets permissions and group name on partition dirs and files.
    Path partPath;
    if (customDynamicLocationUsed) {
        partPath = new Path(dynPartPath);
    } else if (!dynamicPartitioningUsed
            && Boolean.parseBoolean((String) table.getProperty("EXTERNAL"))
            && jobInfo.getLocation() != null && jobInfo.getLocation().length() > 0) {
        // Now, we need to de-scratchify this location - i.e., get rid of any
        // _SCRATCH[\d].?[\d]+ from the location.
        String jobLocation = jobInfo.getLocation();
        String finalLocn = jobLocation.replaceAll(Path.SEPARATOR + SCRATCH_DIR_NAME + "\\d\\.?\\d+", "");
        partPath = new Path(finalLocn);
    } else {
        partPath = new Path(partLocnRoot);
        int i = 0;
        for (FieldSchema partKey : table.getPartitionKeys()) {
            if (i++ != 0) {
                // Attempt to make the path in case it does not exist before we check
                fs.mkdirs(partPath);
                applyGroupAndPerms(fs, partPath, perms, grpName, false);
            }
            partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs);
        }
    }
    // Apply the group and permissions to the leaf partition and files.
    // Need not bother in case of HDFS as permission is taken care of by setting UMask
    // Attempt to make the path in case it does not exist before we check
    fs.mkdirs(partPath);
    if (!ShimLoader.getHadoopShims().getHCatShim().isFileInHDFS(fs, partPath)) {
        applyGroupAndPerms(fs, partPath, perms, grpName, true);
    }
    // Set the location in the StorageDescriptor
    if (dynamicPartitioningUsed) {
        String dynamicPartitionDestination = getFinalDynamicPartitionDestination(table, partKVs, jobInfo);
        if (harProcessor.isEnabled()) {
            harProcessor.exec(context, partition, partPath);
            partition.getSd().setLocation(harProcessor.getProcessedLocation(new Path(dynamicPartitionDestination)));
        } else {
            partition.getSd().setLocation(dynamicPartitionDestination);
        }
    } else {
        partition.getSd().setLocation(partPath.toString());
    }
    return partition;
}
Also used : Path(org.apache.hadoop.fs.Path) Partition(org.apache.hadoop.hive.metastore.api.Partition) HCatFieldSchema(org.apache.hive.hcatalog.data.schema.HCatFieldSchema) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) ArrayList(java.util.ArrayList) HCatFieldSchema(org.apache.hive.hcatalog.data.schema.HCatFieldSchema)
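
The constructPartialPartPath helper called in the loop above is defined elsewhere in FileOutputCommitterContainer. A plausible sketch of what it does, assuming it appends one escaped key=value path component per partition key (FileUtils here is org.apache.hadoop.hive.common.FileUtils):

// Appends a single "key=value" component for one partition key to the path
// being built up. Escaping with FileUtils.escapePathName prevents special
// characters in partition values from corrupting the directory layout.
private Path constructPartialPartPath(Path partialPath, String partKey,
        Map<String, String> partKVs) {
    StringBuilder sb = new StringBuilder(FileUtils.escapePathName(partKey));
    sb.append("=");
    sb.append(FileUtils.escapePathName(partKVs.get(partKey)));
    return new Path(partialPath, sb.toString());
}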

Example 68 with Partition

Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.

From the class CachedStore, method getPartition.

@Override
public Partition getPartition(String dbName, String tblName, List<String> part_vals) throws MetaException, NoSuchObjectException {
    dbName = StringUtils.normalizeIdentifier(dbName);
    tblName = StringUtils.normalizeIdentifier(tblName);
    if (!shouldCacheTable(dbName, tblName)) {
        return rawStore.getPartition(dbName, tblName, part_vals);
    }
    Partition part = sharedCache.getPartitionFromCache(dbName, tblName, part_vals);
    if (part == null) {
        // The table containing the partition is not yet loaded in cache
        return rawStore.getPartition(dbName, tblName, part_vals);
    }
    return part;
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition)
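
An illustrative call, with placeholder database and table names and the three-part value list from the test example above (store is assumed to be an initialized CachedStore):

// Returns the cached partition, or falls through to the underlying RawStore
// when the table has not been loaded into the cache yet.
Partition part = store.getPartition("default", "test_part_table",
        Lists.newArrayList("1997", "05", "16"));

Note that the read path only consults the cache and falls back to the RawStore on a miss; it does not populate the cache itself. The same fall-through pattern recurs in the getPartitions and listPartitionNamesPs examples below.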

Example 69 with Partition

Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.

From the class CachedStore, method getPartitions.

@Override
public List<Partition> getPartitions(String dbName, String tblName, int max) throws MetaException, NoSuchObjectException {
    dbName = StringUtils.normalizeIdentifier(dbName);
    tblName = StringUtils.normalizeIdentifier(tblName);
    if (!shouldCacheTable(dbName, tblName)) {
        return rawStore.getPartitions(dbName, tblName, max);
    }
    Table tbl = sharedCache.getTableFromCache(dbName, tblName);
    if (tbl == null) {
        // The table containing the partitions is not yet loaded in cache
        return rawStore.getPartitions(dbName, tblName, max);
    }
    List<Partition> parts = sharedCache.listCachedPartitions(dbName, tblName, max);
    return parts;
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table)

Example 70 with Partition

Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.

From the class CachedStore, method listPartitionNamesPs.

@Override
public List<String> listPartitionNamesPs(String dbName, String tblName, List<String> partVals, short maxParts) throws MetaException, NoSuchObjectException {
    dbName = StringUtils.normalizeIdentifier(dbName);
    tblName = StringUtils.normalizeIdentifier(tblName);
    if (!shouldCacheTable(dbName, tblName)) {
        return rawStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts);
    }
    Table table = sharedCache.getTableFromCache(dbName, tblName);
    if (table == null) {
        // The table is not yet loaded in cache
        return rawStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts);
    }
    List<String> partNames = new ArrayList<>();
    int count = 0;
    for (Partition part : sharedCache.listCachedPartitions(dbName, tblName, maxParts)) {
        boolean psMatch = true;
        for (int i = 0; i < partVals.size(); i++) {
            String psVal = partVals.get(i);
            String partVal = part.getValues().get(i);
            if (psVal != null && !psVal.isEmpty() && !psVal.equals(partVal)) {
                psMatch = false;
                break;
            }
        }
        if (!psMatch) {
            continue;
        }
        if (maxParts == -1 || count < maxParts) {
            partNames.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues()));
            count++;
        }
    }
    return partNames;
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) ArrayList(java.util.ArrayList) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint)
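
Warehouse.makePartName assembles the canonical "k1=v1/k2=v2" partition name from the partition key schema and a value list. A small illustration using the yyyy/mm/dd schema from the first example (the wrapper method is hypothetical; makePartName throws MetaException, hence the throws clause):

import java.util.List;
import com.google.common.collect.Lists;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;

static String examplePartName() throws MetaException {
    List<FieldSchema> keys = Lists.newArrayList(
        new FieldSchema("yyyy", "string", null),
        new FieldSchema("mm", "string", null),
        new FieldSchema("dd", "string", null));
    // Returns "yyyy=1997/mm=05/dd=16", the same name-based form used by the
    // getPartition(String, String, String) lookup in Example 66.
    return Warehouse.makePartName(keys, Lists.newArrayList("1997", "05", "16"));
}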

Aggregations (usage counts across the indexed sources)

Partition (org.apache.hadoop.hive.metastore.api.Partition): 730
Test (org.junit.Test): 430
Table (org.apache.hadoop.hive.metastore.api.Table): 312
ArrayList (java.util.ArrayList): 303
MetastoreCheckinTest (org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest): 254
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 131
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 115
List (java.util.List): 109
Path (org.apache.hadoop.fs.Path): 109
StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor): 107
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 87
SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo): 71
HashMap (java.util.HashMap): 64
PartitionBuilder (org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder): 63
TException (org.apache.thrift.TException): 62
IOException (java.io.IOException): 61
Database (org.apache.hadoop.hive.metastore.api.Database): 55
PartitionSpecProxy (org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy): 52
FileSystem (org.apache.hadoop.fs.FileSystem): 40
ColumnStatisticsObj (org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj): 40