Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
Class TestGetPartitions, method testGetPartition.
/**
 * Tests that getPartition(String, String, String) maps to the Thrift call
 * get_partition_by_name(String, String, String).
 * @throws Exception
 */
@Test
public void testGetPartition() throws Exception {
  createTable3PartCols1Part(client);
  Partition partition = client.getPartition(DB_NAME, TABLE_NAME, "yyyy=1997/mm=05/dd=16");
  assertNotNull(partition);
  assertEquals(Lists.newArrayList("1997", "05", "16"), partition.getValues());
}
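For comparison, the metastore client also exposes an overload that looks the partition up by its value list rather than by name; this is the overload that CachedStore.getPartition below serves. A minimal usage sketch against the same table, reusing the client and constants from the test above:

// Fetch the same partition by its value list instead of by its name.
Partition byValues = client.getPartition(DB_NAME, TABLE_NAME,
    Lists.newArrayList("1997", "05", "16"));
assertEquals(partition.getValues(), byValues.getValues());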
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
Class FileOutputCommitterContainer, method constructPartition.
/**
 * Generates the partition metadata object to be added to the metastore.
 * @param context The job context.
 * @param jobInfo The OutputJobInfo.
 * @param partLocnRoot The table-equivalent location root of the partition
 * (temporary dir if dynamic partition, table dir if static)
 * @param dynPartPath The path of the dynamic partition being created
 * @param partKVs The key-value pairs that form the partition
 * @param outputSchema The output schema for the partition
 * @param params The parameters to store inside the partition
 * @param table The Table metadata object under which this Partition will reside
 * @param fs FileSystem object to operate on the underlying filesystem
 * @param grpName Group name that owns the table dir
 * @param perms FsPermission that is the default permission of the table dir
 * @return The constructed Partition metadata object
 * @throws java.io.IOException
 */
private Partition constructPartition(
    JobContext context, OutputJobInfo jobInfo,
    String partLocnRoot, String dynPartPath, Map<String, String> partKVs,
    HCatSchema outputSchema, Map<String, String> params,
    Table table, FileSystem fs,
    String grpName, FsPermission perms) throws IOException {
  Partition partition = new Partition();
  partition.setDbName(table.getDbName());
  partition.setTableName(table.getTableName());
  partition.setSd(new StorageDescriptor(table.getTTable().getSd()));
  List<FieldSchema> fields = new ArrayList<FieldSchema>();
  for (HCatFieldSchema fieldSchema : outputSchema.getFields()) {
    fields.add(HCatSchemaUtils.getFieldSchema(fieldSchema));
  }
  partition.getSd().setCols(fields);
  partition.setValues(FileOutputFormatContainer.getPartitionValueList(table, partKVs));
  partition.setParameters(params);
  // Sets permissions and group name on partition dirs and files.
  Path partPath;
  if (customDynamicLocationUsed) {
    partPath = new Path(dynPartPath);
  } else if (!dynamicPartitioningUsed
      && Boolean.parseBoolean((String) table.getProperty("EXTERNAL"))
      && jobInfo.getLocation() != null && jobInfo.getLocation().length() > 0) {
    // Now, we need to de-scratchify this location - i.e., get rid of any
    // _SCRATCH\d\.?\d+ component from the location.
    String jobLocation = jobInfo.getLocation();
    String finalLocn = jobLocation.replaceAll(Path.SEPARATOR + SCRATCH_DIR_NAME + "\\d\\.?\\d+", "");
    partPath = new Path(finalLocn);
  } else {
    partPath = new Path(partLocnRoot);
    int i = 0;
    for (FieldSchema partKey : table.getPartitionKeys()) {
      if (i++ != 0) {
        // Attempt to make the path in case it does not exist before we check.
        fs.mkdirs(partPath);
        applyGroupAndPerms(fs, partPath, perms, grpName, false);
      }
      partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs);
    }
  }
  // Apply the group and permissions to the leaf partition and files.
  // There is no need to bother in the HDFS case, as permissions are taken care of by setting the UMask.
  // Attempt to make the path in case it does not exist before we check.
  fs.mkdirs(partPath);
  if (!ShimLoader.getHadoopShims().getHCatShim().isFileInHDFS(fs, partPath)) {
    applyGroupAndPerms(fs, partPath, perms, grpName, true);
  }
  // Set the location in the StorageDescriptor.
  if (dynamicPartitioningUsed) {
    String dynamicPartitionDestination = getFinalDynamicPartitionDestination(table, partKVs, jobInfo);
    if (harProcessor.isEnabled()) {
      harProcessor.exec(context, partition, partPath);
      partition.getSd().setLocation(harProcessor.getProcessedLocation(new Path(dynamicPartitionDestination)));
    } else {
      partition.getSd().setLocation(dynamicPartitionDestination);
    }
  } else {
    partition.getSd().setLocation(partPath.toString());
  }
  return partition;
}
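The "de-scratchify" branch above is easiest to see with concrete values. A minimal standalone sketch, assuming "_SCRATCH" as the scratch-dir prefix and "/" as the path separator (illustrative values; the real constants live in FileOutputCommitterContainer and org.apache.hadoop.fs.Path):

public class DeScratchifyExample {
  public static void main(String[] args) {
    final String SEPARATOR = "/";               // stand-in for Path.SEPARATOR
    final String SCRATCH_DIR_NAME = "_SCRATCH"; // assumed scratch-dir prefix
    String jobLocation = "hdfs://nn:8020/warehouse/mytable/_SCRATCH0.618034/yyyy=1997";
    // Strips the "/_SCRATCH<digit>.<digits>" path component, yielding the final location.
    String finalLocn = jobLocation.replaceAll(SEPARATOR + SCRATCH_DIR_NAME + "\\d\\.?\\d+", "");
    // Prints "hdfs://nn:8020/warehouse/mytable/yyyy=1997"
    System.out.println(finalLocn);
  }
}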
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
Class CachedStore, method getPartition.
@Override
public Partition getPartition(String dbName, String tblName, List<String> part_vals)
    throws MetaException, NoSuchObjectException {
  dbName = StringUtils.normalizeIdentifier(dbName);
  tblName = StringUtils.normalizeIdentifier(tblName);
  if (!shouldCacheTable(dbName, tblName)) {
    return rawStore.getPartition(dbName, tblName, part_vals);
  }
  Partition part = sharedCache.getPartitionFromCache(dbName, tblName, part_vals);
  if (part == null) {
    // The table containing the partition is not yet loaded in the cache.
    return rawStore.getPartition(dbName, tblName, part_vals);
  }
  return part;
}
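CachedStore follows a read-through fallback here: answer from the in-memory SharedCache when possible, and delegate to the backing RawStore when the table is not cached or not cacheable. A generic sketch of the same shape; Cache and Store are hypothetical stand-ins for sharedCache and rawStore, not Hive classes:

// Read-through fallback, mirroring the CachedStore shape above.
interface Cache<K, V> { V get(K key); }  // stand-in for sharedCache
interface Store<K, V> { V load(K key); } // stand-in for rawStore

final class ReadThrough<K, V> {
  private final Cache<K, V> cache;
  private final Store<K, V> store;

  ReadThrough(Cache<K, V> cache, Store<K, V> store) {
    this.cache = cache;
    this.store = store;
  }

  V get(K key) {
    V cached = cache.get(key);
    // On a miss (e.g., the entry is not yet loaded in the cache),
    // fall back to the authoritative store rather than failing.
    return cached != null ? cached : store.load(key);
  }
}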
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
Class CachedStore, method getPartitions.
@Override
public List<Partition> getPartitions(String dbName, String tblName, int max)
    throws MetaException, NoSuchObjectException {
  dbName = StringUtils.normalizeIdentifier(dbName);
  tblName = StringUtils.normalizeIdentifier(tblName);
  if (!shouldCacheTable(dbName, tblName)) {
    return rawStore.getPartitions(dbName, tblName, max);
  }
  Table tbl = sharedCache.getTableFromCache(dbName, tblName);
  if (tbl == null) {
    // The table containing the partitions is not yet loaded in the cache.
    return rawStore.getPartitions(dbName, tblName, max);
  }
  return sharedCache.listCachedPartitions(dbName, tblName, max);
}
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
Class CachedStore, method listPartitionNamesPs.
@Override
public List<String> listPartitionNamesPs(String dbName, String tblName, List<String> partVals, short maxParts)
    throws MetaException, NoSuchObjectException {
  dbName = StringUtils.normalizeIdentifier(dbName);
  tblName = StringUtils.normalizeIdentifier(tblName);
  if (!shouldCacheTable(dbName, tblName)) {
    return rawStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts);
  }
  Table table = sharedCache.getTableFromCache(dbName, tblName);
  if (table == null) {
    // The table is not yet loaded in the cache.
    return rawStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts);
  }
  List<String> partNames = new ArrayList<>();
  int count = 0;
  for (Partition part : sharedCache.listCachedPartitions(dbName, tblName, maxParts)) {
    boolean psMatch = true;
    for (int i = 0; i < partVals.size(); i++) {
      String psVal = partVals.get(i);
      String partVal = part.getValues().get(i);
      if (psVal != null && !psVal.isEmpty() && !psVal.equals(partVal)) {
        psMatch = false;
        break;
      }
    }
    if (!psMatch) {
      continue;
    }
    if (maxParts == -1 || count < maxParts) {
      partNames.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues()));
      count++;
    }
  }
  return partNames;
}
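The inner loop implements partial-specification matching: a null or empty entry in partVals acts as a wildcard for that partition key, so a caller can pin a prefix of the keys and leave the rest open. A small sketch of the wildcard convention and the name format, assuming the yyyy/mm/dd partition keys from the test snippet at the top (Warehouse.makePartName throws MetaException):

// With keys (yyyy, mm, dd), partVals = ["1997", "", ""] matches every
// partition whose yyyy value is "1997"; the empty mm and dd are wildcards.
List<FieldSchema> partCols = Arrays.asList(
    new FieldSchema("yyyy", "string", null),
    new FieldSchema("mm", "string", null),
    new FieldSchema("dd", "string", null));
String name = Warehouse.makePartName(partCols, Arrays.asList("1997", "05", "16"));
// name is "yyyy=1997/mm=05/dd=16" -- the same format that testGetPartition
// passed to client.getPartition above.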