Use of org.apache.hadoop.hive.metastore.api.Partition in project presto by prestodb.
The class ThriftHiveMetastore, method addPartitions.
@Override
public void addPartitions(MetastoreContext metastoreContext, String databaseName, String tableName, List<PartitionWithStatistics> partitionsWithStatistics) {
    List<Partition> partitions = partitionsWithStatistics.stream()
            .map(part -> ThriftMetastoreUtil.toMetastoreApiPartition(part, metastoreContext.getColumnConverter()))
            .collect(toImmutableList());
    addPartitionsWithoutStatistics(metastoreContext, databaseName, tableName, partitions);
    for (PartitionWithStatistics partitionWithStatistics : partitionsWithStatistics) {
        storePartitionColumnStatistics(metastoreContext, databaseName, tableName, partitionWithStatistics.getPartitionName(), partitionWithStatistics);
    }
}
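The method has a two-phase shape: convert and bulk-add the partitions first, then store column statistics one partition at a time. Below is a generic sketch of that pattern; the class and method names (TwoPhaseAdd, addAll) are illustrative, not Presto APIs.

import java.util.List;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;

// Illustrative sketch only: convert each item, add the whole batch in one call,
// then run a per-item follow-up step (statistics, in the snippet above).
class TwoPhaseAdd {
    static <T, R> void addAll(List<T> items,
                              Function<T, R> convert,
                              Consumer<List<R>> bulkAdd,
                              Consumer<T> perItemFollowUp) {
        List<R> converted = items.stream().map(convert).collect(Collectors.toList());
        bulkAdd.accept(converted);      // one batched call for the partitions
        items.forEach(perItemFollowUp); // statistics are stored one by one above
    }
}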
Use of org.apache.hadoop.hive.metastore.api.Partition in project presto by prestodb.
The class InMemoryHiveMetastore, method listAllDataPaths.
private static List<String> listAllDataPaths(MetastoreContext metastoreContext, HiveMetastore metastore, String schemaName, String tableName) {
    ImmutableList.Builder<String> locations = ImmutableList.builder();
    Table table = metastore.getTable(metastoreContext, schemaName, tableName).get();
    if (table.getSd().getLocation() != null) {
        // For an unpartitioned table, there should be nothing directly under this directory.
        // But including this location in the set makes the directory content assertion more
        // extensive, which is desirable.
        locations.add(table.getSd().getLocation());
    }
    Optional<List<String>> partitionNames = metastore.getPartitionNames(metastoreContext, schemaName, tableName);
    if (partitionNames.isPresent()) {
        metastore.getPartitionsByNames(metastoreContext, schemaName, tableName, partitionNames.get()).stream()
                .map(partition -> partition.getSd().getLocation())
                .filter(location -> !location.startsWith(table.getSd().getLocation()))
                .forEach(locations::add);
    }
    return locations.build();
}
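A standalone sketch of the same gathering logic with plain strings standing in for the metastore objects (the class name and signature below are hypothetical): keep the table root, then add only partition locations that fall outside it, since anything under the root is already covered.

import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch: mirrors listAllDataPaths above using plain strings.
class DataPathsSketch {
    static List<String> listAllDataPaths(String tableLocation, List<String> partitionLocations) {
        List<String> locations = new ArrayList<>();
        if (tableLocation != null) {
            locations.add(tableLocation);
        }
        for (String partitionLocation : partitionLocations) {
            // partition directories under the table root are implied by the root itself
            if (tableLocation == null || !partitionLocation.startsWith(tableLocation)) {
                locations.add(partitionLocation);
            }
        }
        return locations;
    }
}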
Use of org.apache.hadoop.hive.metastore.api.Partition in project presto by prestodb.
The class InMemoryHiveMetastore, method getPartitionsByNames.
@Override
public synchronized List<Partition> getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List<String> partitionNames) {
    ImmutableList.Builder<Partition> builder = ImmutableList.builder();
    for (String name : partitionNames) {
        PartitionName partitionName = PartitionName.partition(databaseName, tableName, name);
        Partition partition = getPartitionFromInMemoryMap(metastoreContext, partitionName);
        if (partition == null) {
            return ImmutableList.of();
        }
        builder.add(partition.deepCopy());
    }
    return builder.build();
}
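Note the all-or-nothing semantics: one missing partition makes the whole call return an empty list instead of a partial result. A minimal, self-contained sketch of that pattern over a plain map; all names here are illustrative:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

// Illustrative sketch of the all-or-nothing batch lookup used above.
class AllOrNothingLookup {
    static <K, V> List<V> getAll(Map<K, V> map, List<K> keys) {
        List<V> result = new ArrayList<>();
        for (K key : keys) {
            V value = map.get(key);
            if (value == null) {
                return Collections.emptyList(); // a single miss invalidates the whole batch
            }
            result.add(value);
        }
        return Collections.unmodifiableList(result);
    }
}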
Use of org.apache.hadoop.hive.metastore.api.Partition in project storm by apache.
The class HiveSetupUtil, method addPartition.
private static void addPartition(IMetaStoreClient client, Table tbl, List<String> partValues) throws IOException, TException {
    Partition part = new Partition();
    part.setDbName(tbl.getDbName());
    part.setTableName(tbl.getTableName());
    StorageDescriptor sd = new StorageDescriptor(tbl.getSd());
    sd.setLocation(sd.getLocation() + Path.SEPARATOR + makePartPath(tbl.getPartitionKeys(), partValues));
    part.setSd(sd);
    part.setValues(partValues);
    client.add_partition(part);
}
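makePartPath is a private helper elsewhere in HiveSetupUtil and is not shown here. By Hive convention it builds a path segment of the form dt=2024-01-01/country=us from the partition keys and values. A simplified sketch under that assumption, taking plain string keys rather than the FieldSchema list and omitting the value escaping the real helper would need:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

// Assumption-based sketch of a makePartPath-style helper (not the storm code).
class PartPathSketch {
    static String makePartPath(List<String> partitionKeys, List<String> partitionValues) {
        if (partitionKeys.size() != partitionValues.size()) {
            throw new IllegalArgumentException("partition keys and values must align");
        }
        return IntStream.range(0, partitionKeys.size())
                .mapToObj(i -> partitionKeys.get(i) + "=" + partitionValues.get(i))
                .collect(Collectors.joining("/"));
    }
}

For example, makePartPath(List.of("dt", "country"), List.of("2024-01-01", "us")) yields "dt=2024-01-01/country=us", which the snippet appends to the table's storage location.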
Use of org.apache.hadoop.hive.metastore.api.Partition in project flink by apache.
The class HiveLookupTableSource, method getLookupFunction.
private TableFunction<RowData> getLookupFunction(int[] keys) {
    final String defaultPartitionName = JobConfUtils.getDefaultPartitionName(jobConf);
    PartitionFetcher.Context<HiveTablePartition> fetcherContext = new HiveTablePartitionFetcherContext(
            tablePath,
            hiveShim,
            new JobConfWrapper(jobConf),
            catalogTable.getPartitionKeys(),
            getProducedTableSchema().getFieldDataTypes(),
            getProducedTableSchema().getFieldNames(),
            configuration,
            defaultPartitionName);
    final PartitionFetcher<HiveTablePartition> partitionFetcher;
    // avoid lambda capture
    final ObjectPath tableFullPath = tablePath;
    if (catalogTable.getPartitionKeys().isEmpty()) {
        // non-partitioned table, the fetcher fetches the partition which represents the given table.
        partitionFetcher = context -> {
            List<HiveTablePartition> partValueList = new ArrayList<>();
            partValueList.add(context.getPartition(new ArrayList<>())
                    .orElseThrow(() -> new IllegalArgumentException(
                            String.format("Fetch partition fail for hive table %s.", tableFullPath))));
            return partValueList;
        };
    } else if (isStreamingSource()) {
        // streaming-read partitioned table, the fetcher fetches the latest partition of the given table.
        partitionFetcher = context -> {
            List<HiveTablePartition> partValueList = new ArrayList<>();
            List<PartitionFetcher.Context.ComparablePartitionValue> comparablePartitionValues =
                    context.getComparablePartitionValueList();
            // fetch latest partitions for partitioned table
            if (comparablePartitionValues.size() > 0) {
                // sort in desc order
                comparablePartitionValues.sort(
                        (o1, o2) -> o2.getComparator().compareTo(o1.getComparator()));
                PartitionFetcher.Context.ComparablePartitionValue maxPartition =
                        comparablePartitionValues.get(0);
                partValueList.add(context.getPartition((List<String>) maxPartition.getPartitionValue())
                        .orElseThrow(() -> new IllegalArgumentException(
                                String.format("Fetch partition fail for hive table %s.", tableFullPath))));
            } else {
                throw new IllegalArgumentException(String.format(
                        "At least one partition is required when set '%s' to 'latest' in temporal join,"
                                + " but actual partition number is '%s' for hive table %s",
                        STREAMING_SOURCE_PARTITION_INCLUDE.key(),
                        comparablePartitionValues.size(),
                        tableFullPath));
            }
            return partValueList;
        };
    } else {
        // bounded-read partitioned table, the fetcher fetches all partitions of the given filesystem table.
        partitionFetcher = context -> {
            List<HiveTablePartition> partValueList = new ArrayList<>();
            List<PartitionFetcher.Context.ComparablePartitionValue> comparablePartitionValues =
                    context.getComparablePartitionValueList();
            for (PartitionFetcher.Context.ComparablePartitionValue comparablePartitionValue : comparablePartitionValues) {
                partValueList.add(context.getPartition((List<String>) comparablePartitionValue.getPartitionValue())
                        .orElseThrow(() -> new IllegalArgumentException(
                                String.format("Fetch partition fail for hive table %s.", tableFullPath))));
            }
            return partValueList;
        };
    }
    PartitionReader<HiveTablePartition, RowData> partitionReader = new HiveInputFormatPartitionReader(
            flinkConf,
            jobConf,
            hiveVersion,
            tablePath,
            getProducedTableSchema().getFieldDataTypes(),
            getProducedTableSchema().getFieldNames(),
            catalogTable.getPartitionKeys(),
            projectedFields,
            flinkConf.get(HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER));
    return new FileSystemLookupFunction<>(
            partitionFetcher,
            fetcherContext,
            partitionReader,
            (RowType) getProducedTableSchema().toRowDataType().getLogicalType(),
            keys,
            hiveTableReloadInterval);
}
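In the streaming branch above, the latest partition is selected by sorting the comparable partition values in descending order and taking the first element. A self-contained sketch of that selection, with plain strings standing in for ComparablePartitionValue:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Illustrative sketch: pick the maximum (latest) partition value by descending sort.
class LatestPartitionSketch {
    static String latest(List<String> partitionValues) {
        if (partitionValues.isEmpty()) {
            throw new IllegalArgumentException("at least one partition is required");
        }
        List<String> sorted = new ArrayList<>(partitionValues);
        sorted.sort(Comparator.reverseOrder()); // descending, so the latest comes first
        return sorted.get(0);
    }
}

With date-style values such as dt=2024-01-03, lexicographic order coincides with chronological order, which is what makes a plain Comparable comparison select the most recent partition.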