Use of com.facebook.presto.hive.metastore.MetastoreContext in project presto by prestodb.
In the class HiveMetadata, the method buildTableObject:
private static Table buildTableObject(
        String queryId, String schemaName, String tableName, String tableOwner, List<HiveColumnHandle> columnHandles,
        HiveStorageFormat hiveStorageFormat, List<String> partitionedBy, Optional<HiveBucketProperty> bucketProperty,
        List<SortingColumn> preferredOrderingColumns, Map<String, String> additionalTableParameters, Path targetPath,
        PrestoTableType tableType, String prestoVersion, MetastoreContext metastoreContext)
{
    Map<String, HiveColumnHandle> columnHandlesByName = Maps.uniqueIndex(columnHandles, HiveColumnHandle::getName);
    List<Column> partitionColumns = partitionedBy.stream()
            .map(columnHandlesByName::get)
            .map(column -> columnHandleToColumn(metastoreContext, column))
            .collect(toList());
    Set<String> partitionColumnNames = ImmutableSet.copyOf(partitionedBy);
    ImmutableList.Builder<Column> columns = ImmutableList.builder();
    for (HiveColumnHandle columnHandle : columnHandles) {
        String name = columnHandle.getName();
        HiveType type = columnHandle.getHiveType();
        if (!partitionColumnNames.contains(name)) {
            verify(!columnHandle.isPartitionKey(), "Column handles are not consistent with partitioned by property");
            columns.add(columnHandleToColumn(metastoreContext, columnHandle));
        }
        else {
            verify(columnHandle.isPartitionKey(), "Column handles are not consistent with partitioned by property");
        }
    }
    ImmutableMap.Builder<String, String> tableParameters = ImmutableMap.<String, String>builder()
            .put(PRESTO_VERSION_NAME, prestoVersion)
            .put(PRESTO_QUERY_ID_NAME, queryId)
            .putAll(additionalTableParameters);
    if (tableType.equals(EXTERNAL_TABLE)) {
        tableParameters.put("EXTERNAL", "TRUE");
    }
    Table.Builder tableBuilder = Table.builder()
            .setDatabaseName(schemaName)
            .setTableName(tableName)
            .setOwner(tableOwner)
            .setTableType(tableType)
            .setDataColumns(columns.build())
            .setPartitionColumns(partitionColumns)
            .setParameters(tableParameters.build());
    tableBuilder.getStorageBuilder()
            .setStorageFormat(fromHiveStorageFormat(hiveStorageFormat))
            .setBucketProperty(bucketProperty)
            .setParameters(ImmutableMap.of(PREFERRED_ORDERING_COLUMNS, encodePreferredOrderingColumns(preferredOrderingColumns)))
            .setLocation(targetPath.toString());
    return tableBuilder.build();
}
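For context, a hedged sketch of how this helper might be invoked from a create-table flow. This is not the actual call site in HiveMetadata: the schema and table names, the ORC format, and the columnHandles, targetPath, prestoVersion, and metastoreContext variables are illustrative placeholders, with metastoreContext assumed to come from getMetastoreContext(session) as in the other methods on this page.

    // Hypothetical call site; all literal values are illustrative.
    Table table = buildTableObject(
            session.getQueryId(),              // queryId of the CREATE TABLE query
            "tpch",                            // schemaName (example)
            "orders_copy",                     // tableName (example)
            session.getUser(),                 // tableOwner
            columnHandles,                     // List<HiveColumnHandle> built from the column metadata (assumed)
            HiveStorageFormat.ORC,             // hiveStorageFormat (example)
            ImmutableList.of("ds"),            // partitionedBy (example)
            Optional.empty(),                  // bucketProperty: not bucketed in this sketch
            ImmutableList.of(),                // preferredOrderingColumns
            ImmutableMap.of(),                 // additionalTableParameters
            targetPath,                        // Path where data will be written (assumed)
            PrestoTableType.MANAGED_TABLE,     // tableType
            prestoVersion,                     // server version string (assumed)
            metastoreContext);                 // from getMetastoreContext(session)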
Use of com.facebook.presto.hive.metastore.MetastoreContext in project presto by prestodb.
In the class HiveMetadata, the method metadataDelete:
@Override
public OptionalLong metadataDelete(ConnectorSession session, ConnectorTableHandle tableHandle, ConnectorTableLayoutHandle tableLayoutHandle)
{
    HiveTableHandle handle = (HiveTableHandle) tableHandle;
    HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) tableLayoutHandle;
    MetastoreContext metastoreContext = getMetastoreContext(session);
    Optional<Table> table = metastore.getTable(metastoreContext, handle.getSchemaName(), handle.getTableName());
    if (!table.isPresent()) {
        throw new TableNotFoundException(handle.getSchemaTableName());
    }
    if (table.get().getPartitionColumns().isEmpty()) {
        metastore.truncateUnpartitionedTable(session, handle.getSchemaName(), handle.getTableName());
    }
    else {
        for (HivePartition hivePartition : getOrComputePartitions(layoutHandle, session, tableHandle)) {
            metastore.dropPartition(session, handle.getSchemaName(), handle.getTableName(), table.get().getStorage().getLocation(), toPartitionValues(hivePartition.getPartitionId()));
        }
    }
    // it is too expensive to determine the exact number of deleted rows
    return OptionalLong.empty();
}
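The dropPartition call receives the partition values parsed out of the partition name (for example "ds=2020-01-01/country=US"). The helper below is not Presto's toPartitionValues; it is a simplified, hypothetical stand-in that ignores escaping and only illustrates the shape of the data being passed.

    // Simplified, hypothetical illustration of what toPartitionValues produces;
    // the real implementation also handles escaped characters in partition names.
    private static List<String> partitionValuesOf(String partitionName)
    {
        ImmutableList.Builder<String> values = ImmutableList.builder();
        for (String part : Splitter.on('/').split(partitionName)) {
            // each part looks like "key=value"; keep only the value
            values.add(part.substring(part.indexOf('=') + 1));
        }
        return values.build();
    }

    // partitionValuesOf("ds=2020-01-01/country=US") -> ["2020-01-01", "US"]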
Use of com.facebook.presto.hive.metastore.MetastoreContext in project presto by prestodb.
In the class HiveMetadata, the method getTableMetadata:
private ConnectorTableMetadata getTableMetadata(ConnectorSession session, SchemaTableName tableName)
{
    MetastoreContext metastoreContext = getMetastoreContext(session);
    Optional<Table> table = metastore.getTable(metastoreContext, tableName.getSchemaName(), tableName.getTableName());
    if (!table.isPresent() || table.get().getTableType().equals(VIRTUAL_VIEW)) {
        throw new TableNotFoundException(tableName);
    }

    Function<HiveColumnHandle, ColumnMetadata> metadataGetter = columnMetadataGetter(table.get(), typeManager, metastoreContext.getColumnConverter());
    ImmutableList.Builder<ColumnMetadata> columns = ImmutableList.builder();
    for (HiveColumnHandle columnHandle : hiveColumnHandles(table.get())) {
        columns.add(metadataGetter.apply(columnHandle));
    }

    // External location property
    ImmutableMap.Builder<String, Object> properties = ImmutableMap.builder();
    if (table.get().getTableType().equals(EXTERNAL_TABLE)) {
        properties.put(EXTERNAL_LOCATION_PROPERTY, table.get().getStorage().getLocation());
    }

    // Storage format property
    HiveStorageFormat format = null;
    try {
        format = extractHiveStorageFormat(table.get());
        properties.put(STORAGE_FORMAT_PROPERTY, format);
    }
    catch (PrestoException ignored) {
        // todo fail if format is not known
    }
    getTableEncryptionPropertiesFromHiveProperties(table.get().getParameters(), format)
            .map(TableEncryptionProperties::toTableProperties)
            .ifPresent(properties::putAll);

    // Partitioning property
    List<String> partitionedBy = table.get().getPartitionColumns().stream()
            .map(Column::getName)
            .collect(toList());
    if (!partitionedBy.isEmpty()) {
        properties.put(PARTITIONED_BY_PROPERTY, partitionedBy);
    }

    // Bucket properties
    Optional<HiveBucketProperty> bucketProperty = table.get().getStorage().getBucketProperty();
    table.get().getStorage().getBucketProperty().ifPresent(property -> {
        properties.put(BUCKET_COUNT_PROPERTY, property.getBucketCount());
        properties.put(BUCKETED_BY_PROPERTY, property.getBucketedBy());
        properties.put(SORTED_BY_PROPERTY, property.getSortedBy());
    });

    // Preferred ordering columns
    List<SortingColumn> preferredOrderingColumns = decodePreferredOrderingColumnsFromStorage(table.get().getStorage());
    if (!preferredOrderingColumns.isEmpty()) {
        if (bucketProperty.isPresent()) {
            throw new PrestoException(HIVE_INVALID_METADATA, format("bucketed table %s should not specify preferred_ordering_columns", tableName));
        }
        properties.put(PREFERRED_ORDERING_COLUMNS, preferredOrderingColumns);
    }

    // ORC format specific properties
    String orcBloomFilterColumns = table.get().getParameters().get(ORC_BLOOM_FILTER_COLUMNS_KEY);
    if (orcBloomFilterColumns != null) {
        properties.put(ORC_BLOOM_FILTER_COLUMNS, Splitter.on(COMMA).trimResults().omitEmptyStrings().splitToList(orcBloomFilterColumns));
    }
    String orcBloomFilterFfp = table.get().getParameters().get(ORC_BLOOM_FILTER_FPP_KEY);
    if (orcBloomFilterFfp != null) {
        properties.put(ORC_BLOOM_FILTER_FPP, Double.parseDouble(orcBloomFilterFfp));
    }

    // Avro specific property
    String avroSchemaUrl = table.get().getParameters().get(AVRO_SCHEMA_URL_KEY);
    if (avroSchemaUrl != null) {
        properties.put(AVRO_SCHEMA_URL, avroSchemaUrl);
    }

    // CSV specific properties
    getCsvSerdeProperty(table.get(), CSV_SEPARATOR_KEY).ifPresent(csvSeparator -> properties.put(CSV_SEPARATOR, csvSeparator));
    getCsvSerdeProperty(table.get(), CSV_QUOTE_KEY).ifPresent(csvQuote -> properties.put(CSV_QUOTE, csvQuote));
    getCsvSerdeProperty(table.get(), CSV_ESCAPE_KEY).ifPresent(csvEscape -> properties.put(CSV_ESCAPE, csvEscape));

    // Hook point for extended versions of the Hive Plugin
    properties.putAll(tableParameterCodec.decode(table.get().getParameters()));

    Optional<String> comment = Optional.ofNullable(table.get().getParameters().get(TABLE_COMMENT));
    return new ConnectorTableMetadata(tableName, columns.build(), properties.build(), comment);
}
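As a rough illustration of the result, a partitioned external ORC table would come back with a properties map along these lines. This is a sketch with made-up values (the location, partition column, and FPP are not from any real deployment); the keys are the same constants used in the method above.

    // Illustrative shape of the properties map assembled above (values are made up).
    Map<String, Object> exampleProperties = ImmutableMap.<String, Object>builder()
            .put(EXTERNAL_LOCATION_PROPERTY, "s3://bucket/warehouse/orders")
            .put(STORAGE_FORMAT_PROPERTY, HiveStorageFormat.ORC)
            .put(PARTITIONED_BY_PROPERTY, ImmutableList.of("ds"))
            .put(ORC_BLOOM_FILTER_FPP, 0.05)
            .build();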
Use of com.facebook.presto.hive.metastore.MetastoreContext in project presto by prestodb.
In the class HiveMetadata, the method dropTable:
@Override
public void dropTable(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    HiveTableHandle handle = (HiveTableHandle) tableHandle;
    MetastoreContext metastoreContext = getMetastoreContext(session);
    Optional<Table> target = metastore.getTable(metastoreContext, handle.getSchemaName(), handle.getTableName());
    if (!target.isPresent()) {
        throw new TableNotFoundException(handle.getSchemaTableName());
    }
    metastore.dropTable(
            new HdfsContext(session, handle.getSchemaName(), handle.getTableName(), target.get().getStorage().getLocation(), false),
            handle.getSchemaName(),
            handle.getTableName());
}
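The presence check here follows the same pattern as the other methods on this page. An equivalent, slightly more compact formulation using orElseThrow (as getInsertLayout below already does) would look like the sketch that follows; with it, the later access becomes target.getStorage().getLocation() instead of target.get().getStorage().getLocation().

    // Equivalent guard written with orElseThrow; same semantics as the isPresent() check above.
    Table target = metastore.getTable(metastoreContext, handle.getSchemaName(), handle.getTableName())
            .orElseThrow(() -> new TableNotFoundException(handle.getSchemaTableName()));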
Use of com.facebook.presto.hive.metastore.MetastoreContext in project presto by prestodb.
In the class HiveMetadata, the method getInsertLayout:
@Override
public Optional<ConnectorNewTableLayout> getInsertLayout(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle;
    SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
    MetastoreContext metastoreContext = getMetastoreContext(session);
    Table table = metastore.getTable(metastoreContext, tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));

    Optional<HiveBucketHandle> hiveBucketHandle = getHiveBucketHandle(table);
    if (!hiveBucketHandle.isPresent()) {
        return Optional.empty();
    }

    HiveBucketProperty bucketProperty = table.getStorage().getBucketProperty()
            .orElseThrow(() -> new NoSuchElementException("Bucket property should be set"));
    if (!bucketProperty.getSortedBy().isEmpty() && !isSortedWritingEnabled(session)) {
        throw new PrestoException(NOT_SUPPORTED, "Writing to bucketed sorted Hive tables is disabled");
    }

    HivePartitioningHandle partitioningHandle;
    int bucketCount = hiveBucketHandle.get().getTableBucketCount();
    OptionalInt maxCompatibleBucketCount = OptionalInt.of(bucketCount);
    switch (bucketProperty.getBucketFunctionType()) {
        case HIVE_COMPATIBLE:
            partitioningHandle = createHiveCompatiblePartitioningHandle(
                    bucketCount,
                    hiveBucketHandle.get().getColumns().stream()
                            .map(HiveColumnHandle::getHiveType)
                            .collect(toImmutableList()),
                    maxCompatibleBucketCount);
            break;
        case PRESTO_NATIVE:
            partitioningHandle = createPrestoNativePartitioningHandle(bucketCount, bucketProperty.getTypes().get(), maxCompatibleBucketCount);
            break;
        default:
            throw new IllegalArgumentException("Unsupported bucket function type " + bucketProperty.getBucketFunctionType());
    }

    List<String> partitionColumns = hiveBucketHandle.get().getColumns().stream()
            .map(HiveColumnHandle::getName)
            .collect(toList());
    return Optional.of(new ConnectorNewTableLayout(partitioningHandle, partitionColumns));
}
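To make the HIVE_COMPATIBLE branch concrete, here is a sketch of what the method effectively returns for a hypothetical table bucketed by a single bigint column into 32 buckets. The column name and bucket count are made up; the factory call mirrors the one in the method above.

    // Illustrative result for a table bucketed by (custkey) into 32 buckets
    // using the HIVE_COMPATIBLE bucket function; name and count are examples.
    HivePartitioningHandle partitioningHandle = createHiveCompatiblePartitioningHandle(
            32,
            ImmutableList.of(HiveType.HIVE_LONG),   // hive types of the bucketing columns
            OptionalInt.of(32));
    ConnectorNewTableLayout layout = new ConnectorNewTableLayout(partitioningHandle, ImmutableList.of("custkey"));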