Use of com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED in project presto by prestodb.
The class NativeCassandraSession, method getTableMetadata.
private static AbstractTableMetadata getTableMetadata(KeyspaceMetadata keyspace, String caseInsensitiveTableName) {
    List<AbstractTableMetadata> tables = Stream.concat(
                    keyspace.getTables().stream(),
                    keyspace.getMaterializedViews().stream())
            .filter(table -> table.getName().equalsIgnoreCase(caseInsensitiveTableName))
            .collect(toImmutableList());
    if (tables.size() == 0) {
        throw new TableNotFoundException(new SchemaTableName(keyspace.getName(), caseInsensitiveTableName));
    } else if (tables.size() == 1) {
        return tables.get(0);
    }
    String tableNames = tables.stream()
            .map(AbstractTableMetadata::getName)
            .sorted()
            .collect(joining(", "));
    throw new PrestoException(NOT_SUPPORTED, format("More than one table has been found for the case insensitive table name: %s -> (%s)", caseInsensitiveTableName, tableNames));
}
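For illustration, here is a minimal, self-contained sketch of the same case-insensitive resolution pattern using only plain Java collections instead of the Cassandra driver types. The names TableRef and resolveTable are hypothetical and the exceptions are generic stand-ins for TableNotFoundException and PrestoException(NOT_SUPPORTED); this is a sketch of the pattern, not the Presto implementation.

import java.util.List;
import java.util.stream.Collectors;

public class CaseInsensitiveLookup {
    // Hypothetical stand-in for AbstractTableMetadata: only the name matters here.
    record TableRef(String name) {}

    // Resolve a case-insensitive name to exactly one table, mirroring the three
    // outcomes above: not found, unique match, or ambiguous match.
    static TableRef resolveTable(List<TableRef> candidates, String caseInsensitiveName) {
        List<TableRef> matches = candidates.stream()
                .filter(t -> t.name().equalsIgnoreCase(caseInsensitiveName))
                .collect(Collectors.toList());
        if (matches.isEmpty()) {
            throw new IllegalArgumentException("Table not found: " + caseInsensitiveName);
        }
        if (matches.size() == 1) {
            return matches.get(0);
        }
        String names = matches.stream().map(TableRef::name).sorted().collect(Collectors.joining(", "));
        throw new UnsupportedOperationException(
                "More than one table has been found for the case insensitive table name: "
                        + caseInsensitiveName + " -> (" + names + ")");
    }

    public static void main(String[] args) {
        List<TableRef> tables = List.of(new TableRef("Orders"), new TableRef("orders"), new TableRef("users"));
        System.out.println(resolveTable(tables, "users"));   // unique match
        System.out.println(resolveTable(tables, "ORDERS"));  // throws: ambiguous match
    }
}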
Use of com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED in project presto by prestodb.
The class ConcatFunction, method generateConcat.
private static Class<?> generateConcat(TypeSignature type, int arity) {
    checkCondition(arity <= 254, NOT_SUPPORTED, "Too many arguments for string concatenation");
    ClassDefinition definition = new ClassDefinition(
            a(PUBLIC, FINAL),
            makeClassName(type.getBase() + "_concat" + arity + "ScalarFunction"),
            type(Object.class));
    // Generate constructor
    definition.declareDefaultConstructor(a(PRIVATE));
    // Generate concat()
    List<Parameter> parameters = IntStream.range(0, arity)
            .mapToObj(i -> arg("arg" + i, Slice.class))
            .collect(toImmutableList());
    MethodDefinition method = definition.declareMethod(a(PUBLIC, STATIC), "concat", type(Slice.class), parameters);
    Scope scope = method.getScope();
    BytecodeBlock body = method.getBody();
    Variable result = scope.declareVariable(Slice.class, "result");
    body.append(invokeStatic(
                    ConcatFunction.class,
                    "concat",
                    Slice.class,
                    BytecodeExpressions.newArray(ParameterizedType.type(Slice[].class), parameters)))
            .retObject();
    return defineClass(definition, Object.class, ImmutableMap.of(), new DynamicClassLoader(ConcatFunction.class.getClassLoader()));
}
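The generated method simply packs its arguments into an array and delegates to a variadic concat helper. Below is a minimal sketch of what such a runtime helper does, using plain byte arrays in place of airlift Slices; the class ConcatSketch, the helper name, and the MAX_ARITY constant are illustrative assumptions, not the Presto implementation. The 254-argument ceiling mirrors the checkCondition above, which is most likely there because a JVM method descriptor allows at most 255 parameter slots.

import java.nio.charset.StandardCharsets;

public class ConcatSketch {
    // Mirrors the arity guard in generateConcat above.
    private static final int MAX_ARITY = 254;

    // Concatenate all argument buffers into one result buffer.
    static byte[] concat(byte[]... args) {
        if (args.length > MAX_ARITY) {
            throw new UnsupportedOperationException("Too many arguments for string concatenation");
        }
        int length = 0;
        for (byte[] arg : args) {
            length += arg.length;
        }
        byte[] result = new byte[length];
        int position = 0;
        for (byte[] arg : args) {
            System.arraycopy(arg, 0, result, position, arg.length);
            position += arg.length;
        }
        return result;
    }

    public static void main(String[] args) {
        byte[] joined = concat(
                "foo".getBytes(StandardCharsets.UTF_8),
                "bar".getBytes(StandardCharsets.UTF_8));
        System.out.println(new String(joined, StandardCharsets.UTF_8)); // foobar
    }
}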
Use of com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED in project presto by prestodb.
The class HiveMetadata, method getNewTableLayout.
@Override
public Optional<ConnectorNewTableLayout> getNewTableLayout(ConnectorSession session, ConnectorTableMetadata tableMetadata) {
    validatePartitionColumns(tableMetadata);
    validateBucketColumns(tableMetadata);
    validateCsvColumns(tableMetadata);
    Optional<HiveBucketProperty> bucketProperty = getBucketProperty(tableMetadata.getProperties());
    if (!bucketProperty.isPresent()) {
        return Optional.empty();
    }
    checkArgument(
            bucketProperty.get().getBucketFunctionType().equals(BucketFunctionType.HIVE_COMPATIBLE),
            "bucketFunctionType is expected to be HIVE_COMPATIBLE, got: %s",
            bucketProperty.get().getBucketFunctionType());
    if (!bucketProperty.get().getSortedBy().isEmpty() && !isSortedWritingEnabled(session)) {
        throw new PrestoException(NOT_SUPPORTED, "Writing to bucketed sorted Hive tables is disabled");
    }
    List<String> bucketedBy = bucketProperty.get().getBucketedBy();
    Map<String, HiveType> hiveTypeMap = tableMetadata.getColumns().stream()
            .collect(toMap(ColumnMetadata::getName, column -> toHiveType(typeTranslator, column.getType())));
    return Optional.of(new ConnectorNewTableLayout(
            createHiveCompatiblePartitioningHandle(
                    bucketProperty.get().getBucketCount(),
                    bucketedBy.stream().map(hiveTypeMap::get).collect(toImmutableList()),
                    OptionalInt.of(bucketProperty.get().getBucketCount())),
            bucketedBy));
}
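The core of this method is resolving each bucketing column name to its declared type before building the partitioning handle. A minimal sketch of that mapping step follows, with plain Java records standing in for the Hive connector classes; ColumnSpec, BucketSpec, and bucketColumnTypes are hypothetical names chosen for the sketch.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class BucketLayoutSketch {
    record ColumnSpec(String name, String type) {}
    record BucketSpec(List<String> bucketedBy, int bucketCount) {}

    // Resolve each bucketing column to its declared type, failing fast if a
    // bucketed-by column does not exist in the table definition.
    static List<String> bucketColumnTypes(List<ColumnSpec> columns, BucketSpec bucket) {
        Map<String, String> typeByName = columns.stream()
                .collect(Collectors.toMap(ColumnSpec::name, ColumnSpec::type));
        return bucket.bucketedBy().stream()
                .map(name -> {
                    String type = typeByName.get(name);
                    if (type == null) {
                        throw new IllegalArgumentException("Unknown bucketing column: " + name);
                    }
                    return type;
                })
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<ColumnSpec> columns = List.of(new ColumnSpec("user_id", "bigint"), new ColumnSpec("ds", "varchar"));
        System.out.println(bucketColumnTypes(columns, new BucketSpec(List.of("user_id"), 32))); // [bigint]
    }
}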
Use of com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED in project presto by prestodb.
The class HiveMetadata, method beginCreateTable.
@Override
public HiveOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorNewTableLayout> layout) {
    verifyJvmTimeZone();
    if (getExternalLocation(tableMetadata.getProperties()) != null) {
        throw new PrestoException(NOT_SUPPORTED, "External tables cannot be created using CREATE TABLE AS");
    }
    if (getAvroSchemaUrl(tableMetadata.getProperties()) != null) {
        throw new PrestoException(NOT_SUPPORTED, "CREATE TABLE AS not supported when Avro schema url is set");
    }
    HiveStorageFormat tableStorageFormat = getHiveStorageFormat(tableMetadata.getProperties());
    List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties());
    Optional<HiveBucketProperty> bucketProperty = getBucketProperty(tableMetadata.getProperties());
    List<SortingColumn> preferredOrderingColumns = getPreferredOrderingColumns(tableMetadata.getProperties());
    // get the root directory for the database
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    Optional<TableEncryptionProperties> tableEncryptionProperties = getTableEncryptionPropertiesFromTableProperties(tableMetadata, tableStorageFormat, partitionedBy);
    List<HiveColumnHandle> columnHandles = getColumnHandles(tableMetadata, ImmutableSet.copyOf(partitionedBy), typeTranslator);
    HiveStorageFormat partitionStorageFormat = isRespectTableFormat(session) ? tableStorageFormat : getHiveStorageFormat(session);
    // unpartitioned tables ignore the partition storage format
    HiveStorageFormat actualStorageFormat = partitionedBy.isEmpty() ? tableStorageFormat : partitionStorageFormat;
    validateColumns(actualStorageFormat, columnHandles);
    if (tableEncryptionProperties.isPresent() && tableStorageFormat != actualStorageFormat) {
        throw new PrestoException(INVALID_TABLE_PROPERTY, format(
                "For encrypted tables, partition format (%s) should match table format (%s). Using the session property %s or appropriately setting %s can help with ensuring this",
                actualStorageFormat.name(),
                tableStorageFormat.name(),
                RESPECT_TABLE_FORMAT,
                HIVE_STORAGE_FORMAT));
    }
    MetastoreContext metastoreContext = getMetastoreContext(session);
    Map<String, HiveColumnHandle> columnHandlesByName = Maps.uniqueIndex(columnHandles, HiveColumnHandle::getName);
    List<Column> partitionColumns = partitionedBy.stream()
            .map(columnHandlesByName::get)
            .map(columnHandle -> columnHandleToColumn(metastoreContext, columnHandle))
            .collect(toList());
    checkPartitionTypesSupported(partitionColumns);
    LocationHandle locationHandle = locationService.forNewTable(metastore, session, schemaName, tableName, isTempPathRequired(session, bucketProperty, preferredOrderingColumns));
    HdfsContext context = new HdfsContext(session, schemaName, tableName, locationHandle.getTargetPath().toString(), true);
    Map<String, String> tableProperties = getEmptyTableProperties(tableMetadata, context, tableStorageFormat, tableEncryptionProperties);
    HiveOutputTableHandle result = new HiveOutputTableHandle(
            schemaName,
            tableName,
            columnHandles,
            metastore.generatePageSinkMetadata(metastoreContext, schemaTableName),
            locationHandle,
            tableStorageFormat,
            partitionStorageFormat,
            actualStorageFormat,
            getHiveCompressionCodec(session, false, actualStorageFormat),
            partitionedBy,
            bucketProperty,
            preferredOrderingColumns,
            session.getUser(),
            tableProperties,
            encryptionInformationProvider.getWriteEncryptionInformation(session, tableEncryptionProperties, schemaName, tableName));
    WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    metastore.declareIntentionToWrite(context, metastoreContext, writeInfo.getWriteMode(), writeInfo.getWritePath(), writeInfo.getTempPath(), schemaTableName, false);
    return result;
}
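One easy-to-miss detail above is how the effective storage format is chosen: unpartitioned tables always use the table format, partitioned tables use the (possibly session-overridden) partition format, and encrypted tables additionally require the two formats to agree. A small sketch of just that decision follows; the class, the chooseFormat helper, and the enum values are hypothetical names for illustration only.

import java.util.List;

public class StorageFormatSketch {
    enum Format { ORC, PARQUET, DWRF }

    // Mirrors the selection above: the partition format only applies to
    // partitioned tables, and encrypted tables must use one format throughout.
    static Format chooseFormat(Format tableFormat, Format partitionFormat, List<String> partitionedBy, boolean encrypted) {
        Format actual = partitionedBy.isEmpty() ? tableFormat : partitionFormat;
        if (encrypted && actual != tableFormat) {
            throw new IllegalArgumentException(
                    "For encrypted tables, partition format (" + actual + ") should match table format (" + tableFormat + ")");
        }
        return actual;
    }

    public static void main(String[] args) {
        System.out.println(chooseFormat(Format.ORC, Format.PARQUET, List.of(), false));      // ORC
        System.out.println(chooseFormat(Format.ORC, Format.PARQUET, List.of("ds"), false));  // PARQUET
    }
}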
Use of com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED in project presto by prestodb.
The class StoragePartitionLoader, method loadPartition.
@Override
public ListenableFuture<?> loadPartition(HivePartitionMetadata partition, HiveSplitSource hiveSplitSource, boolean stopped) throws IOException {
    String partitionName = partition.getHivePartition().getPartitionId();
    Storage storage = partition.getPartition().map(Partition::getStorage).orElse(table.getStorage());
    Properties schema = getPartitionSchema(table, partition.getPartition());
    String inputFormatName = storage.getStorageFormat().getInputFormat();
    int partitionDataColumnCount = partition.getPartition()
            .map(p -> p.getColumns().size())
            .orElse(table.getDataColumns().size());
    List<HivePartitionKey> partitionKeys = getPartitionKeys(table, partition.getPartition(), partitionName);
    String location = getPartitionLocation(table, partition.getPartition());
    if (location.isEmpty()) {
        checkState(!shouldCreateFilesForMissingBuckets(table, session), "Empty location is only allowed for empty temporary table when zero-row file is not created");
        return COMPLETED_FUTURE;
    }
    Path path = new Path(location);
    Configuration configuration = hdfsEnvironment.getConfiguration(hdfsContext, path);
    InputFormat<?, ?> inputFormat = getInputFormat(configuration, inputFormatName, false);
    ExtendedFileSystem fs = hdfsEnvironment.getFileSystem(hdfsContext, path);
    boolean s3SelectPushdownEnabled = shouldEnablePushdownForTable(session, table, path.toString(), partition.getPartition());
    if (inputFormat instanceof SymlinkTextInputFormat) {
        if (tableBucketInfo.isPresent()) {
            throw new PrestoException(NOT_SUPPORTED, "Bucketed table in SymlinkTextInputFormat is not yet supported");
        }
        // TODO: This should use an iterator like the HiveFileIterator
        ListenableFuture<?> lastResult = COMPLETED_FUTURE;
        for (Path targetPath : getTargetPathsFromSymlink(fs, path)) {
            // The input should be in TextInputFormat.
            TextInputFormat targetInputFormat = new TextInputFormat();
            // the splits must be generated using the file system for the target path
            // get the configuration for the target path -- it may be a different hdfs instance
            ExtendedFileSystem targetFilesystem = hdfsEnvironment.getFileSystem(hdfsContext, targetPath);
            JobConf targetJob = toJobConf(targetFilesystem.getConf());
            targetJob.setInputFormat(TextInputFormat.class);
            targetInputFormat.configure(targetJob);
            FileInputFormat.setInputPaths(targetJob, targetPath);
            InputSplit[] targetSplits = targetInputFormat.getSplits(targetJob, 0);
            InternalHiveSplitFactory splitFactory = new InternalHiveSplitFactory(
                    targetFilesystem,
                    inputFormat,
                    pathDomain,
                    getNodeSelectionStrategy(session),
                    getMaxInitialSplitSize(session),
                    s3SelectPushdownEnabled,
                    new HiveSplitPartitionInfo(storage, path.toUri(), partitionKeys, partitionName, partitionDataColumnCount, partition.getTableToPartitionMapping(), Optional.empty(), partition.getRedundantColumnDomains()),
                    schedulerUsesHostAddresses,
                    partition.getEncryptionInformation());
            lastResult = addSplitsToSource(targetSplits, splitFactory, hiveSplitSource, stopped);
            if (stopped) {
                return COMPLETED_FUTURE;
            }
        }
        return lastResult;
    }
    Optional<HiveSplit.BucketConversion> bucketConversion = Optional.empty();
    boolean bucketConversionRequiresWorkerParticipation = false;
    if (partition.getPartition().isPresent()) {
        Optional<HiveBucketProperty> partitionBucketProperty = partition.getPartition().get().getStorage().getBucketProperty();
        if (tableBucketInfo.isPresent() && partitionBucketProperty.isPresent()) {
            int tableBucketCount = tableBucketInfo.get().getTableBucketCount();
            int partitionBucketCount = partitionBucketProperty.get().getBucketCount();
            // Here, it's just checking whether a BucketConversion is needed.
            if (tableBucketCount != partitionBucketCount) {
                bucketConversion = Optional.of(new HiveSplit.BucketConversion(tableBucketCount, partitionBucketCount, tableBucketInfo.get().getBucketColumns()));
                if (tableBucketCount > partitionBucketCount) {
                    bucketConversionRequiresWorkerParticipation = true;
                }
            }
        }
    }
    InternalHiveSplitFactory splitFactory = new InternalHiveSplitFactory(
            fs,
            inputFormat,
            pathDomain,
            getNodeSelectionStrategy(session),
            getMaxInitialSplitSize(session),
            s3SelectPushdownEnabled,
            new HiveSplitPartitionInfo(storage, path.toUri(), partitionKeys, partitionName, partitionDataColumnCount, partition.getTableToPartitionMapping(), bucketConversionRequiresWorkerParticipation ? bucketConversion : Optional.empty(), partition.getRedundantColumnDomains()),
            schedulerUsesHostAddresses,
            partition.getEncryptionInformation());
    if (shouldUseFileSplitsFromInputFormat(inputFormat, configuration, table.getStorage().getLocation())) {
        if (tableBucketInfo.isPresent()) {
            throw new PrestoException(NOT_SUPPORTED, "Presto cannot read bucketed partition in an input format with UseFileSplitsFromInputFormat annotation: " + inputFormat.getClass().getSimpleName());
        }
        JobConf jobConf = toJobConf(configuration);
        FileInputFormat.setInputPaths(jobConf, path);
        // SerDes parameters and Table parameters passing into input format
        fromProperties(schema).forEach(jobConf::set);
        InputSplit[] splits = inputFormat.getSplits(jobConf, 0);
        return addSplitsToSource(splits, splitFactory, hiveSplitSource, stopped);
    }
    PathFilter pathFilter = isHudiParquetInputFormat(inputFormat) ? hoodiePathFilterLoadingCache.getUnchecked(configuration) : path1 -> true;
    // Streaming aggregation and partial aggregation pushdown work at the granularity of individual files,
    // and S3 Select pushdown works at the granularity of individual S3 objects,
    // therefore we must not split files when any of them is enabled.
    // Files with skipped header / footer lines are not splittable, except for the special case skip.header.line.count=1
    boolean splittable = isFileSplittable(session)
            && !isStreamingAggregationEnabled(session)
            && !s3SelectPushdownEnabled
            && !partialAggregationsPushedDown
            && getFooterCount(schema) == 0
            && getHeaderCount(schema) <= 1;
    // Bucketed partitions are fully loaded immediately since all files must be loaded to determine the file to bucket mapping
    if (tableBucketInfo.isPresent()) {
        if (tableBucketInfo.get().isVirtuallyBucketed()) {
            // For virtual buckets, bucket conversion must not be present because there is no physical partition bucket count
            checkState(!bucketConversion.isPresent(), "Virtually bucketed table must not have partitions that are physically bucketed");
            checkState(tableBucketInfo.get().getTableBucketCount() == tableBucketInfo.get().getReadBucketCount(), "Table and read bucket count should be the same for virtual bucket");
            return hiveSplitSource.addToQueue(getVirtuallyBucketedSplits(path, fs, splitFactory, tableBucketInfo.get().getReadBucketCount(), splittable, pathFilter));
        }
        return hiveSplitSource.addToQueue(getBucketedSplits(path, fs, splitFactory, tableBucketInfo.get(), bucketConversion, partitionName, splittable, pathFilter));
    }
    fileIterators.addLast(createInternalHiveSplitIterator(path, fs, splitFactory, splittable, pathFilter, partition.getPartition()));
    return COMPLETED_FUTURE;
}
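The splittable flag above folds several independent conditions into one boolean. The following small sketch isolates that decision, with plain parameters standing in for the session and schema lookups; the class SplittableSketch, the isSplittable helper, and its parameter names are hypothetical.

public class SplittableSketch {
    // A file can be divided into multiple splits only when nothing that operates
    // on whole files or whole S3 objects is in play: streaming aggregation,
    // S3 Select pushdown, partial aggregation pushdown, footer lines, or more
    // than one header line.
    static boolean isSplittable(
            boolean fileSplittingEnabled,
            boolean streamingAggregationEnabled,
            boolean s3SelectPushdownEnabled,
            boolean partialAggregationsPushedDown,
            int footerCount,
            int headerCount) {
        return fileSplittingEnabled
                && !streamingAggregationEnabled
                && !s3SelectPushdownEnabled
                && !partialAggregationsPushedDown
                && footerCount == 0
                && headerCount <= 1;
    }

    public static void main(String[] args) {
        System.out.println(isSplittable(true, false, false, false, 0, 0)); // true
        System.out.println(isSplittable(true, false, true, false, 0, 0));  // false: S3 Select reads whole objects
    }
}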