Use of io.trino.spi.StandardErrorCode.NOT_SUPPORTED in project trino by trinodb.
From the class HiveMetadata, method getTableHandleForOptimize:
private Optional<ConnectorTableExecuteHandle> getTableHandleForOptimize(ConnectorSession session, ConnectorTableHandle tableHandle, Map<String, Object> executeProperties, RetryMode retryMode)
{
    // TODO lots of that is copied from beginInsert; refactoring opportunity
    if (!isNonTransactionalOptimizeEnabled(session)) {
        // OPTIMIZE is non-transactional: if post-optimize cleanup of data files fails, duplicate rows will be left in the table and manual cleanup from the user will be required.
        throw new TrinoException(NOT_SUPPORTED, "OPTIMIZE procedure must be explicitly enabled via " + NON_TRANSACTIONAL_OPTIMIZE_ENABLED + " session property");
    }
    if (retryMode != NO_RETRIES) {
        throw new TrinoException(NOT_SUPPORTED, "OPTIMIZE procedure is not supported with query retries enabled");
    }
    HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle;
    SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    checkTableIsWritable(table, writesToNonManagedTablesEnabled);
    for (Column column : table.getDataColumns()) {
        if (!isWritableType(column.getType())) {
            throw new TrinoException(NOT_SUPPORTED, format("Optimizing Hive table %s with column type %s not supported", tableName, column.getType()));
        }
    }
    if (isTransactionalTable(table.getParameters())) {
        throw new TrinoException(NOT_SUPPORTED, format("Optimizing transactional Hive table %s is not supported", tableName));
    }
    if (table.getStorage().getBucketProperty().isPresent()) {
        throw new TrinoException(NOT_SUPPORTED, format("Optimizing bucketed Hive table %s is not supported", tableName));
    }
    // TODO forcing NANOSECONDS precision here so we do not lose data. In future we may be smarter; options:
    // - respect timestamp_precision but recognize situation when rounding occurs, and fail query
    // - detect data's precision and maintain it
    List<HiveColumnHandle> columns = hiveColumnHandles(table, typeManager, NANOSECONDS).stream()
            .filter(columnHandle -> !columnHandle.isHidden())
            .collect(toImmutableList());
    HiveStorageFormat tableStorageFormat = extractHiveStorageFormat(table);
    Optional.ofNullable(table.getParameters().get(SKIP_HEADER_COUNT_KEY)).map(Integer::parseInt).ifPresent(headerSkipCount -> {
        if (headerSkipCount > 1) {
            throw new TrinoException(NOT_SUPPORTED, format("Optimizing Hive table %s with value of %s property greater than 1 is not supported", tableName, SKIP_HEADER_COUNT_KEY));
        }
    });
    if (table.getParameters().containsKey(SKIP_FOOTER_COUNT_KEY)) {
        throw new TrinoException(NOT_SUPPORTED, format("Optimizing Hive table %s with %s property not supported", tableName, SKIP_FOOTER_COUNT_KEY));
    }
    LocationHandle locationHandle = locationService.forOptimize(metastore, session, table);
    DataSize fileSizeThreshold = (DataSize) executeProperties.get("file_size_threshold");
    return Optional.of(new HiveTableExecuteHandle(
            OptimizeTableProcedure.NAME,
            Optional.empty(),
            Optional.of(fileSizeThreshold.toBytes()),
            tableName.getSchemaName(),
            tableName.getTableName(),
            columns,
            metastore.generatePageSinkMetadata(tableName),
            locationHandle,
            table.getStorage().getBucketProperty(),
            tableStorageFormat,
            // TODO: test with multiple partitions using different storage format
            tableStorageFormat,
            NO_ACID_TRANSACTION,
            retryMode != NO_RETRIES));
}
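Every check in this method is a guard clause: an unsupported table configuration is rejected with a TrinoException carrying the NOT_SUPPORTED error code before any execute handle is built. A minimal, self-contained sketch of that pattern (the OptimizeGuards class and its parameters are illustrative placeholders, not part of HiveMetadata):

import io.trino.spi.TrinoException;

import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED;
import static java.lang.String.format;

public final class OptimizeGuards
{
    private OptimizeGuards() {}

    // Hypothetical guard mirroring the bucketed-table check in getTableHandleForOptimize:
    // fail fast with NOT_SUPPORTED instead of producing a partially built handle.
    public static void checkNotBucketed(String tableName, boolean bucketed)
    {
        if (bucketed) {
            throw new TrinoException(NOT_SUPPORTED, format("Optimizing bucketed Hive table %s is not supported", tableName));
        }
    }
}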
Use of io.trino.spi.StandardErrorCode.NOT_SUPPORTED in project trino by trinodb.
From the class HiveMetadata, method validateColumns:
private static void validateColumns(ConnectorTableMetadata tableMetadata)
{
    // Validate the name and the type of each column
    for (ColumnMetadata column : tableMetadata.getColumns()) {
        String columnName = column.getName();
        if (columnName.startsWith(" ")) {
            throw new TrinoException(NOT_SUPPORTED, format("Hive column names must not start with a space: '%s'", columnName));
        }
        if (columnName.endsWith(" ")) {
            throw new TrinoException(NOT_SUPPORTED, format("Hive column names must not end with a space: '%s'", columnName));
        }
        if (columnName.contains(",")) {
            throw new TrinoException(NOT_SUPPORTED, format("Hive column names must not contain commas: '%s'", columnName));
        }
        // validate type is supported
        toHiveType(column.getType());
    }
    if (getHiveStorageFormat(tableMetadata.getProperties()) != HiveStorageFormat.CSV) {
        return;
    }
    Set<String> partitionedBy = ImmutableSet.copyOf(getPartitionedBy(tableMetadata.getProperties()));
    List<ColumnMetadata> unsupportedColumns = tableMetadata.getColumns().stream()
            .filter(columnMetadata -> !partitionedBy.contains(columnMetadata.getName()))
            .filter(columnMetadata -> !columnMetadata.getType().equals(createUnboundedVarcharType()))
            .collect(toImmutableList());
    if (!unsupportedColumns.isEmpty()) {
        String joinedUnsupportedColumns = unsupportedColumns.stream()
                .map(columnMetadata -> format("%s %s", columnMetadata.getName(), columnMetadata.getType()))
                .collect(joining(", "));
        throw new TrinoException(NOT_SUPPORTED, "Hive CSV storage format only supports VARCHAR (unbounded). Unsupported columns: " + joinedUnsupportedColumns);
    }
}
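The per-column name rules above are plain string predicates and can be exercised without any metastore objects. A small sketch under that assumption (the HiveColumnNames helper is hypothetical and collapses the three distinct error messages into one):

import io.trino.spi.TrinoException;

import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED;
import static java.lang.String.format;

final class HiveColumnNames
{
    private HiveColumnNames() {}

    // Mirrors the name checks in validateColumns: a Hive column name may not
    // start or end with a space and may not contain a comma.
    static void validateName(String columnName)
    {
        if (columnName.startsWith(" ") || columnName.endsWith(" ") || columnName.contains(",")) {
            throw new TrinoException(NOT_SUPPORTED, format("Unsupported Hive column name: '%s'", columnName));
        }
    }
}

For CSV tables the same method additionally requires every non-partition column to be unbounded VARCHAR, which is why the second half of validateColumns returns early for other storage formats.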
Use of io.trino.spi.StandardErrorCode.NOT_SUPPORTED in project trino by trinodb.
From the class HiveMetadata, method beginUpdate:
@Override
public ConnectorTableHandle beginUpdate(ConnectorSession session, ConnectorTableHandle tableHandle, List<ColumnHandle> updatedColumns, RetryMode retryMode)
{
    HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle;
    SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    if (!isFullAcidTable(table.getParameters())) {
        throw new TrinoException(NOT_SUPPORTED, "Hive update is only supported for ACID transactional tables");
    }
    if (!autoCommit) {
        throw new TrinoException(NOT_SUPPORTED, "Updating transactional tables is not supported in explicit transactions (use autocommit mode)");
    }
    if (isSparkBucketedTable(table)) {
        throw new TrinoException(NOT_SUPPORTED, "Updating Spark bucketed tables is not supported");
    }
    // Verify that none of the updated columns are partition columns or bucket columns
    Set<String> updatedColumnNames = updatedColumns.stream()
            .map(handle -> ((HiveColumnHandle) handle).getName())
            .collect(toImmutableSet());
    Set<String> partitionColumnNames = table.getPartitionColumns().stream()
            .map(Column::getName)
            .collect(toImmutableSet());
    if (!intersection(updatedColumnNames, partitionColumnNames).isEmpty()) {
        throw new TrinoException(NOT_SUPPORTED, "Updating Hive table partition columns is not supported");
    }
    hiveTableHandle.getBucketHandle().ifPresent(handle -> {
        Set<String> bucketColumnNames = handle.getColumns().stream()
                .map(HiveColumnHandle::getName)
                .collect(toImmutableSet());
        if (!intersection(updatedColumnNames, bucketColumnNames).isEmpty()) {
            throw new TrinoException(NOT_SUPPORTED, "Updating Hive table bucket columns is not supported");
        }
    });
    checkTableIsWritable(table, writesToNonManagedTablesEnabled);
    for (Column column : table.getDataColumns()) {
        if (!isWritableType(column.getType())) {
            throw new TrinoException(NOT_SUPPORTED, format("Updating a Hive table with column type %s not supported", column.getType()));
        }
    }
    List<HiveColumnHandle> allDataColumns = getRegularColumnHandles(table, typeManager, getTimestampPrecision(session)).stream()
            .filter(columnHandle -> !columnHandle.isHidden())
            .collect(toImmutableList());
    List<HiveColumnHandle> hiveUpdatedColumns = updatedColumns.stream()
            .map(HiveColumnHandle.class::cast)
            .collect(toImmutableList());
    if (table.getParameters().containsKey(SKIP_HEADER_COUNT_KEY)) {
        throw new TrinoException(NOT_SUPPORTED, format("Updating a Hive table with %s property not supported", SKIP_HEADER_COUNT_KEY));
    }
    if (table.getParameters().containsKey(SKIP_FOOTER_COUNT_KEY)) {
        throw new TrinoException(NOT_SUPPORTED, format("Updating a Hive table with %s property not supported", SKIP_FOOTER_COUNT_KEY));
    }
    if (retryMode != NO_RETRIES) {
        throw new TrinoException(NOT_SUPPORTED, "Updating Hive tables is not supported with query retries enabled");
    }
    LocationHandle locationHandle = locationService.forExistingTable(metastore, session, table);
    HiveUpdateProcessor updateProcessor = new HiveUpdateProcessor(allDataColumns, hiveUpdatedColumns);
    AcidTransaction transaction = metastore.beginUpdate(session, table, updateProcessor);
    HiveTableHandle updateHandle = hiveTableHandle.withTransaction(transaction);
    WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), tableName);
    return updateHandle;
}
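Both the partition-column and bucket-column rejections above come down to the same set operation: intersect the updated column names with a protected set of names and fail if anything overlaps. A compact sketch of that check, assuming Guava on the classpath as in the original (the UpdateGuards class and its message parameter are illustrative, not part of the Trino codebase):

import io.trino.spi.TrinoException;

import java.util.Set;

import static com.google.common.collect.Sets.intersection;
import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED;

final class UpdateGuards
{
    private UpdateGuards() {}

    // Mirrors the checks in beginUpdate: reject the update if any updated column
    // belongs to the protected set (partition columns or bucket columns).
    static void checkNoProtectedColumnsUpdated(Set<String> updatedColumnNames, Set<String> protectedColumnNames, String errorMessage)
    {
        if (!intersection(updatedColumnNames, protectedColumnNames).isEmpty()) {
            throw new TrinoException(NOT_SUPPORTED, errorMessage);
        }
    }
}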