Use of io.trino.spi.StandardErrorCode.NOT_SUPPORTED in project trino by trinodb.
The class AbstractGreatestLeast, method generate().
private Class<?> generate(List<Class<?>> javaTypes, MethodHandle compareMethod)
{
    Signature signature = getFunctionMetadata().getSignature();
    checkCondition(javaTypes.size() <= 127, NOT_SUPPORTED, "Too many arguments for function call %s()", signature.getName());

    String javaTypeName = javaTypes.stream()
            .map(Class::getSimpleName)
            .collect(joining());

    ClassDefinition definition = new ClassDefinition(a(PUBLIC, FINAL), makeClassName(javaTypeName + "$" + signature.getName()), type(Object.class));
    definition.declareDefaultConstructor(a(PRIVATE));

    List<Parameter> parameters = IntStream.range(0, javaTypes.size())
            .mapToObj(i -> arg("arg" + i, javaTypes.get(i)))
            .collect(toImmutableList());
    MethodDefinition method = definition.declareMethod(a(PUBLIC, STATIC), signature.getName(), type(wrap(javaTypes.get(0))), parameters);
    Scope scope = method.getScope();
    BytecodeBlock body = method.getBody();
    CallSiteBinder binder = new CallSiteBinder();

    Variable value = scope.declareVariable(wrap(javaTypes.get(0)), "value");
    BytecodeExpression nullValue = constantNull(wrap(javaTypes.get(0)));
    body.append(value.set(nullValue));

    LabelNode done = new LabelNode("done");
    // Adapt the comparison to boxed parameter types so null-capable values can flow through it
    compareMethod = compareMethod.asType(methodType(boolean.class, compareMethod.type().wrap().parameterList()));
    for (int i = 0; i < javaTypes.size(); i++) {
        Parameter parameter = parameters.get(i);
        BytecodeExpression invokeCompare = invokeDynamic(BOOTSTRAP_METHOD, ImmutableList.of(binder.bind(compareMethod).getBindingId()), "compare", boolean.class, parameter, value);
        // A null argument makes the whole result null: clear the accumulator and jump to the end
        body.append(new IfStatement()
                .condition(isNull(parameter))
                .ifTrue(new BytecodeBlock().append(value.set(nullValue)).gotoLabel(done)));
        // Keep this argument if it is the first non-null value or wins the comparison
        body.append(new IfStatement()
                .condition(or(isNull(value), invokeCompare))
                .ifTrue(value.set(parameter)));
    }
    body.visitLabel(done);
    body.append(value.ret());

    return defineClass(definition, Object.class, binder.getBindings(), new DynamicClassLoader(getClass().getClassLoader()));
}
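For intuition, this is roughly the Java that the generated bytecode corresponds to, assuming a two-argument greatest over boxed Long values. The class and method names are illustrative, and isGreater() stands in for the compareMethod that the real code binds via invokedynamic:

// Hand-written sketch of the generated class, not actual Trino source
public final class LongLongGreatest
{
    private LongLongGreatest() {}

    public static Long greatest(Long arg0, Long arg1)
    {
        Long value = null;

        // unrolled loop body for arg0
        if (arg0 == null) {
            return null; // any null argument makes the whole result null
        }
        if (value == null || isGreater(arg0, value)) {
            value = arg0;
        }

        // unrolled loop body for arg1
        if (arg1 == null) {
            return null;
        }
        if (value == null || isGreater(arg1, value)) {
            value = arg1;
        }
        return value;
    }

    // stands in for the MethodHandle bound through CallSiteBinder
    private static boolean isGreater(Long left, Long right)
    {
        return left.compareTo(right) > 0;
    }
}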
Use of io.trino.spi.StandardErrorCode.NOT_SUPPORTED in project trino by trinodb.
The class SetViewAuthorizationTask, method execute().
@Override
public ListenableFuture<Void> execute(SetViewAuthorization statement, QueryStateMachine stateMachine, List<Expression> parameters, WarningCollector warningCollector)
{
    Session session = stateMachine.getSession();
    QualifiedObjectName viewName = createQualifiedObjectName(session, statement, statement.getSource());
    getRequiredCatalogHandle(metadata, session, statement, viewName.getCatalogName());
    ViewDefinition view = metadata.getView(session, viewName)
            .orElseThrow(() -> semanticException(TABLE_NOT_FOUND, statement, "View '%s' does not exist", viewName));

    TrinoPrincipal principal = createPrincipal(statement.getPrincipal());
    checkRoleExists(session, statement, metadata, principal, Optional.of(viewName.getCatalogName())
            .filter(catalog -> metadata.isCatalogManagedSecurity(session, catalog)));

    if (!view.isRunAsInvoker() && !isAllowSetViewAuthorization) {
        throw new TrinoException(NOT_SUPPORTED, format(
                "Cannot set authorization for view %s to %s: this feature is disabled",
                viewName.getCatalogName() + '.' + viewName.getSchemaName() + '.' + viewName.getObjectName(),
                principal));
    }

    accessControl.checkCanSetViewAuthorization(session.toSecurityContext(), viewName, principal);
    metadata.setViewAuthorization(session, viewName.asCatalogSchemaTableName(), principal);
    return immediateVoidFuture();
}
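The guard above is a common Trino pattern: a configuration gate that fails fast with NOT_SUPPORTED before any metadata is touched. A minimal, self-contained sketch of the same pattern; the class and parameter names here are hypothetical, not Trino API:

import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED;
import static java.lang.String.format;

import io.trino.spi.TrinoException;

final class ViewAuthorizationGate
{
    private ViewAuthorizationGate() {}

    // runAsInvoker mirrors view.isRunAsInvoker(); allowSetViewAuthorization mirrors the
    // isAllowSetViewAuthorization flag that the task above reads from configuration
    static void checkCanChangeOwner(boolean runAsInvoker, boolean allowSetViewAuthorization, String viewName, String principal)
    {
        if (!runAsInvoker && !allowSetViewAuthorization) {
            throw new TrinoException(NOT_SUPPORTED, format("Cannot set authorization for view %s to %s: this feature is disabled", viewName, principal));
        }
    }
}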
Use of io.trino.spi.StandardErrorCode.NOT_SUPPORTED in project trino by trinodb.
The class HiveMetadata, method beginInsert().
@Override
public HiveInsertTableHandle beginInsert(ConnectorSession session, ConnectorTableHandle tableHandle, List<ColumnHandle> columns, RetryMode retryMode)
{
    SchemaTableName tableName = ((HiveTableHandle) tableHandle).getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    checkTableIsWritable(table, writesToNonManagedTablesEnabled);

    for (Column column : table.getDataColumns()) {
        if (!isWritableType(column.getType())) {
            throw new TrinoException(NOT_SUPPORTED, format("Inserting into Hive table %s with column type %s not supported", tableName, column.getType()));
        }
    }

    boolean isTransactional = isTransactionalTable(table.getParameters());
    if (isTransactional && retryMode != NO_RETRIES) {
        throw new TrinoException(NOT_SUPPORTED, "Inserting into Hive transactional tables is not supported with query retries enabled");
    }
    if (isTransactional && !autoCommit) {
        throw new TrinoException(NOT_SUPPORTED, "Inserting into Hive transactional tables is not supported in explicit transactions (use autocommit mode)");
    }
    if (isSparkBucketedTable(table)) {
        throw new TrinoException(NOT_SUPPORTED, "Inserting into Spark bucketed tables is not supported");
    }

    List<HiveColumnHandle> handles = hiveColumnHandles(table, typeManager, getTimestampPrecision(session)).stream()
            .filter(columnHandle -> !columnHandle.isHidden())
            .collect(toImmutableList());

    HiveStorageFormat tableStorageFormat = extractHiveStorageFormat(table);
    Optional.ofNullable(table.getParameters().get(SKIP_HEADER_COUNT_KEY)).map(Integer::parseInt).ifPresent(headerSkipCount -> {
        if (headerSkipCount > 1) {
            throw new TrinoException(NOT_SUPPORTED, format("Inserting into Hive table with value of %s property greater than 1 is not supported", SKIP_HEADER_COUNT_KEY));
        }
    });
    if (table.getParameters().containsKey(SKIP_FOOTER_COUNT_KEY)) {
        throw new TrinoException(NOT_SUPPORTED, format("Inserting into Hive table with %s property not supported", SKIP_FOOTER_COUNT_KEY));
    }

    LocationHandle locationHandle = locationService.forExistingTable(metastore, session, table);
    AcidTransaction transaction = isTransactional ? metastore.beginInsert(session, table) : NO_ACID_TRANSACTION;
    HiveInsertTableHandle result = new HiveInsertTableHandle(
            tableName.getSchemaName(),
            tableName.getTableName(),
            handles,
            metastore.generatePageSinkMetadata(tableName),
            locationHandle,
            table.getStorage().getBucketProperty(),
            tableStorageFormat,
            isRespectTableFormat(session) ? tableStorageFormat : getHiveStorageFormat(session),
            transaction,
            retryMode != NO_RETRIES);

    WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    if (getInsertExistingPartitionsBehavior(session) == InsertExistingPartitionsBehavior.OVERWRITE && writeInfo.getWriteMode() == DIRECT_TO_TARGET_EXISTING_DIRECTORY) {
        if (isTransactional) {
            throw new TrinoException(NOT_SUPPORTED, "Overwriting existing partition in transactional tables doesn't support DIRECT_TO_TARGET_EXISTING_DIRECTORY write mode");
        }
        // Partition overwrite operation is nonatomic thus can't and shouldn't be used in non autocommit context.
        if (!autoCommit) {
            throw new TrinoException(NOT_SUPPORTED, "Overwriting existing partition in non auto commit context doesn't support DIRECT_TO_TARGET_EXISTING_DIRECTORY write mode");
        }
    }
    metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), tableName);
    return result;
}
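The skip-header guard above chains Optional.ofNullable, map, and ifPresent so the table property is parsed and validated only when it is present. A condensed, self-contained sketch of that idiom; the literal "skip.header.line.count" is assumed here to be the Hive property behind SKIP_HEADER_COUNT_KEY, and the class name is hypothetical:

import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED;

import io.trino.spi.TrinoException;
import java.util.Map;
import java.util.Optional;

final class SkipHeaderCheck
{
    private SkipHeaderCheck() {}

    static void validateForInsert(Map<String, String> tableParameters)
    {
        // Missing property: nothing to check. Present property: parse it and reject
        // values above 1, mirroring the beginInsert guard.
        Optional.ofNullable(tableParameters.get("skip.header.line.count"))
                .map(Integer::parseInt)
                .ifPresent(headerSkipCount -> {
                    if (headerSkipCount > 1) {
                        throw new TrinoException(NOT_SUPPORTED, "Inserting into a Hive table with skip.header.line.count greater than 1 is not supported");
                    }
                });
    }
}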
Use of io.trino.spi.StandardErrorCode.NOT_SUPPORTED in project trino by trinodb.
The class HiveMetadata, method getNewTableLayout().
@Override
public Optional<ConnectorTableLayout> getNewTableLayout(ConnectorSession session, ConnectorTableMetadata tableMetadata)
{
    validateTimestampColumns(tableMetadata.getColumns(), getTimestampPrecision(session));
    validatePartitionColumns(tableMetadata);
    validateBucketColumns(tableMetadata);
    validateColumns(tableMetadata);

    Optional<HiveBucketProperty> bucketProperty = getBucketProperty(tableMetadata.getProperties());
    List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties());
    if (bucketProperty.isEmpty()) {
        // return preferred layout which is partitioned by partition columns
        if (partitionedBy.isEmpty()) {
            return Optional.empty();
        }
        return Optional.of(new ConnectorTableLayout(partitionedBy));
    }
    if (!bucketProperty.get().getSortedBy().isEmpty() && !isSortedWritingEnabled(session)) {
        throw new TrinoException(NOT_SUPPORTED, "Writing to bucketed sorted Hive tables is disabled");
    }

    List<String> bucketedBy = bucketProperty.get().getBucketedBy();
    Map<String, HiveType> hiveTypeMap = tableMetadata.getColumns().stream()
            .collect(toMap(ColumnMetadata::getName, column -> toHiveType(column.getType())));
    return Optional.of(new ConnectorTableLayout(
            new HivePartitioningHandle(
                    bucketProperty.get().getBucketingVersion(),
                    bucketProperty.get().getBucketCount(),
                    bucketedBy.stream().map(hiveTypeMap::get).collect(toImmutableList()),
                    OptionalInt.of(bucketProperty.get().getBucketCount()),
                    !partitionedBy.isEmpty() && isParallelPartitionedBucketedWrites(session)),
            ImmutableList.<String>builder()
                    .addAll(bucketedBy)
                    .addAll(partitionedBy)
                    .build()));
}
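The layout decision reduces to three outcomes: no bucketing and no partitioning means no preferred layout; partitioning alone yields a layout over the partition columns; bucketing yields a partitioning handle over bucket columns followed by partition columns. A simplified sketch of that branching, assuming plain string lists in place of Hive's metadata types (pickWriteColumns is a hypothetical helper, not Trino API):

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

final class LayoutChoice
{
    private LayoutChoice() {}

    // Returns the preferred write-partitioning columns, or empty when the engine may choose freely
    static Optional<List<String>> pickWriteColumns(List<String> bucketedBy, List<String> partitionedBy)
    {
        if (bucketedBy.isEmpty()) {
            // unbucketed: prefer grouping rows by the partition columns, if any
            return partitionedBy.isEmpty() ? Optional.empty() : Optional.of(List.copyOf(partitionedBy));
        }
        // bucketed: bucket columns first, then partition columns, matching the builder order above
        List<String> columns = new ArrayList<>(bucketedBy);
        columns.addAll(partitionedBy);
        return Optional.of(List.copyOf(columns));
    }
}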
Use of io.trino.spi.StandardErrorCode.NOT_SUPPORTED in project trino by trinodb.
The class HiveMetadata, method getInsertLayout().
@Override
public Optional<ConnectorTableLayout> getInsertLayout(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle;
    SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));

    if (table.getStorage().getBucketProperty().isPresent()) {
        if (!isSupportedBucketing(table)) {
            throw new TrinoException(NOT_SUPPORTED, "Cannot write to a table bucketed on an unsupported type");
        }
    }
    // Note: we cannot use hiveTableHandle.isInAcidTransaction() here as transaction is not yet set in HiveTableHandle when getInsertLayout is called
    else if (isFullAcidTable(table.getParameters())) {
        table = Table.builder(table)
                .withStorage(storage -> storage.setBucketProperty(Optional.of(
                        new HiveBucketProperty(ImmutableList.of(), HiveBucketing.BucketingVersion.BUCKETING_V2, 1, ImmutableList.of()))))
                .build();
    }

    Optional<HiveBucketHandle> hiveBucketHandle = getHiveBucketHandle(session, table, typeManager);
    List<Column> partitionColumns = table.getPartitionColumns();
    if (hiveBucketHandle.isEmpty()) {
        // return preferred layout which is partitioned by partition columns
        if (partitionColumns.isEmpty()) {
            return Optional.empty();
        }
        return Optional.of(new ConnectorTableLayout(partitionColumns.stream().map(Column::getName).collect(toImmutableList())));
    }

    HiveBucketProperty bucketProperty = table.getStorage().getBucketProperty()
            .orElseThrow(() -> new NoSuchElementException("Bucket property should be set"));
    if (!bucketProperty.getSortedBy().isEmpty() && !isSortedWritingEnabled(session)) {
        throw new TrinoException(NOT_SUPPORTED, "Writing to bucketed sorted Hive tables is disabled");
    }

    ImmutableList.Builder<String> partitioningColumns = ImmutableList.builder();
    hiveBucketHandle.get().getColumns().stream().map(HiveColumnHandle::getName).forEach(partitioningColumns::add);
    partitionColumns.stream().map(Column::getName).forEach(partitioningColumns::add);
    HivePartitioningHandle partitioningHandle = new HivePartitioningHandle(
            hiveBucketHandle.get().getBucketingVersion(),
            hiveBucketHandle.get().getTableBucketCount(),
            hiveBucketHandle.get().getColumns().stream().map(HiveColumnHandle::getHiveType).collect(toImmutableList()),
            OptionalInt.of(hiveBucketHandle.get().getTableBucketCount()),
            !partitionColumns.isEmpty() && isParallelPartitionedBucketedWrites(session));
    return Optional.of(new ConnectorTableLayout(partitioningHandle, partitioningColumns.build()));
}
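One subtlety above: an unbucketed full-ACID table is rewritten with a synthetic single-bucket property (BUCKETING_V2, bucket count 1) so the insert flows through the bucketed write path. A minimal sketch of that fallback, with TableSpec as a hypothetical stand-in for Trino's Table and HiveBucketProperty types:

import java.util.OptionalInt;

final class AcidBucketFallback
{
    private AcidBucketFallback() {}

    // Hypothetical, simplified table descriptor: just a bucket count and the ACID flag
    record TableSpec(OptionalInt bucketCount, boolean fullAcid) {}

    static TableSpec withSyntheticAcidBucketing(TableSpec table)
    {
        if (table.bucketCount().isEmpty() && table.fullAcid()) {
            // single bucket; the real code also pins the bucketing version to BUCKETING_V2
            return new TableSpec(OptionalInt.of(1), true);
        }
        return table;
    }
}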