Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
Class ExampleMetadata, method listTables:
@Override
public List<SchemaTableName> listTables(ConnectorSession session, Optional<String> optionalSchemaName)
{
    // When a schema name is given, list only its tables; otherwise list tables across all schemas
    Set<String> schemaNames = optionalSchemaName.map(ImmutableSet::of)
            .orElseGet(() -> ImmutableSet.copyOf(exampleClient.getSchemaNames()));
    ImmutableList.Builder<SchemaTableName> builder = ImmutableList.builder();
    for (String schemaName : schemaNames) {
        for (String tableName : exampleClient.getTableNames(schemaName)) {
            builder.add(new SchemaTableName(schemaName, tableName));
        }
    }
    return builder.build();
}
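SchemaTableName is a simple value class pairing a schema name with a table name (both are lower-cased by its constructor). As a minimal sketch of consuming the list returned above, the helper below keeps only one schema's tables; the helper itself is hypothetical, not part of the Trino SPI:

import io.trino.spi.connector.SchemaTableName;
import java.util.List;

public final class SchemaTableNames
{
    // Hypothetical helper: keep only the tables that belong to one schema
    static List<SchemaTableName> tablesInSchema(List<SchemaTableName> tables, String schema)
    {
        return tables.stream()
                .filter(name -> name.getSchemaName().equals(schema))
                .toList();
    }
}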
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
Class HiveMetadata, method listTables:
private List<SchemaTableName> listTables(ConnectorSession session, SchemaTablePrefix prefix)
{
    if (prefix.getSchema().map(HiveUtil::isHiveSystemSchema).orElse(false)) {
        return ImmutableList.of();
    }
    if (prefix.getTable().isEmpty()) {
        // No table in the prefix: fall back to listing every table in the schema(s)
        return listTables(session, prefix.getSchema());
    }
    SchemaTableName tableName = prefix.toSchemaTableName();
    Optional<Table> optionalTable;
    try {
        optionalTable = metastore.getTable(tableName.getSchemaName(), tableName.getTableName());
    }
    catch (HiveViewNotSupportedException e) {
        // The table exists (it would be returned by listTables on the schema), but is an unsupported Hive view
        return ImmutableList.of(tableName);
    }
    // Optionally hide Delta Lake tables from listings
    return optionalTable
            .filter(table -> !hideDeltaLakeTables || !isDeltaLakeTable(table))
            .map(table -> ImmutableList.of(tableName))
            .orElseGet(ImmutableList::of);
}
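The SchemaTablePrefix parameter narrows the listing: an empty prefix means every schema, a schema-only prefix means every table in that schema, and a full prefix names exactly one table. A short sketch of the three forms, assuming the standard io.trino.spi.connector.SchemaTablePrefix constructors:

import io.trino.spi.connector.SchemaTablePrefix;

public class PrefixDemo
{
    public static void main(String[] args)
    {
        SchemaTablePrefix all = new SchemaTablePrefix();                     // every schema, every table
        SchemaTablePrefix schemaOnly = new SchemaTablePrefix("web");         // every table in schema "web"
        SchemaTablePrefix oneTable = new SchemaTablePrefix("web", "clicks"); // exactly web.clicks

        // toSchemaTableName() is only valid when both parts are present
        System.out.println(oneTable.toSchemaTableName()); // prints web.clicks
    }
}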
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
Class HiveMetadata, method beginDelete:
@Override
public ConnectorTableHandle beginDelete(ConnectorSession session, ConnectorTableHandle tableHandle, RetryMode retryMode)
{
    HiveTableHandle handle = (HiveTableHandle) tableHandle;
    SchemaTableName tableName = handle.getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    ensureTableSupportsDelete(table);
    if (retryMode != NO_RETRIES) {
        throw new TrinoException(NOT_SUPPORTED, "Deleting from Hive tables is not supported with query retries enabled");
    }
    if (!autoCommit) {
        throw new TrinoException(NOT_SUPPORTED, "Deleting from Hive transactional tables is not supported in explicit transactions (use autocommit mode)");
    }
    if (isSparkBucketedTable(table)) {
        throw new TrinoException(NOT_SUPPORTED, "Deleting from Spark bucketed tables is not supported");
    }
    LocationHandle locationHandle = locationService.forExistingTable(metastore, session, table);
    // Start an ACID transaction and record the intent to write to the table's location
    AcidTransaction transaction = metastore.beginDelete(session, table);
    WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), handle.getSchemaTableName());
    return handle.withTransaction(transaction);
}
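beginDelete returns the same table handle enriched with the ACID transaction, and the engine later hands that handle back to finishDelete along with the fragments the workers produced. A simplified sketch of that call sequence, where runDeleteOnWorkers is a hypothetical stand-in for the distributed execution stage (this is illustrative, not actual Trino engine code):

import io.airlift.slice.Slice;
import io.trino.spi.connector.ConnectorMetadata;
import io.trino.spi.connector.ConnectorSession;
import io.trino.spi.connector.ConnectorTableHandle;
import io.trino.spi.connector.RetryMode;
import java.util.Collection;

// Illustrative engine-side sequence, not actual Trino engine code
static void deleteLifecycle(ConnectorMetadata metadata, ConnectorSession session, ConnectorTableHandle tableHandle)
{
    ConnectorTableHandle deleteHandle = metadata.beginDelete(session, tableHandle, RetryMode.NO_RETRIES);
    Collection<Slice> fragments = runDeleteOnWorkers(deleteHandle); // hypothetical: workers emit one fragment per write
    metadata.finishDelete(session, deleteHandle, fragments);
}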
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
Class HiveMetadata, method finishDelete:
@Override
public void finishDelete(ConnectorSession session, ConnectorTableHandle tableHandle, Collection<Slice> fragments)
{
    HiveTableHandle handle = (HiveTableHandle) tableHandle;
    checkArgument(handle.isAcidDelete(), "handle should be a delete handle, but is %s", handle);
    requireNonNull(fragments, "fragments is null");
    SchemaTableName tableName = handle.getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    ensureTableSupportsDelete(table);
    // Each fragment is a JSON-encoded PartitionAndStatementId produced by the workers
    List<PartitionAndStatementId> partitionAndStatementIds = fragments.stream()
            .map(Slice::getBytes)
            .map(PartitionAndStatementId.CODEC::fromJson)
            .collect(toImmutableList());
    HdfsContext context = new HdfsContext(session);
    for (PartitionAndStatementId ps : partitionAndStatementIds) {
        createOrcAcidVersionFile(context, new Path(ps.getDeleteDeltaDirectory()));
    }
    LocationHandle locationHandle = locationService.forExistingTable(metastore, session, table);
    WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    metastore.finishRowLevelDelete(session, table.getDatabaseName(), table.getTableName(), writeInfo.getWritePath(), partitionAndStatementIds);
}
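The fragments passed to finishDelete are opaque byte payloads; here they happen to be JSON documents decoded through PartitionAndStatementId.CODEC. A minimal sketch of the same round trip with Airlift's JsonCodec, where the Payload record is a stand-in for PartitionAndStatementId (and assumes a Jackson version that can bind records):

import io.airlift.json.JsonCodec;
import io.airlift.slice.Slice;
import io.airlift.slice.Slices;

public class FragmentCodecDemo
{
    // Stand-in for PartitionAndStatementId, reduced to the fields used above
    public record Payload(String deleteDeltaDirectory, int statementId) {}

    static final JsonCodec<Payload> CODEC = JsonCodec.jsonCodec(Payload.class);

    public static void main(String[] args)
    {
        // Worker side: serialize the payload and ship it as a Slice fragment
        Slice fragment = Slices.wrappedBuffer(CODEC.toJsonBytes(new Payload("/warehouse/t/delete_delta_01", 1)));

        // Coordinator side: decode the fragment back, as finishDelete does
        Payload decoded = CODEC.fromJson(fragment.getBytes());
        System.out.println(decoded.deleteDeltaDirectory());
    }
}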
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
Class HiveMetadata, method beginInsert:
@Override
public HiveInsertTableHandle beginInsert(ConnectorSession session, ConnectorTableHandle tableHandle, List<ColumnHandle> columns, RetryMode retryMode)
{
    SchemaTableName tableName = ((HiveTableHandle) tableHandle).getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    checkTableIsWritable(table, writesToNonManagedTablesEnabled);
    for (Column column : table.getDataColumns()) {
        if (!isWritableType(column.getType())) {
            throw new TrinoException(NOT_SUPPORTED, format("Inserting into Hive table %s with column type %s not supported", tableName, column.getType()));
        }
    }
    boolean isTransactional = isTransactionalTable(table.getParameters());
    if (isTransactional && retryMode != NO_RETRIES) {
        throw new TrinoException(NOT_SUPPORTED, "Inserting into Hive transactional tables is not supported with query retries enabled");
    }
    if (isTransactional && !autoCommit) {
        throw new TrinoException(NOT_SUPPORTED, "Inserting into Hive transactional tables is not supported in explicit transactions (use autocommit mode)");
    }
    if (isSparkBucketedTable(table)) {
        throw new TrinoException(NOT_SUPPORTED, "Inserting into Spark bucketed tables is not supported");
    }
    List<HiveColumnHandle> handles = hiveColumnHandles(table, typeManager, getTimestampPrecision(session)).stream()
            .filter(columnHandle -> !columnHandle.isHidden())
            .collect(toImmutableList());
    HiveStorageFormat tableStorageFormat = extractHiveStorageFormat(table);
    Optional.ofNullable(table.getParameters().get(SKIP_HEADER_COUNT_KEY)).map(Integer::parseInt).ifPresent(headerSkipCount -> {
        if (headerSkipCount > 1) {
            throw new TrinoException(NOT_SUPPORTED, format("Inserting into Hive table with value of %s property greater than 1 is not supported", SKIP_HEADER_COUNT_KEY));
        }
    });
    if (table.getParameters().containsKey(SKIP_FOOTER_COUNT_KEY)) {
        throw new TrinoException(NOT_SUPPORTED, format("Inserting into Hive table with %s property not supported", SKIP_FOOTER_COUNT_KEY));
    }
    LocationHandle locationHandle = locationService.forExistingTable(metastore, session, table);
    // Only transactional tables need an ACID transaction for inserts
    AcidTransaction transaction = isTransactional ? metastore.beginInsert(session, table) : NO_ACID_TRANSACTION;
    HiveInsertTableHandle result = new HiveInsertTableHandle(
            tableName.getSchemaName(),
            tableName.getTableName(),
            handles,
            metastore.generatePageSinkMetadata(tableName),
            locationHandle,
            table.getStorage().getBucketProperty(),
            tableStorageFormat,
            isRespectTableFormat(session) ? tableStorageFormat : getHiveStorageFormat(session),
            transaction,
            retryMode != NO_RETRIES);
    WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    if (getInsertExistingPartitionsBehavior(session) == InsertExistingPartitionsBehavior.OVERWRITE && writeInfo.getWriteMode() == DIRECT_TO_TARGET_EXISTING_DIRECTORY) {
        if (isTransactional) {
            throw new TrinoException(NOT_SUPPORTED, "Overwriting existing partition in transactional tables doesn't support DIRECT_TO_TARGET_EXISTING_DIRECTORY write mode");
        }
        // Partition overwrite is non-atomic, so it can't and shouldn't be used in a non-autocommit context
        if (!autoCommit) {
            throw new TrinoException(NOT_SUPPORTED, "Overwriting existing partition in non auto commit context doesn't support DIRECT_TO_TARGET_EXISTING_DIRECTORY write mode");
        }
    }
    metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), tableName);
    return result;
}
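Several of the guards above are driven by Hive table parameters. A hedged sketch of the two lookups involved; the key strings follow Hive's conventions ("transactional" and "skip.header.line.count") and are written out here as assumptions rather than constants copied from Trino:

import java.util.Map;
import java.util.Optional;

public final class HiveTableParameters
{
    // Hive convention: a table is ACID/transactional when its "transactional" parameter is "true"
    static boolean isTransactional(Map<String, String> parameters)
    {
        return "true".equalsIgnoreCase(parameters.get("transactional"));
    }

    // Hive convention: "skip.header.line.count" carries the header row count as text
    static Optional<Integer> headerSkipCount(Map<String, String> parameters)
    {
        return Optional.ofNullable(parameters.get("skip.header.line.count"))
                .map(Integer::parseInt);
    }
}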