Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
Class AbstractTestHive, method insertBucketedTableLayout.
protected void insertBucketedTableLayout(boolean transactional)
        throws Exception
{
    SchemaTableName tableName = temporaryTable("empty_bucketed_table");
    try {
        List<Column> columns = ImmutableList.of(
                new Column("column1", HIVE_STRING, Optional.empty()),
                new Column("column2", HIVE_LONG, Optional.empty()));
        HiveBucketProperty bucketProperty = new HiveBucketProperty(ImmutableList.of("column1"), BUCKETING_V1, 4, ImmutableList.of());
        createEmptyTable(tableName, ORC, columns, ImmutableList.of(), Optional.of(bucketProperty), transactional);
        try (Transaction transaction = newTransaction()) {
            ConnectorMetadata metadata = transaction.getMetadata();
            ConnectorSession session = newSession();
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            Optional<ConnectorTableLayout> insertLayout = metadata.getInsertLayout(session, tableHandle);
            assertTrue(insertLayout.isPresent());
            ConnectorPartitioningHandle partitioningHandle = new HivePartitioningHandle(
                    bucketProperty.getBucketingVersion(),
                    bucketProperty.getBucketCount(),
                    ImmutableList.of(HIVE_STRING),
                    OptionalInt.empty(),
                    false);
            assertEquals(insertLayout.get().getPartitioning(), Optional.of(partitioningHandle));
            assertEquals(insertLayout.get().getPartitionColumns(), ImmutableList.of("column1"));
            ConnectorBucketNodeMap connectorBucketNodeMap = nodePartitioningProvider.getBucketNodeMap(transaction.getTransactionHandle(), session, partitioningHandle);
            assertEquals(connectorBucketNodeMap.getBucketCount(), 4);
            assertFalse(connectorBucketNodeMap.hasFixedMapping());
        }
    }
    finally {
        dropTable(tableName);
    }
}
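For orientation, a minimal sketch of how a concrete AbstractTestHive subclass might drive this helper for non-transactional and transactional tables; the test method names here are assumed for illustration and are not taken from the snippet above.

@Test
public void testInsertBucketedTableLayout()
        throws Exception
{
    insertBucketedTableLayout(false);
}

@Test
public void testInsertBucketedTransactionalTableLayout()
        throws Exception
{
    insertBucketedTableLayout(true);
}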
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
Class AbstractTestHive, method testGetPartitionSplitsTableNotReadablePartition.
@Test
public void testGetPartitionSplitsTableNotReadablePartition()
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableNotReadable);
        assertNotNull(tableHandle);
        try {
            getSplitCount(getSplits(splitManager, transaction, session, tableHandle));
            fail("Expected HiveNotReadableException");
        }
        catch (HiveNotReadableException e) {
            assertThat(e).hasMessageMatching("Table '.*\\.trino_test_not_readable' is not readable: reason for not readable");
            assertEquals(e.getTableName(), tableNotReadable);
            assertEquals(e.getPartition(), Optional.empty());
        }
    }
}
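Assuming AssertJ is on the test classpath (the assertThat(e) call above suggests it is), the same expectation could be expressed more compactly with assertThatThrownBy. This is only a sketch of an equivalent assertion covering the exception type and message, not the project's code:

assertThatThrownBy(() -> getSplitCount(getSplits(splitManager, transaction, session, tableHandle)))
        .isInstanceOf(HiveNotReadableException.class)
        .hasMessageMatching("Table '.*\\.trino_test_not_readable' is not readable: reason for not readable");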
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
Class IcebergMetadata, method applyFilter.
@Override
public Optional<ConstraintApplicationResult<ConnectorTableHandle>> applyFilter(ConnectorSession session, ConnectorTableHandle handle, Constraint constraint)
{
    IcebergTableHandle table = (IcebergTableHandle) handle;
    Table icebergTable = catalog.loadTable(session, table.getSchemaTableName());
    Set<Integer> partitionSourceIds = identityPartitionColumnsInAllSpecs(icebergTable);
    BiPredicate<IcebergColumnHandle, Domain> isIdentityPartition = (column, domain) -> partitionSourceIds.contains(column.getId());
    TupleDomain<IcebergColumnHandle> newEnforcedConstraint = constraint.getSummary()
            .transformKeys(IcebergColumnHandle.class::cast)
            .filter(isIdentityPartition)
            .intersect(table.getEnforcedPredicate());
    TupleDomain<IcebergColumnHandle> remainingConstraint = constraint.getSummary()
            .transformKeys(IcebergColumnHandle.class::cast)
            .filter(isIdentityPartition.negate());
    TupleDomain<IcebergColumnHandle> newUnenforcedConstraint = remainingConstraint
            .filter((columnHandle, predicate) -> !isStructuralType(columnHandle.getType()))
            .intersect(table.getUnenforcedPredicate());
    if (newEnforcedConstraint.equals(table.getEnforcedPredicate()) && newUnenforcedConstraint.equals(table.getUnenforcedPredicate())) {
        return Optional.empty();
    }
    return Optional.of(new ConstraintApplicationResult<>(
            new IcebergTableHandle(
                    table.getSchemaName(),
                    table.getTableName(),
                    table.getTableType(),
                    table.getSnapshotId(),
                    newUnenforcedConstraint,
                    newEnforcedConstraint,
                    table.getProjectedColumns(),
                    table.getNameMappingJson()),
            remainingConstraint.transformKeys(ColumnHandle.class::cast),
            false));
}
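The heart of applyFilter is splitting the constraint summary with TupleDomain.filter and folding the pieces into the handle's existing predicates with intersect. Below is a self-contained sketch of that split, keyed by String instead of IcebergColumnHandle purely for illustration:

import com.google.common.collect.ImmutableMap;
import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.TupleDomain;

import java.util.function.BiPredicate;

import static io.trino.spi.type.BigintType.BIGINT;

public class TupleDomainSplitSketch
{
    public static void main(String[] args)
    {
        // "part" stands in for an identity partition column, "value" for an ordinary column
        TupleDomain<String> summary = TupleDomain.withColumnDomains(ImmutableMap.of(
                "part", Domain.singleValue(BIGINT, 1L),
                "value", Domain.singleValue(BIGINT, 42L)));
        BiPredicate<String, Domain> isIdentityPartition = (column, domain) -> column.equals("part");

        // Enforced part: pushed into the table handle and used to prune partitions/files
        TupleDomain<String> enforced = summary.filter(isIdentityPartition);
        // Remaining part: handed back to the engine, which keeps filtering rows with it
        TupleDomain<String> remaining = summary.filter(isIdentityPartition.negate());

        System.out.println(enforced);
        System.out.println(remaining);
    }
}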
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
Class IcebergMetadata, method finishRefreshMaterializedView.
@Override
public Optional<ConnectorOutputMetadata> finishRefreshMaterializedView(
        ConnectorSession session,
        ConnectorTableHandle tableHandle,
        ConnectorInsertTableHandle insertHandle,
        Collection<Slice> fragments,
        Collection<ComputedStatistics> computedStatistics,
        List<ConnectorTableHandle> sourceTableHandles)
{
    // delete before insert .. simulating overwrite
    executeDelete(session, tableHandle);
    IcebergWritableTableHandle table = (IcebergWritableTableHandle) insertHandle;
    Table icebergTable = transaction.table();
    List<CommitTaskData> commitTasks = fragments.stream()
            .map(slice -> commitTaskCodec.fromJson(slice.getBytes()))
            .collect(toImmutableList());
    Type[] partitionColumnTypes = icebergTable.spec().fields().stream()
            .map(field -> field.transform().getResultType(icebergTable.schema().findType(field.sourceId())))
            .toArray(Type[]::new);
    AppendFiles appendFiles = transaction.newFastAppend();
    ImmutableSet.Builder<String> writtenFiles = ImmutableSet.builder();
    for (CommitTaskData task : commitTasks) {
        DataFiles.Builder builder = DataFiles.builder(icebergTable.spec())
                .withPath(task.getPath())
                .withFileSizeInBytes(task.getFileSizeInBytes())
                .withFormat(table.getFileFormat().toIceberg())
                .withMetrics(task.getMetrics().metrics());
        if (!icebergTable.spec().fields().isEmpty()) {
            String partitionDataJson = task.getPartitionDataJson()
                    .orElseThrow(() -> new VerifyException("No partition data for partitioned table"));
            builder.withPartition(PartitionData.fromJson(partitionDataJson, partitionColumnTypes));
        }
        appendFiles.appendFile(builder.build());
        writtenFiles.add(task.getPath());
    }
    String dependencies = sourceTableHandles.stream()
            .map(handle -> (IcebergTableHandle) handle)
            .filter(handle -> handle.getSnapshotId().isPresent())
            .map(handle -> handle.getSchemaTableName() + "=" + handle.getSnapshotId().get())
            .distinct()
            .collect(joining(","));
    // try to leave as little garbage as possible behind
    if (table.getRetryMode() != NO_RETRIES) {
        cleanExtraOutputFiles(session, writtenFiles.build());
    }
    // Update the 'dependsOnTables' property that tracks tables on which the materialized view depends and the corresponding snapshot ids of the tables
    appendFiles.set(DEPENDS_ON_TABLES, dependencies);
    appendFiles.commit();
    transaction.commitTransaction();
    transaction = null;
    return Optional.of(new HiveWrittenPartitions(commitTasks.stream()
            .map(CommitTaskData::getPath)
            .collect(toImmutableList())));
}
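The dependsOnTables snapshot property written above is a comma-separated list of schemaTableName=snapshotId pairs. As a purely hypothetical illustration (this helper is not part of IcebergMetadata), reading such a value back could look like:

import com.google.common.base.Splitter;

import java.util.Map;

import static com.google.common.collect.ImmutableMap.toImmutableMap;

final class DependsOnTablesSketch
{
    private DependsOnTablesSketch() {}

    // Parses a value such as "tpch.orders=123,tpch.lineitem=456" into table name -> snapshot id
    static Map<String, Long> parseDependencies(String dependsOnTables)
    {
        if (dependsOnTables.isEmpty()) {
            return Map.of();
        }
        return Splitter.on(',').withKeyValueSeparator('=').split(dependsOnTables)
                .entrySet().stream()
                .collect(toImmutableMap(Map.Entry::getKey, entry -> Long.parseLong(entry.getValue())));
    }
}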
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
Class TestInformationSchemaMetadata, method testInformationSchemaPredicatePushdownForEmptyNames.
@Test
public void testInformationSchemaPredicatePushdownForEmptyNames()
{
    TransactionId transactionId = transactionManager.beginTransaction(false);
    ConnectorSession session = createNewSession(transactionId);
    ConnectorMetadata metadata = new InformationSchemaMetadata("test_catalog", this.metadata);
    InformationSchemaColumnHandle tableSchemaColumn = new InformationSchemaColumnHandle("table_schema");
    InformationSchemaColumnHandle tableNameColumn = new InformationSchemaColumnHandle("table_name");
    ConnectorTableHandle tableHandle = metadata.getTableHandle(session, new SchemaTableName("information_schema", "tables"));

    // Empty schema name
    InformationSchemaTableHandle filtered = metadata.applyFilter(session, tableHandle, new Constraint(TupleDomain.withColumnDomains(
                    ImmutableMap.of(tableSchemaColumn, Domain.singleValue(VARCHAR, Slices.utf8Slice(""))))))
            .map(ConstraintApplicationResult::getHandle)
            .map(InformationSchemaTableHandle.class::cast)
            .orElseThrow(AssertionError::new);

    // An empty ("") schema name is a valid schema name and is (currently) valid for QualifiedTablePrefix
    assertEquals(filtered.getPrefixes(), ImmutableSet.of(new QualifiedTablePrefix("test_catalog", "")));

    // Empty table name
    filtered = metadata.applyFilter(session, tableHandle, new Constraint(TupleDomain.withColumnDomains(
                    ImmutableMap.of(tableNameColumn, Domain.singleValue(VARCHAR, Slices.utf8Slice(""))))))
            .map(ConstraintApplicationResult::getHandle)
            .map(InformationSchemaTableHandle.class::cast)
            .orElseThrow(AssertionError::new);

    // An empty ("") table name is a valid table name and is (currently) valid for QualifiedTablePrefix;
    // the filter is applied blindly to all visible schemas, so information_schema must be included
    assertEquals(filtered.getPrefixes(), ImmutableSet.of(
            new QualifiedTablePrefix("test_catalog", "test_schema", ""),
            new QualifiedTablePrefix("test_catalog", "information_schema", "")));
}
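The two applyFilter invocations above differ only in the constrained column. A hypothetical helper (not part of the original test) makes the shared shape of that call explicit:

private static InformationSchemaTableHandle applySingleValueFilter(
        ConnectorMetadata metadata,
        ConnectorSession session,
        ConnectorTableHandle tableHandle,
        InformationSchemaColumnHandle column,
        String value)
{
    Constraint constraint = new Constraint(TupleDomain.withColumnDomains(
            ImmutableMap.of(column, Domain.singleValue(VARCHAR, Slices.utf8Slice(value)))));
    return metadata.applyFilter(session, tableHandle, constraint)
            .map(ConstraintApplicationResult::getHandle)
            .map(InformationSchemaTableHandle.class::cast)
            .orElseThrow(AssertionError::new);
}

With it, each assertion reduces to a single call such as applySingleValueFilter(metadata, session, tableHandle, tableSchemaColumn, "").getPrefixes().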