Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
The class IcebergMetadata, method applyProjection.
@Override
public Optional<ProjectionApplicationResult<ConnectorTableHandle>> applyProjection(
        ConnectorSession session,
        ConnectorTableHandle handle,
        List<ConnectorExpression> projections,
        Map<String, ColumnHandle> assignments)
{
    if (!isProjectionPushdownEnabled(session)) {
        return Optional.empty();
    }

    // Create projected column representations for supported sub-expressions. Simple column references
    // and chains of dereferences on a variable are supported right now.
    Set<ConnectorExpression> projectedExpressions = projections.stream()
            .flatMap(expression -> extractSupportedProjectedColumns(expression).stream())
            .collect(toImmutableSet());
    Map<ConnectorExpression, ProjectedColumnRepresentation> columnProjections = projectedExpressions.stream()
            .collect(toImmutableMap(Function.identity(), HiveApplyProjectionUtil::createProjectedColumnRepresentation));

    IcebergTableHandle icebergTableHandle = (IcebergTableHandle) handle;

    // All references are simple variables
    if (columnProjections.values().stream().allMatch(ProjectedColumnRepresentation::isVariable)) {
        Set<IcebergColumnHandle> projectedColumns = assignments.values().stream()
                .map(IcebergColumnHandle.class::cast)
                .collect(toImmutableSet());
        if (icebergTableHandle.getProjectedColumns().equals(projectedColumns)) {
            return Optional.empty();
        }
        List<Assignment> assignmentsList = assignments.entrySet().stream()
                .map(assignment -> new Assignment(
                        assignment.getKey(),
                        assignment.getValue(),
                        ((IcebergColumnHandle) assignment.getValue()).getType()))
                .collect(toImmutableList());
        return Optional.of(new ProjectionApplicationResult<>(
                icebergTableHandle.withProjectedColumns(projectedColumns),
                projections,
                assignmentsList,
                false));
    }

    Map<String, Assignment> newAssignments = new HashMap<>();
    ImmutableMap.Builder<ConnectorExpression, Variable> newVariablesBuilder = ImmutableMap.builder();
    ImmutableSet.Builder<IcebergColumnHandle> projectedColumnsBuilder = ImmutableSet.builder();

    for (Map.Entry<ConnectorExpression, ProjectedColumnRepresentation> entry : columnProjections.entrySet()) {
        ConnectorExpression expression = entry.getKey();
        ProjectedColumnRepresentation projectedColumn = entry.getValue();

        IcebergColumnHandle baseColumnHandle = (IcebergColumnHandle) assignments.get(projectedColumn.getVariable().getName());
        IcebergColumnHandle projectedColumnHandle = createProjectedColumnHandle(baseColumnHandle, projectedColumn.getDereferenceIndices(), expression.getType());
        String projectedColumnName = projectedColumnHandle.getQualifiedName();

        Variable projectedColumnVariable = new Variable(projectedColumnName, expression.getType());
        Assignment newAssignment = new Assignment(projectedColumnName, projectedColumnHandle, expression.getType());
        newAssignments.putIfAbsent(projectedColumnName, newAssignment);

        newVariablesBuilder.put(expression, projectedColumnVariable);
        projectedColumnsBuilder.add(projectedColumnHandle);
    }

    // Modify projections to refer to new variables
    Map<ConnectorExpression, Variable> newVariables = newVariablesBuilder.buildOrThrow();
    List<ConnectorExpression> newProjections = projections.stream()
            .map(expression -> replaceWithNewVariables(expression, newVariables))
            .collect(toImmutableList());
    List<Assignment> outputAssignments = newAssignments.values().stream()
            .collect(toImmutableList());

    return Optional.of(new ProjectionApplicationResult<>(
            icebergTableHandle.withProjectedColumns(projectedColumnsBuilder.build()),
            newProjections,
            outputAssignments,
            false));
}
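The key idea above is that a projection such as a dereference chain on a row column can be pushed into the connector as a synthetic column identified by its base column plus a list of field indices. The following minimal, self-contained sketch illustrates that representation; the DereferenceChain record and the '#'-separated naming scheme are illustrative assumptions, not Trino's actual classes.

import java.util.List;
import java.util.stream.Collectors;

// Hypothetical stand-in for the base-column-plus-dereference-indices pairing
// carried by ProjectedColumnRepresentation and the projected column handle.
record DereferenceChain(String baseColumn, List<Integer> dereferenceIndices)
{
    // A chain with no dereferences is just the base column itself.
    boolean isVariable()
    {
        return dereferenceIndices.isEmpty();
    }

    // Compose a synthetic, unique name for the projected column, e.g. "msg#2#0".
    // The '#' separator is an assumption for illustration only.
    String qualifiedName()
    {
        if (isVariable()) {
            return baseColumn;
        }
        return baseColumn + dereferenceIndices.stream()
                .map(index -> "#" + index)
                .collect(Collectors.joining());
    }
}

With this shape, the fast path in applyProjection amounts to allMatch(DereferenceChain::isVariable); only when some chain is non-empty does the method mint new variables and assignments.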
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
The class IcebergPageSourceProvider, method createPageSource.
@Override
public ConnectorPageSource createPageSource(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit connectorSplit, ConnectorTableHandle connectorTable, List<ColumnHandle> columns, DynamicFilter dynamicFilter)
{
    IcebergSplit split = (IcebergSplit) connectorSplit;
    IcebergTableHandle table = (IcebergTableHandle) connectorTable;

    List<IcebergColumnHandle> icebergColumns = columns.stream()
            .map(IcebergColumnHandle.class::cast)
            .collect(toImmutableList());

    Map<Integer, Optional<String>> partitionKeys = split.getPartitionKeys();
    List<IcebergColumnHandle> regularColumns = columns.stream()
            .map(IcebergColumnHandle.class::cast)
            .filter(column -> !partitionKeys.containsKey(column.getId()))
            .collect(toImmutableList());

    TupleDomain<IcebergColumnHandle> effectivePredicate = table.getUnenforcedPredicate()
            .intersect(dynamicFilter.getCurrentPredicate().transformKeys(IcebergColumnHandle.class::cast))
            .simplify(ICEBERG_DOMAIN_COMPACTION_THRESHOLD);

    HdfsContext hdfsContext = new HdfsContext(session);
    ReaderPageSource dataPageSource = createDataPageSource(
            session,
            hdfsContext,
            new Path(split.getPath()),
            split.getStart(),
            split.getLength(),
            split.getFileSize(),
            split.getFileFormat(),
            regularColumns,
            effectivePredicate,
            table.getNameMappingJson().map(NameMappingParser::fromJson));

    Optional<ReaderProjectionsAdapter> projectionsAdapter = dataPageSource.getReaderColumns()
            .map(readerColumns -> new ReaderProjectionsAdapter(
                    regularColumns,
                    readerColumns,
                    column -> ((IcebergColumnHandle) column).getType(),
                    IcebergPageSourceProvider::applyProjection));

    return new IcebergPageSource(icebergColumns, partitionKeys, dataPageSource.get(), projectionsAdapter);
}
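The ReaderProjectionsAdapter exists because a data file reader may supply a different (often wider or reordered) set of columns than the query expects, so output positions must be remapped. The toy sketch below shows only the index-mapping idea; the names and structure are assumptions for illustration, and the real adapter operates on column handles and blocks rather than strings.

import java.util.List;

final class ReaderColumnMapping
{
    private ReaderColumnMapping() {}

    // For each expected output column, find its position in the reader's column order.
    // Throws if the reader cannot supply a required column.
    static int[] buildMapping(List<String> expectedColumns, List<String> readerColumns)
    {
        int[] mapping = new int[expectedColumns.size()];
        for (int i = 0; i < expectedColumns.size(); i++) {
            int position = readerColumns.indexOf(expectedColumns.get(i));
            if (position < 0) {
                throw new IllegalArgumentException("Reader does not provide column: " + expectedColumns.get(i));
            }
            mapping[i] = position;
        }
        return mapping;
    }
}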
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
The class TestMemoryMetadata, method testRenameTable.
@Test
public void testRenameTable()
{
    SchemaTableName tableName = new SchemaTableName("test_schema", "test_table_to_be_renamed");
    metadata.createSchema(SESSION, "test_schema", ImmutableMap.of(), new TrinoPrincipal(USER, SESSION.getUser()));
    ConnectorOutputTableHandle table = metadata.beginCreateTable(
            SESSION,
            new ConnectorTableMetadata(tableName, ImmutableList.of(), ImmutableMap.of()),
            Optional.empty(),
            NO_RETRIES);
    metadata.finishCreateTable(SESSION, table, ImmutableList.of(), ImmutableList.of());

    // rename table to a schema which does not exist
    SchemaTableName invalidSchemaTableName = new SchemaTableName("test_schema_not_exist", "test_table_renamed");
    ConnectorTableHandle tableHandle = metadata.getTableHandle(SESSION, tableName);
    Throwable throwable = expectThrows(SchemaNotFoundException.class, () -> metadata.renameTable(SESSION, tableHandle, invalidSchemaTableName));
    assertEquals(throwable.getMessage(), "Schema test_schema_not_exist not found");

    // rename table within the same schema
    SchemaTableName sameSchemaTableName = new SchemaTableName("test_schema", "test_renamed");
    metadata.renameTable(SESSION, metadata.getTableHandle(SESSION, tableName), sameSchemaTableName);
    assertEquals(metadata.listTables(SESSION, Optional.of("test_schema")), ImmutableList.of(sameSchemaTableName));

    // rename table to a different schema
    metadata.createSchema(SESSION, "test_different_schema", ImmutableMap.of(), new TrinoPrincipal(USER, SESSION.getUser()));
    SchemaTableName differentSchemaTableName = new SchemaTableName("test_different_schema", "test_renamed");
    metadata.renameTable(SESSION, metadata.getTableHandle(SESSION, sameSchemaTableName), differentSchemaTableName);
    assertEquals(metadata.listTables(SESSION, Optional.of("test_schema")), ImmutableList.of());
    assertEquals(metadata.listTables(SESSION, Optional.of("test_different_schema")), ImmutableList.of(differentSchemaTableName));
}
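The contract this test pins down is simple: rename fails fast when the target schema is missing, and otherwise the table moves under its new name while keeping its identity. That behavior can be sketched with a plain map; SimpleCatalog and its fields below are hypothetical, not the memory connector's actual implementation.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical in-memory catalog illustrating the rename contract exercised above.
final class SimpleCatalog
{
    private final Set<String> schemas = new HashSet<>();
    private final Map<String, Long> tables = new HashMap<>(); // "schema.table" -> table id

    void createSchema(String schema)
    {
        schemas.add(schema);
    }

    void createTable(String schema, String table, long id)
    {
        tables.put(schema + "." + table, id);
    }

    void renameTable(String oldSchema, String oldTable, String newSchema, String newTable)
    {
        // Check the target schema first, mirroring the SchemaNotFoundException in the test.
        if (!schemas.contains(newSchema)) {
            throw new IllegalStateException("Schema " + newSchema + " not found");
        }
        Long id = tables.remove(oldSchema + "." + oldTable);
        if (id == null) {
            throw new IllegalStateException("Table " + oldSchema + "." + oldTable + " not found");
        }
        // Re-insert under the new name; the table id is unchanged.
        tables.put(newSchema + "." + newTable, id);
    }
}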
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
The class TestMemoryMetadata, method tableAlreadyExists.
@Test
public void tableAlreadyExists()
{
    assertNoTables();

    SchemaTableName test1Table = new SchemaTableName("default", "test1");
    SchemaTableName test2Table = new SchemaTableName("default", "test2");
    metadata.createTable(SESSION, new ConnectorTableMetadata(test1Table, ImmutableList.of()), false);

    assertTrinoExceptionThrownBy(() -> metadata.createTable(SESSION, new ConnectorTableMetadata(test1Table, ImmutableList.of()), false))
            .hasErrorCode(ALREADY_EXISTS)
            .hasMessage("Table [default.test1] already exists");

    ConnectorTableHandle test1TableHandle = metadata.getTableHandle(SESSION, test1Table);
    metadata.createTable(SESSION, new ConnectorTableMetadata(test2Table, ImmutableList.of()), false);

    assertTrinoExceptionThrownBy(() -> metadata.renameTable(SESSION, test1TableHandle, test2Table))
            .hasErrorCode(ALREADY_EXISTS)
            .hasMessage("Table [default.test2] already exists");
}
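Both failures in this test come from the same guard: neither create nor rename may overwrite an existing table. A minimal sketch of that guard using putIfAbsent follows; TableRegistry is a hypothetical helper, not TestMemoryMetadata's code.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

final class TableRegistry
{
    private final ConcurrentMap<String, Long> tables = new ConcurrentHashMap<>();

    // putIfAbsent returns the previous value when the key is already taken,
    // which is exactly the "already exists" signal the test asserts on.
    void createTable(String qualifiedName, long id)
    {
        if (tables.putIfAbsent(qualifiedName, id) != null) {
            throw new IllegalStateException("Table [" + qualifiedName + "] already exists");
        }
    }
}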
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
The class PhoenixSplitManager, method getSplits.
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle table, SplitSchedulingStrategy splitSchedulingStrategy, DynamicFilter dynamicFilter)
{
    JdbcTableHandle tableHandle = (JdbcTableHandle) table;
    try (Connection connection = phoenixClient.getConnection(session)) {
        List<JdbcColumnHandle> columns = tableHandle.getColumns()
                .map(columnSet -> columnSet.stream()
                        .map(JdbcColumnHandle.class::cast)
                        .collect(toList()))
                .orElseGet(() -> phoenixClient.getColumns(session, tableHandle));
        PhoenixPreparedStatement inputQuery = (PhoenixPreparedStatement) phoenixClient.prepareStatement(
                session,
                connection,
                tableHandle,
                columns,
                Optional.empty());
        int maxScansPerSplit = session.getProperty(PhoenixSessionProperties.MAX_SCANS_PER_SPLIT, Integer.class);
        List<ConnectorSplit> splits = getSplits(inputQuery, maxScansPerSplit).stream()
                .map(PhoenixInputSplit.class::cast)
                .map(split -> new PhoenixSplit(getSplitAddresses(split), SerializedPhoenixInputSplit.serialize(split)))
                .collect(toImmutableList());
        return new FixedSplitSource(splits);
    }
    catch (IOException | SQLException e) {
        throw new TrinoException(PHOENIX_SPLIT_ERROR, "Couldn't get Phoenix splits", e);
    }
}
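The maxScansPerSplit session property above caps how many Phoenix scans are packed into a single Trino split. The grouping itself is just fixed-size chunking, sketched below as a standalone illustration; the real code works on PhoenixInputSplit objects rather than a generic list.

import java.util.ArrayList;
import java.util.List;

final class ScanChunker
{
    private ScanChunker() {}

    // Group a flat list of scans into splits of at most maxScansPerSplit each.
    static <T> List<List<T>> chunk(List<T> scans, int maxScansPerSplit)
    {
        List<List<T>> splits = new ArrayList<>();
        for (int start = 0; start < scans.size(); start += maxScansPerSplit) {
            int end = Math.min(start + maxScansPerSplit, scans.size());
            // Copy the slice so each split is independent of the source list.
            splits.add(new ArrayList<>(scans.subList(start, end)));
        }
        return splits;
    }
}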