Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
In class DefaultJdbcMetadata, the method applyAggregation:
@Override
public Optional<AggregationApplicationResult<ConnectorTableHandle>> applyAggregation(
        ConnectorSession session,
        ConnectorTableHandle table,
        List<AggregateFunction> aggregates,
        Map<String, ColumnHandle> assignments,
        List<List<ColumnHandle>> groupingSets)
{
    if (!isAggregationPushdownEnabled(session)) {
        return Optional.empty();
    }

    JdbcTableHandle handle = (JdbcTableHandle) table;

    // Global aggregation is represented by [[]]
    verify(!groupingSets.isEmpty(), "No grouping sets provided");

    if (!jdbcClient.supportsAggregationPushdown(session, handle, aggregates, assignments, groupingSets)) {
        // JDBC client implementation prevents pushdown for the given table
        return Optional.empty();
    }

    if (handle.getLimit().isPresent()) {
        handle = flushAttributesAsQuery(session, handle);
    }

    int nextSyntheticColumnId = handle.getNextSyntheticColumnId();

    ImmutableList.Builder<JdbcColumnHandle> newColumns = ImmutableList.builder();
    ImmutableList.Builder<ConnectorExpression> projections = ImmutableList.builder();
    ImmutableList.Builder<Assignment> resultAssignments = ImmutableList.builder();
    ImmutableMap.Builder<String, String> expressions = ImmutableMap.builder();

    List<List<JdbcColumnHandle>> groupingSetsAsJdbcColumnHandles = groupingSets.stream()
            .map(groupingSet -> groupingSet.stream()
                    .map(JdbcColumnHandle.class::cast)
                    .collect(toImmutableList()))
            .collect(toImmutableList());
    Optional<List<JdbcColumnHandle>> tableColumns = handle.getColumns();
    groupingSetsAsJdbcColumnHandles.stream()
            .flatMap(List::stream)
            .distinct()
            .peek(handle.getColumns().<Consumer<JdbcColumnHandle>>map(columns -> groupKey -> verify(
                            columns.contains(groupKey),
                            "applyAggregation called with a grouping column %s which was not included in the table columns: %s",
                            groupKey,
                            tableColumns))
                    .orElse(groupKey -> {}))
            .forEach(newColumns::add);

    for (AggregateFunction aggregate : aggregates) {
        Optional<JdbcExpression> expression = jdbcClient.implementAggregation(session, aggregate, assignments);
        if (expression.isEmpty()) {
            return Optional.empty();
        }

        String columnName = SYNTHETIC_COLUMN_NAME_PREFIX + nextSyntheticColumnId;
        nextSyntheticColumnId++;
        JdbcColumnHandle newColumn = JdbcColumnHandle.builder()
                .setColumnName(columnName)
                .setJdbcTypeHandle(expression.get().getJdbcTypeHandle())
                .setColumnType(aggregate.getOutputType())
                .setComment(Optional.of("synthetic"))
                .build();

        newColumns.add(newColumn);
        projections.add(new Variable(newColumn.getColumnName(), aggregate.getOutputType()));
        resultAssignments.add(new Assignment(newColumn.getColumnName(), newColumn, aggregate.getOutputType()));
        expressions.put(columnName, expression.get().getExpression());
    }

    List<JdbcColumnHandle> newColumnsList = newColumns.build();

    // We need to have matching column handles in JdbcTableHandle constructed below, as columns read via JDBC must match column handles list.
    // For more context see assertion in JdbcRecordSetProvider.getRecordSet
    PreparedQuery preparedQuery = jdbcClient.prepareQuery(
            session,
            handle,
            Optional.of(groupingSetsAsJdbcColumnHandles),
            newColumnsList,
            expressions.buildOrThrow());
    handle = new JdbcTableHandle(
            new JdbcQueryRelationHandle(preparedQuery),
            TupleDomain.all(),
            ImmutableList.of(),
            Optional.empty(),
            OptionalLong.empty(),
            Optional.of(newColumnsList),
            handle.getAllReferencedTables(),
            nextSyntheticColumnId);

    return Optional.of(new AggregationApplicationResult<>(handle, projections.build(), resultAssignments.build(), ImmutableMap.of(), false));
}
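The core of the pushdown is the loop that gives each aggregate a unique synthetic output column and records the rewritten SQL expression for it; that map later becomes the SELECT list of the pushed-down query. Below is a minimal, self-contained sketch of that naming pattern in plain Java. The prefix value and the sample expressions are illustrative assumptions, not the connector's actual constants or output of JdbcClient.implementAggregation.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class SyntheticColumnSketch
{
    // Stand-in for SYNTHETIC_COLUMN_NAME_PREFIX; the real prefix is an implementation detail of the JDBC base module
    private static final String SYNTHETIC_COLUMN_NAME_PREFIX = "_synthetic_";

    public static void main(String[] args)
    {
        // Pretend these SQL fragments were produced for each AggregateFunction by the JDBC client
        List<String> rewrittenAggregates = List.of("count(*)", "sum(\"totalprice\")", "max(\"orderdate\")");

        int nextSyntheticColumnId = 0;
        Map<String, String> expressions = new LinkedHashMap<>();
        for (String expression : rewrittenAggregates) {
            // Each aggregate gets a unique synthetic output column, exactly like the loop in applyAggregation
            String columnName = SYNTHETIC_COLUMN_NAME_PREFIX + nextSyntheticColumnId;
            nextSyntheticColumnId++;
            expressions.put(columnName, expression);
        }

        // The expression map drives the projection list of the pushed-down query
        expressions.forEach((column, expression) -> System.out.println(expression + " AS " + column));
    }
}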
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
In class AbstractTestHive, the method insertData:
private String insertData(SchemaTableName tableName, MaterializedResult data, Map<String, Object> sessionProperties)
        throws Exception
{
    Path writePath;
    Path targetPath;
    String queryId;
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession(sessionProperties);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES);
        queryId = session.getQueryId();
        writePath = getStagingPathRoot(insertTableHandle);
        targetPath = getTargetPathRoot(insertTableHandle);

        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle);

        // write data
        sink.appendPage(data.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());

        // commit the insert
        metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
        transaction.commit();
    }

    // check that temporary files are removed
    if (!writePath.equals(targetPath)) {
        HdfsContext context = new HdfsContext(newSession());
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, writePath);
        assertFalse(fileSystem.exists(writePath));
    }

    return queryId;
}
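After the transaction commits, the helper checks that the connector cleaned up its staging location. The same write-to-staging, move-to-target, assert-staging-removed pattern can be shown with plain java.nio.file; the temp directories and file name below are hypothetical stand-ins for the HDFS paths returned by getStagingPathRoot and getTargetPathRoot.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class StagingCleanupSketch
{
    public static void main(String[] args)
            throws IOException
    {
        // Hypothetical staging and target directories standing in for getStagingPathRoot/getTargetPathRoot
        Path targetPath = Files.createTempDirectory("target");
        Path writePath = Files.createTempDirectory("staging");

        // "Write" to the staging directory, then "commit" by moving the file into the target directory
        Path staged = Files.writeString(writePath.resolve("data.orc"), "rows");
        Files.move(staged, targetPath.resolve("data.orc"));
        Files.delete(writePath); // the commit is expected to clean up its scratch space

        // Mirrors the assertion at the end of insertData: staging must be gone unless it equals the target
        if (!writePath.equals(targetPath) && Files.exists(writePath)) {
            throw new AssertionError("temporary write path was not removed: " + writePath);
        }
        System.out.println("staging directory removed, data committed to " + targetPath);
    }
}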
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
In class AbstractTestHive, the method testGetRecordsUnpartitioned:
@Test
public void testGetRecordsUnpartitioned()
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        metadata.beginQuery(session);

        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableUnpartitioned);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());
        Map<String, Integer> columnIndex = indexColumns(columnHandles);

        List<ConnectorSplit> splits = getAllSplits(tableHandle, transaction, session);
        assertThat(splits).hasSameSizeAs(tableUnpartitionedPartitions);

        for (ConnectorSplit split : splits) {
            HiveSplit hiveSplit = (HiveSplit) split;

            assertEquals(hiveSplit.getPartitionKeys(), ImmutableList.of());

            long rowNumber = 0;
            try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, tableHandle, columnHandles, DynamicFilter.EMPTY)) {
                assertPageSourceType(pageSource, TEXTFILE);
                MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));

                for (MaterializedRow row : result) {
                    rowNumber++;

                    if (rowNumber % 19 == 0) {
                        assertNull(row.getField(columnIndex.get("t_string")));
                    }
                    else if (rowNumber % 19 == 1) {
                        assertEquals(row.getField(columnIndex.get("t_string")), "");
                    }
                    else {
                        assertEquals(row.getField(columnIndex.get("t_string")), "unpartitioned");
                    }

                    assertEquals(row.getField(columnIndex.get("t_tinyint")), (byte) (1 + rowNumber));
                }
            }
            assertEquals(rowNumber, 100);
        }
    }
}
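The per-row assertions encode the fixture's layout: t_string is NULL on every 19th row, an empty string on the row immediately after, and "unpartitioned" otherwise, while t_tinyint is always 1 + rowNumber. A standalone restatement of that test oracle, for clarity; this is not connector code, just the expected-value function the assertions imply.

public class UnpartitionedTestOracleSketch
{
    // Mirrors the branch in testGetRecordsUnpartitioned: every 19th row is NULL,
    // the row right after it is an empty string, everything else is "unpartitioned"
    static String expectedTString(long rowNumber)
    {
        if (rowNumber % 19 == 0) {
            return null;
        }
        if (rowNumber % 19 == 1) {
            return "";
        }
        return "unpartitioned";
    }

    public static void main(String[] args)
    {
        for (long rowNumber = 1; rowNumber <= 20; rowNumber++) {
            System.out.printf("row %d: t_string=%s, t_tinyint=%d%n", rowNumber, expectedTString(rowNumber), (byte) (1 + rowNumber));
        }
    }
}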
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
In class AbstractTestHive, the method insertPartitionedBucketedTableLayout:
protected void insertPartitionedBucketedTableLayout(boolean transactional)
        throws Exception
{
    SchemaTableName tableName = temporaryTable("empty_partitioned_table");
    try {
        Column partitioningColumn = new Column("column2", HIVE_LONG, Optional.empty());
        List<Column> columns = ImmutableList.of(
                new Column("column1", HIVE_STRING, Optional.empty()),
                partitioningColumn);
        HiveBucketProperty bucketProperty = new HiveBucketProperty(ImmutableList.of("column1"), BUCKETING_V1, 4, ImmutableList.of());
        createEmptyTable(tableName, ORC, columns, ImmutableList.of(partitioningColumn), Optional.of(bucketProperty), transactional);

        try (Transaction transaction = newTransaction()) {
            ConnectorMetadata metadata = transaction.getMetadata();
            ConnectorSession session = newSession();
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            Optional<ConnectorTableLayout> insertLayout = metadata.getInsertLayout(session, tableHandle);
            assertTrue(insertLayout.isPresent());

            ConnectorPartitioningHandle partitioningHandle = new HivePartitioningHandle(
                    bucketProperty.getBucketingVersion(),
                    bucketProperty.getBucketCount(),
                    ImmutableList.of(HIVE_STRING),
                    OptionalInt.empty(),
                    true);

            assertEquals(insertLayout.get().getPartitioning(), Optional.of(partitioningHandle));
            assertEquals(insertLayout.get().getPartitionColumns(), ImmutableList.of("column1", "column2"));
            ConnectorBucketNodeMap connectorBucketNodeMap = nodePartitioningProvider.getBucketNodeMap(transaction.getTransactionHandle(), session, partitioningHandle);
            assertEquals(connectorBucketNodeMap.getBucketCount(), 32);
            assertTrue(connectorBucketNodeMap.hasFixedMapping());
            assertEquals(connectorBucketNodeMap.getFixedMapping().size(), 32);
        }
    }
    finally {
        dropTable(tableName);
    }
}
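The last assertions concern ConnectorBucketNodeMap: a fixed mapping pins every bucket to a specific node before the write starts, so the map must have exactly one entry per bucket. Below is a rough standalone sketch of what such a fixed mapping looks like; the round-robin assignment and the worker names are illustrative assumptions, not how HiveNodePartitioningProvider actually distributes buckets.

import java.util.ArrayList;
import java.util.List;

public class FixedBucketNodeMapSketch
{
    public static void main(String[] args)
    {
        // Hypothetical worker identifiers; in the test the mapping comes from nodePartitioningProvider.getBucketNodeMap
        List<String> nodes = List.of("worker-1", "worker-2", "worker-3");
        int bucketCount = 32; // the bucket count the test asserts for the insert layout

        // A fixed mapping assigns every bucket to exactly one node up front, e.g. round-robin
        List<String> bucketToNode = new ArrayList<>(bucketCount);
        for (int bucket = 0; bucket < bucketCount; bucket++) {
            bucketToNode.add(nodes.get(bucket % nodes.size()));
        }

        // Mirrors the assertions: the mapping has one entry per bucket
        System.out.println("buckets: " + bucketToNode.size());
        System.out.println("bucket 0 -> " + bucketToNode.get(0) + ", bucket 31 -> " + bucketToNode.get(31));
    }
}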
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
In class AbstractTestHive, the method testGetPartialRecords:
@Test
public void testGetPartialRecords()
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        metadata.beginQuery(session);

        ConnectorTableHandle tableHandle = getTableHandle(metadata, tablePartitionFormat);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());
        Map<String, Integer> columnIndex = indexColumns(columnHandles);

        List<ConnectorSplit> splits = getAllSplits(tableHandle, transaction, session);
        assertEquals(splits.size(), tablePartitionFormatPartitions.size());

        for (ConnectorSplit split : splits) {
            HiveSplit hiveSplit = (HiveSplit) split;

            List<HivePartitionKey> partitionKeys = hiveSplit.getPartitionKeys();
            String ds = partitionKeys.get(0).getValue();
            String fileFormat = partitionKeys.get(1).getValue();
            HiveStorageFormat fileType = HiveStorageFormat.valueOf(fileFormat.toUpperCase(ENGLISH));
            int dummyPartition = Integer.parseInt(partitionKeys.get(2).getValue());

            long rowNumber = 0;
            try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, hiveSplit, tableHandle, columnHandles, DynamicFilter.EMPTY)) {
                assertPageSourceType(pageSource, fileType);
                MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));

                for (MaterializedRow row : result) {
                    rowNumber++;

                    assertEquals(row.getField(columnIndex.get("t_double")), 6.2 + rowNumber);
                    assertEquals(row.getField(columnIndex.get("ds")), ds);
                    assertEquals(row.getField(columnIndex.get("file_format")), fileFormat);
                    assertEquals(row.getField(columnIndex.get("dummy")), dummyPartition);
                }
            }
            assertEquals(rowNumber, 100);
        }
    }
}
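The notable detail here is that partition values arrive on the split as strings and have to be coerced back to their logical types before they can be compared against the materialized rows. A small standalone sketch of that coercion follows; the enum and the sample values are simplified stand-ins for HiveStorageFormat and the real fixture data.

import static java.util.Locale.ENGLISH;

import java.util.List;

public class PartitionKeyParsingSketch
{
    // Stand-in for the storage formats the test can encounter; the real HiveStorageFormat has more members
    enum StorageFormat { TEXTFILE, RCTEXT, RCBINARY, ORC, PARQUET }

    public static void main(String[] args)
    {
        // Hypothetical partition key values in the order the test reads them: ds, file_format, dummy
        List<String> partitionKeyValues = List.of("2012-12-29", "textfile", "1");

        String ds = partitionKeyValues.get(0);
        // Partition values are stored as strings, so the format name is upper-cased and parsed into the enum...
        StorageFormat fileType = StorageFormat.valueOf(partitionKeyValues.get(1).toUpperCase(ENGLISH));
        // ...and the numeric partition column is parsed back into an int
        int dummyPartition = Integer.parseInt(partitionKeyValues.get(2));

        System.out.printf("ds=%s, format=%s, dummy=%d%n", ds, fileType, dummyPartition);
    }
}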