Use of io.trino.spi.connector.InMemoryRecordSet in project trino by trinodb.
In class PropertiesSystemTableProvider, method getSystemTable:
@Override
public Optional<SystemTable> getSystemTable(HiveMetadata metadata, ConnectorSession session, SchemaTableName tableName)
{
    if (!PROPERTIES.matches(tableName)) {
        return Optional.empty();
    }
    SchemaTableName sourceTableName = PROPERTIES.getSourceTableName(tableName);
    Table table = metadata.getMetastore()
            .getTable(sourceTableName.getSchemaName(), sourceTableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    if (isDeltaLakeTable(table) || isIcebergTable(table)) {
        return Optional.empty();
    }
    Map<String, String> sortedTableParameters = ImmutableSortedMap.copyOf(table.getParameters());
    List<ColumnMetadata> columns = sortedTableParameters.keySet().stream()
            .map(key -> new ColumnMetadata(key, VarcharType.VARCHAR))
            .collect(toImmutableList());
    List<Type> types = columns.stream()
            .map(ColumnMetadata::getType)
            .collect(toImmutableList());
    Iterable<List<Object>> propertyValues = ImmutableList.of(ImmutableList.copyOf(sortedTableParameters.values()));
    return Optional.of(createSystemTable(
            new ConnectorTableMetadata(sourceTableName, columns),
            constraint -> new InMemoryRecordSet(types, propertyValues).cursor()));
}
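The essential pattern here: one VARCHAR column per (sorted) table parameter and a single row holding the corresponding values, served through InMemoryRecordSet. Below is a minimal, self-contained sketch of that pattern; the class name and parameter map are made up for illustration and stand in for the metastore lookup above.

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSortedMap;
import io.trino.spi.connector.InMemoryRecordSet;
import io.trino.spi.connector.RecordCursor;
import io.trino.spi.type.Type;

import java.util.List;
import java.util.Map;

import static io.trino.spi.type.VarcharType.VARCHAR;

public class PropertiesRecordSetSketch
{
    public static void main(String[] args)
    {
        // Hypothetical table parameters; in the provider they come from the metastore.
        Map<String, String> parameters = ImmutableSortedMap.of("numFiles", "3", "totalSize", "1024");
        // One VARCHAR column per key, one row containing the values.
        List<Type> types = ImmutableList.of(VARCHAR, VARCHAR);
        InMemoryRecordSet recordSet = new InMemoryRecordSet(
                types,
                ImmutableList.of(ImmutableList.copyOf(parameters.values())));
        RecordCursor cursor = recordSet.cursor();
        while (cursor.advanceNextPosition()) {
            // InMemoryRecordSet accepts plain String values for VARCHAR columns,
            // as the Hive provider above also relies on.
            System.out.println(cursor.getSlice(0).toStringUtf8() + ", " + cursor.getSlice(1).toStringUtf8());
        }
        cursor.close();
    }
}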
Use of io.trino.spi.connector.InMemoryRecordSet in project trino by trinodb.
In class TestFieldSetFilteringRecordSet, method test:
@Test
public void test()
{
    ArrayType arrayOfBigintType = new ArrayType(BIGINT);
    FieldSetFilteringRecordSet fieldSetFilteringRecordSet = new FieldSetFilteringRecordSet(
            new TypeOperators(),
            new InMemoryRecordSet(
                    ImmutableList.of(BIGINT, BIGINT, TIMESTAMP_WITH_TIME_ZONE, TIMESTAMP_WITH_TIME_ZONE, arrayOfBigintType, arrayOfBigintType),
                    ImmutableList.of(ImmutableList.of(
                            100L,
                            100L,
                            // same instant in different time zones, to verify the equality check handles zones properly
                            packDateTimeWithZone(100, getTimeZoneKeyForOffset(123)),
                            packDateTimeWithZone(100, getTimeZoneKeyForOffset(234)),
                            // structural (array) values
                            arrayBlockOf(BIGINT, 12, 34, 56),
                            arrayBlockOf(BIGINT, 12, 34, 56)))),
            ImmutableList.of(ImmutableSet.of(0, 1), ImmutableSet.of(2, 3), ImmutableSet.of(4, 5)));
    RecordCursor recordCursor = fieldSetFilteringRecordSet.cursor();
    assertTrue(recordCursor.advanceNextPosition());
}
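FieldSetFilteringRecordSet only surfaces rows in which, for every supplied field set, all fields in that set compare equal; the test row passes because positions (0, 1), (2, 3), and (4, 5) each hold equal values. As a hedged counter-example (same constructor shape as the test, assuming the same static imports plus assertFalse), a row with one unequal pair should be filtered out:

// Fields 0 and 1 differ (100 vs. 200), so the wrapped cursor should yield no rows.
FieldSetFilteringRecordSet filtered = new FieldSetFilteringRecordSet(
        new TypeOperators(),
        new InMemoryRecordSet(
                ImmutableList.of(BIGINT, BIGINT),
                ImmutableList.of(ImmutableList.of(100L, 200L))),
        ImmutableList.of(ImmutableSet.of(0, 1)));
assertFalse(filtered.cursor().advanceNextPosition());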
Use of io.trino.spi.connector.InMemoryRecordSet in project trino by trinodb.
In class BigQueryMetadata, method getViewDefinitionSystemTable:
private Optional<SystemTable> getViewDefinitionSystemTable(ConnectorSession session, SchemaTableName viewDefinitionTableName, SchemaTableName sourceTableName)
{
    BigQueryClient client = bigQueryClientFactory.create(session);
    String projectId = getProjectId(client);
    String remoteSchemaName = client.toRemoteDataset(projectId, sourceTableName.getSchemaName())
            .map(RemoteDatabaseObject::getOnlyRemoteName)
            .orElseThrow(() -> new TableNotFoundException(viewDefinitionTableName));
    String remoteTableName = client.toRemoteTable(projectId, remoteSchemaName, sourceTableName.getTableName())
            .map(RemoteDatabaseObject::getOnlyRemoteName)
            .orElseThrow(() -> new TableNotFoundException(viewDefinitionTableName));
    TableInfo tableInfo = client.getTable(TableId.of(projectId, remoteSchemaName, remoteTableName))
            .orElseThrow(() -> new TableNotFoundException(viewDefinitionTableName));
    if (!(tableInfo.getDefinition() instanceof ViewDefinition)) {
        throw new TableNotFoundException(viewDefinitionTableName);
    }
    List<ColumnMetadata> columns = ImmutableList.of(new ColumnMetadata("query", VarcharType.VARCHAR));
    List<Type> types = columns.stream()
            .map(ColumnMetadata::getType)
            .collect(toImmutableList());
    Optional<String> query = Optional.ofNullable(((ViewDefinition) tableInfo.getDefinition()).getQuery());
    // a view with no query text yields the literal string "NULL" rather than an SQL NULL
    // (ImmutableList.of rejects null elements)
    Iterable<List<Object>> propertyValues = ImmutableList.of(ImmutableList.of(query.orElse("NULL")));
    return Optional.of(createSystemTable(
            new ConnectorTableMetadata(sourceTableName, columns),
            constraint -> new InMemoryRecordSet(types, propertyValues).cursor()));
}
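The same single-column, single-row result can also be assembled with InMemoryRecordSet's builder, which Trino's own system tables commonly use. A short sketch, with the caveat that the exact builder overload and the query text here are assumptions for illustration:

// Builder alternative to hand-assembling the Iterable of rows.
InMemoryRecordSet viewQueryTable = InMemoryRecordSet.builder(ImmutableList.of(VarcharType.VARCHAR))
        .addRow("SELECT * FROM source_table") // hypothetical view definition text
        .build();
RecordCursor viewCursor = viewQueryTable.cursor();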
Use of io.trino.spi.connector.InMemoryRecordSet in project trino by trinodb.
In class PartitionsSystemTableProvider, method getSystemTable:
@Override
public Optional<SystemTable> getSystemTable(HiveMetadata metadata, ConnectorSession session, SchemaTableName tableName)
{
    if (!PARTITIONS.matches(tableName)) {
        return Optional.empty();
    }
    SchemaTableName sourceTableName = PARTITIONS.getSourceTableName(tableName);
    Table sourceTable = metadata.getMetastore()
            .getTable(sourceTableName.getSchemaName(), sourceTableName.getTableName())
            .orElse(null);
    if (sourceTable == null || isDeltaLakeTable(sourceTable) || isIcebergTable(sourceTable)) {
        return Optional.empty();
    }
    verifyOnline(sourceTableName, Optional.empty(), getProtectMode(sourceTable), sourceTable.getParameters());
    HiveTableHandle sourceTableHandle = new HiveTableHandle(
            sourceTableName.getSchemaName(),
            sourceTableName.getTableName(),
            sourceTable.getParameters(),
            getPartitionKeyColumnHandles(sourceTable, typeManager),
            getRegularColumnHandles(sourceTable, typeManager, getTimestampPrecision(session)),
            getHiveBucketHandle(session, sourceTable, typeManager));
    List<HiveColumnHandle> partitionColumns = sourceTableHandle.getPartitionColumns();
    if (partitionColumns.isEmpty()) {
        return Optional.empty();
    }
    List<Type> partitionColumnTypes = partitionColumns.stream()
            .map(HiveColumnHandle::getType)
            .collect(toImmutableList());
    List<ColumnMetadata> partitionSystemTableColumns = partitionColumns.stream()
            .map(column -> ColumnMetadata.builder()
                    .setName(column.getName())
                    .setType(column.getType())
                    .setComment(column.getComment())
                    .setHidden(column.isHidden())
                    .build())
            .collect(toImmutableList());
    Map<Integer, HiveColumnHandle> fieldIdToColumnHandle = IntStream.range(0, partitionColumns.size())
            .boxed()
            .collect(toImmutableMap(identity(), partitionColumns::get));
    return Optional.of(createSystemTable(
            new ConnectorTableMetadata(tableName, partitionSystemTableColumns),
            constraint -> {
                Constraint targetConstraint = new Constraint(constraint.transformKeys(fieldIdToColumnHandle::get));
                Iterable<List<Object>> records = () -> stream(partitionManager.getPartitions(metadata.getMetastore(), sourceTableHandle, targetConstraint).getPartitions())
                        .map(hivePartition -> IntStream.range(0, partitionColumns.size())
                                .mapToObj(fieldIdToColumnHandle::get)
                                .map(columnHandle -> hivePartition.getKeys().get(columnHandle).getValue())
                                .collect(toList())) // toList(), not toImmutableList(): partition values may be null
                        .iterator();
                return new InMemoryRecordSet(partitionColumnTypes, records).cursor();
            }));
}
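Worth noting: records above is a lambda implementing Iterable, so the partition enumeration inside stream(partitionManager.getPartitions(...)) does not run until the cursor is created, i.e. after the engine's constraint has been captured in targetConstraint. A minimal sketch of that deferred-evaluation pattern, with hypothetical rows:

// InMemoryRecordSet stores the Iterable as-is; this supplier runs when the
// cursor is created (or at first advance), not when the record set is built.
Iterable<List<Object>> lazyRecords = () -> {
    System.out.println("enumerating partitions"); // stands in for expensive metastore work
    return ImmutableList.<List<Object>>of(ImmutableList.of(1L), ImmutableList.of(2L)).iterator();
};
InMemoryRecordSet lazySet = new InMemoryRecordSet(ImmutableList.of(BIGINT), lazyRecords);
RecordCursor lazyCursor = lazySet.cursor(); // enumeration happens here, not earlier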
Use of io.trino.spi.connector.InMemoryRecordSet in project trino by trinodb.
In class PartitionTable, method buildRecordCursor:
private RecordCursor buildRecordCursor(Map<StructLikeWrapper, IcebergStatistics> partitionStatistics, List<PartitionField> partitionFields)
{
    List<Type> partitionTypes = partitionTypes(partitionFields);
    List<? extends Class<?>> partitionColumnClass = partitionTypes.stream()
            .map(type -> type.typeId().javaClass())
            .collect(toImmutableList());
    ImmutableList.Builder<List<Object>> records = ImmutableList.builder();
    for (Map.Entry<StructLikeWrapper, IcebergStatistics> partitionEntry : partitionStatistics.entrySet()) {
        StructLikeWrapper partitionStruct = partitionEntry.getKey();
        IcebergStatistics icebergStatistics = partitionEntry.getValue();
        List<Object> row = new ArrayList<>();
        // add data for partition columns
        partitionColumnType.ifPresent(partitionColumnType -> {
            BlockBuilder partitionRowBlockBuilder = partitionColumnType.createBlockBuilder(null, 1);
            BlockBuilder partitionBlockBuilder = partitionRowBlockBuilder.beginBlockEntry();
            for (int i = 0; i < partitionColumnTypes.size(); i++) {
                io.trino.spi.type.Type trinoType = partitionColumnType.getFields().get(i).getType();
                Object value = convertIcebergValueToTrino(partitionTypes.get(i), partitionStruct.get().get(i, partitionColumnClass.get(i)));
                writeNativeValue(trinoType, partitionBlockBuilder, value);
            }
            partitionRowBlockBuilder.closeEntry();
            row.add(partitionColumnType.getObject(partitionRowBlockBuilder, 0));
        });
        // add the top-level metrics
        row.add(icebergStatistics.getRecordCount());
        row.add(icebergStatistics.getFileCount());
        row.add(icebergStatistics.getSize());
        // add column-level metrics
        dataColumnType.ifPresent(dataColumnType -> {
            BlockBuilder dataRowBlockBuilder = dataColumnType.createBlockBuilder(null, 1);
            BlockBuilder dataBlockBuilder = dataRowBlockBuilder.beginBlockEntry();
            for (int i = 0; i < columnMetricTypes.size(); i++) {
                Integer fieldId = nonPartitionPrimitiveColumns.get(i).fieldId();
                Object min = icebergStatistics.getMinValues().get(fieldId);
                Object max = icebergStatistics.getMaxValues().get(fieldId);
                Long nullCount = icebergStatistics.getNullCounts().get(fieldId);
                if (min == null && max == null && nullCount == null) {
                    row.add(null);
                    return;
                }
                RowType columnMetricType = columnMetricTypes.get(i);
                columnMetricType.writeObject(dataBlockBuilder, getColumnMetricBlock(columnMetricType, min, max, nullCount));
            }
            dataRowBlockBuilder.closeEntry();
            row.add(dataColumnType.getObject(dataRowBlockBuilder, 0));
        });
        records.add(row);
    }
    return new InMemoryRecordSet(resultTypes, records.build()).cursor();
}
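The nested partition and data columns here are ROW-typed, so each value is built as a single-position block via beginBlockEntry/closeEntry and then extracted with getObject before being placed in the row. A condensed sketch of that pattern for a one-field row, using the same (pre-buildEntry) BlockBuilder API this snippet's Trino version uses; the field name and value are made up:

// Build a single ROW(c BIGINT) value and pull it out as an Object suitable for
// an InMemoryRecordSet row with a ROW-typed column.
RowType rowType = RowType.from(ImmutableList.of(RowType.field("c", BIGINT)));
BlockBuilder rowBlockBuilder = rowType.createBlockBuilder(null, 1);
BlockBuilder fieldBuilder = rowBlockBuilder.beginBlockEntry();
writeNativeValue(BIGINT, fieldBuilder, 42L); // static import from io.trino.spi.type.TypeUtils
rowBlockBuilder.closeEntry();
Object rowValue = rowType.getObject(rowBlockBuilder, 0); // a Block holding the one row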