
Example 1 with DeltaLakeColumnHandle

Use of io.trino.plugin.deltalake.DeltaLakeColumnHandle in project trino by trinodb.

From class TestDeltaLakeSchemaSupport, method testRoundTripComplexSchema.

@Test
public void testRoundTripComplexSchema() throws IOException, URISyntaxException {
    URL expected = getResource("io/trino/plugin/deltalake/transactionlog/schema/complex_schema.json");
    String json = Files.readString(Path.of(expected.toURI()));
    List<ColumnMetadata> schema = DeltaLakeSchemaSupport.getColumnMetadata(json, typeManager);
    List<DeltaLakeColumnHandle> columnHandles = schema.stream()
            .map(metadata -> new DeltaLakeColumnHandle(metadata.getName(), metadata.getType(), REGULAR))
            .collect(toImmutableList());
    ObjectMapper objectMapper = new ObjectMapper();
    assertEquals(objectMapper.readTree(serializeSchemaAsJson(columnHandles)), objectMapper.readTree(json));
}
Also used : DataProvider(org.testng.annotations.DataProvider) AssertionsForClassTypes.assertThatCode(org.assertj.core.api.AssertionsForClassTypes.assertThatCode) INTERVAL_YEAR_MONTH(io.trino.type.IntervalYearMonthType.INTERVAL_YEAR_MONTH) ColumnMetadata(io.trino.spi.connector.ColumnMetadata) URL(java.net.URL) DeltaLakeSchemaSupport.serializeSchemaAsJson(io.trino.plugin.deltalake.transactionlog.DeltaLakeSchemaSupport.serializeSchemaAsJson) TIMESTAMP_MILLIS(io.trino.spi.type.TimestampType.TIMESTAMP_MILLIS) URISyntaxException(java.net.URISyntaxException) Type(io.trino.spi.type.Type) BOOLEAN(io.trino.spi.type.BooleanType.BOOLEAN) Test(org.testng.annotations.Test) TypeOperators(io.trino.spi.type.TypeOperators) VarcharType(io.trino.spi.type.VarcharType) TIMESTAMP_TZ_MILLIS(io.trino.spi.type.TimestampWithTimeZoneType.TIMESTAMP_TZ_MILLIS) VARCHAR(io.trino.spi.type.VarcharType.VARCHAR) ImmutableList(com.google.common.collect.ImmutableList) TestingComplexTypeManager(io.trino.plugin.deltalake.TestingComplexTypeManager) DeltaLakeColumnHandle(io.trino.plugin.deltalake.DeltaLakeColumnHandle) VARBINARY(io.trino.spi.type.VarbinaryType.VARBINARY) TIMESTAMP_WITH_TIME_ZONE(io.trino.spi.type.TimestampWithTimeZoneType.TIMESTAMP_WITH_TIME_ZONE) INTEGER(io.trino.spi.type.IntegerType.INTEGER) INTERVAL_DAY_TIME(io.trino.type.IntervalDayTimeType.INTERVAL_DAY_TIME) Path(java.nio.file.Path) DeltaLakeSchemaSupport.serializeStatsAsJson(io.trino.plugin.deltalake.transactionlog.DeltaLakeSchemaSupport.serializeStatsAsJson) SMALLINT(io.trino.spi.type.SmallintType.SMALLINT) TIMESTAMP_SECONDS(io.trino.spi.type.TimestampType.TIMESTAMP_SECONDS) Assert.assertEquals(io.trino.testing.assertions.Assert.assertEquals) RowType(io.trino.spi.type.RowType) ImmutableMap(com.google.common.collect.ImmutableMap) Files(java.nio.file.Files) MapType(io.trino.spi.type.MapType) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) ArrayType(io.trino.spi.type.ArrayType) IOException(java.io.IOException) DOUBLE(io.trino.spi.type.DoubleType.DOUBLE) Resources.getResource(com.google.common.io.Resources.getResource) List(java.util.List) DeltaLakeJsonFileStatistics(io.trino.plugin.deltalake.transactionlog.statistics.DeltaLakeJsonFileStatistics) BIGINT(io.trino.spi.type.BigintType.BIGINT) CharType(io.trino.spi.type.CharType) Optional(java.util.Optional) TINYINT(io.trino.spi.type.TinyintType.TINYINT) DecimalType(io.trino.spi.type.DecimalType) REGULAR(io.trino.plugin.deltalake.DeltaLakeColumnType.REGULAR) DATE(io.trino.spi.type.DateType.DATE) REAL(io.trino.spi.type.RealType.REAL)
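
The round trip can also be exercised in miniature. A minimal sketch, not part of the Trino test suite, assuming the same fixtures as above (the static imports serializeSchemaAsJson, REGULAR, BIGINT, and the test's typeManager, e.g. a TestingComplexTypeManager):

// Hypothetical single-column round trip using the same API as the test above.
DeltaLakeColumnHandle id = new DeltaLakeColumnHandle("id", BIGINT, REGULAR);
String json = serializeSchemaAsJson(ImmutableList.of(id));
List<ColumnMetadata> parsed = DeltaLakeSchemaSupport.getColumnMetadata(json, typeManager);
assertEquals(parsed.get(0).getName(), "id");
assertEquals(parsed.get(0).getType(), BIGINT);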

Example 2 with DeltaLakeColumnHandle

Use of io.trino.plugin.deltalake.DeltaLakeColumnHandle in project trino by trinodb.

From class TestDeltaLakeSchemaSupport, method testSerializeSchemaAsJson.

@Test
public void testSerializeSchemaAsJson() throws Exception {
    DeltaLakeColumnHandle arrayColumn = new DeltaLakeColumnHandle("arr", new ArrayType(new ArrayType(INTEGER)), REGULAR);
    DeltaLakeColumnHandle structColumn = new DeltaLakeColumnHandle(
            "str",
            RowType.from(ImmutableList.of(
                    new RowType.Field(Optional.of("s1"), VarcharType.createUnboundedVarcharType()),
                    new RowType.Field(Optional.of("s2"), RowType.from(ImmutableList.of(
                            new RowType.Field(Optional.of("i1"), INTEGER),
                            new RowType.Field(Optional.of("d2"), DecimalType.createDecimalType(38, 0))))))),
            REGULAR);
    TypeOperators typeOperators = new TypeOperators();
    DeltaLakeColumnHandle mapColumn = new DeltaLakeColumnHandle("m", new MapType(INTEGER, new MapType(INTEGER, INTEGER, typeOperators), typeOperators), REGULAR);
    URL expected = getResource("io/trino/plugin/deltalake/transactionlog/schema/nested_schema.json");
    ObjectMapper objectMapper = new ObjectMapper();
    String jsonEncoding = serializeSchemaAsJson(ImmutableList.of(arrayColumn, structColumn, mapColumn));
    assertEquals(objectMapper.readTree(jsonEncoding), objectMapper.readTree(expected));
}
Also used : ArrayType(io.trino.spi.type.ArrayType) RowType(io.trino.spi.type.RowType) DeltaLakeColumnHandle(io.trino.plugin.deltalake.DeltaLakeColumnHandle) MapType(io.trino.spi.type.MapType) URL(java.net.URL) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) TypeOperators(io.trino.spi.type.TypeOperators) Test(org.testng.annotations.Test)
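
The nested RowType.Field construction above can usually be flattened with the RowType.field(name, type) factory. A sketch of the equivalent inner row type for "s2", assuming that shorthand is available in this Trino SPI version:

// Equivalent to new RowType.Field(Optional.of(name), type), just shorter.
RowType s2Type = RowType.from(ImmutableList.of(
        RowType.field("i1", INTEGER),
        RowType.field("d2", DecimalType.createDecimalType(38, 0))));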

Example 3 with DeltaLakeColumnHandle

Use of io.trino.plugin.deltalake.DeltaLakeColumnHandle in project trino by trinodb.

From class TestDeltaLakeMetastoreStatistics, method testStatisticsParquetParsedStatistics.

@Test
public void testStatisticsParquetParsedStatistics() {
    // The transaction log for this table was created so that the checkpoints only write struct statistics, not JSON statistics
    DeltaLakeTableHandle tableHandle = registerTable("parquet_struct_statistics");
    TableStatistics stats = deltaLakeMetastore.getTableStatistics(SESSION, tableHandle, Constraint.alwaysTrue());
    assertEquals(stats.getRowCount(), Estimate.of(9));
    Map<ColumnHandle, ColumnStatistics> statisticsMap = stats.getColumnStatistics();
    ColumnStatistics columnStats = statisticsMap.get(new DeltaLakeColumnHandle("dec_short", DecimalType.createDecimalType(5, 1), REGULAR));
    assertEquals(columnStats.getNullsFraction(), Estimate.zero());
    assertEquals(columnStats.getRange().get().getMin(), -10.1);
    assertEquals(columnStats.getRange().get().getMax(), 10.1);
    columnStats = statisticsMap.get(new DeltaLakeColumnHandle("dec_long", DecimalType.createDecimalType(25, 3), REGULAR));
    assertEquals(columnStats.getNullsFraction(), Estimate.zero());
    assertEquals(columnStats.getRange().get().getMin(), -999999999999.123);
    assertEquals(columnStats.getRange().get().getMax(), 999999999999.123);
    columnStats = statisticsMap.get(new DeltaLakeColumnHandle("l", BIGINT, REGULAR));
    assertEquals(columnStats.getNullsFraction(), Estimate.zero());
    assertEquals(columnStats.getRange().get().getMin(), -10000000.0);
    assertEquals(columnStats.getRange().get().getMax(), 10000000.0);
    columnStats = statisticsMap.get(new DeltaLakeColumnHandle("in", INTEGER, REGULAR));
    assertEquals(columnStats.getNullsFraction(), Estimate.zero());
    assertEquals(columnStats.getRange().get().getMin(), -20000000.0);
    assertEquals(columnStats.getRange().get().getMax(), 20000000.0);
    columnStats = statisticsMap.get(new DeltaLakeColumnHandle("sh", SMALLINT, REGULAR));
    assertEquals(columnStats.getNullsFraction(), Estimate.zero());
    assertEquals(columnStats.getRange().get().getMin(), -123.0);
    assertEquals(columnStats.getRange().get().getMax(), 123.0);
    columnStats = statisticsMap.get(new DeltaLakeColumnHandle("byt", TINYINT, REGULAR));
    assertEquals(columnStats.getNullsFraction(), Estimate.zero());
    assertEquals(columnStats.getRange().get().getMin(), -42.0);
    assertEquals(columnStats.getRange().get().getMax(), 42.0);
    columnStats = statisticsMap.get(new DeltaLakeColumnHandle("fl", REAL, REGULAR));
    assertEquals(columnStats.getNullsFraction(), Estimate.zero());
    assertEquals((float) columnStats.getRange().get().getMin(), -0.123f);
    assertEquals((float) columnStats.getRange().get().getMax(), 0.123f);
    columnStats = statisticsMap.get(new DeltaLakeColumnHandle("dou", DOUBLE, REGULAR));
    assertEquals(columnStats.getNullsFraction(), Estimate.zero());
    assertEquals(columnStats.getRange().get().getMin(), -0.321);
    assertEquals(columnStats.getRange().get().getMax(), 0.321);
    columnStats = statisticsMap.get(new DeltaLakeColumnHandle("dat", DATE, REGULAR));
    assertEquals(columnStats.getNullsFraction(), Estimate.zero());
    assertEquals(columnStats.getRange().get().getMin(), (double) LocalDate.parse("1900-01-01").toEpochDay());
    assertEquals(columnStats.getRange().get().getMax(), (double) LocalDate.parse("5000-01-01").toEpochDay());
}
Also used : ColumnStatistics(io.trino.spi.statistics.ColumnStatistics) DeltaLakeColumnHandle(io.trino.plugin.deltalake.DeltaLakeColumnHandle) ColumnHandle(io.trino.spi.connector.ColumnHandle) TableStatistics(io.trino.spi.statistics.TableStatistics) DeltaLakeTableHandle(io.trino.plugin.deltalake.DeltaLakeTableHandle) Test(org.testng.annotations.Test)
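
Note that every range is surfaced as a DoubleRange, even for DATE columns, where min and max are epoch days cast to double. A sketch of recovering the actual date, reusing the statisticsMap from the test above:

// Convert the double-encoded epoch day back to a LocalDate.
ColumnStatistics dateStats = statisticsMap.get(new DeltaLakeColumnHandle("dat", DATE, REGULAR));
double minEpochDay = dateStats.getRange().orElseThrow().getMin();
LocalDate minDate = LocalDate.ofEpochDay((long) minEpochDay); // 1900-01-01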

Example 4 with DeltaLakeColumnHandle

Use of io.trino.plugin.deltalake.DeltaLakeColumnHandle in project trino by trinodb.

From class TestDeltaLakeMetastoreStatistics, method testStatisticsParquetParsedStatisticsNaNValues.

@Test
public void testStatisticsParquetParsedStatisticsNaNValues() {
    // The transaction log for this table was created so that the checkpoints only write struct statistics, not JSON statistics
    // The table has one REAL and one DOUBLE column, each with 9 values, one of which is NaN
    DeltaLakeTableHandle tableHandle = registerTable("parquet_struct_statistics_nan");
    TableStatistics stats = deltaLakeMetastore.getTableStatistics(SESSION, tableHandle, Constraint.alwaysTrue());
    assertEquals(stats.getRowCount(), Estimate.of(9));
    Map<ColumnHandle, ColumnStatistics> statisticsMap = stats.getColumnStatistics();
    ColumnStatistics columnStats = statisticsMap.get(new DeltaLakeColumnHandle("fl", REAL, REGULAR));
    assertEquals(columnStats.getNullsFraction(), Estimate.zero());
    assertThat(columnStats.getRange()).isEmpty();
    columnStats = statisticsMap.get(new DeltaLakeColumnHandle("dou", DOUBLE, REGULAR));
    assertEquals(columnStats.getNullsFraction(), Estimate.zero());
    assertThat(columnStats.getRange()).isEmpty();
}
Also used : ColumnStatistics(io.trino.spi.statistics.ColumnStatistics) DeltaLakeColumnHandle(io.trino.plugin.deltalake.DeltaLakeColumnHandle) ColumnHandle(io.trino.spi.connector.ColumnHandle) TableStatistics(io.trino.spi.statistics.TableStatistics) DeltaLakeTableHandle(io.trino.plugin.deltalake.DeltaLakeTableHandle) Test(org.testng.annotations.Test)
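
The empty ranges are consistent with how Example 5 aggregates file statistics: Math.min and Math.max propagate NaN, and a NaN bound is rejected before a DoubleRange is built. In miniature:

// A single NaN statistic poisons the running min/max for the whole table...
double min = Math.min(-0.123, Double.NaN); // NaN
double max = Math.max(0.123, Double.NaN);  // NaN
// ...so no range is set, and columnStats.getRange() comes back empty.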

Example 5 with DeltaLakeColumnHandle

Use of io.trino.plugin.deltalake.DeltaLakeColumnHandle in project trino by trinodb.

From class HiveMetastoreBackedDeltaLakeMetastore, method getTableStatistics.

@Override
public TableStatistics getTableStatistics(ConnectorSession session, DeltaLakeTableHandle tableHandle, Constraint constraint) {
    TableSnapshot tableSnapshot = getSnapshot(tableHandle.getSchemaTableName(), session);
    double numRecords = 0;
    MetadataEntry metadata = transactionLogAccess.getMetadataEntry(tableSnapshot, session)
            .orElseThrow(() -> new TrinoException(DELTA_LAKE_INVALID_SCHEMA, "Metadata not found in transaction log for " + tableHandle.getTableName()));
    List<ColumnMetadata> columnMetadata = DeltaLakeSchemaSupport.extractSchema(metadata, typeManager);
    List<DeltaLakeColumnHandle> columns = columnMetadata.stream()
            .map(columnMeta -> new DeltaLakeColumnHandle(
                    columnMeta.getName(),
                    columnMeta.getType(),
                    metadata.getCanonicalPartitionColumns().contains(columnMeta.getName()) ? PARTITION_KEY : REGULAR))
            .collect(toImmutableList());
    Map<DeltaLakeColumnHandle, Double> nullCounts = new HashMap<>();
    columns.forEach(column -> nullCounts.put(column, 0.0));
    Map<DeltaLakeColumnHandle, Double> minValues = new HashMap<>();
    Map<DeltaLakeColumnHandle, Double> maxValues = new HashMap<>();
    Map<DeltaLakeColumnHandle, Set<String>> partitioningColumnsDistinctValues = new HashMap<>();
    columns.stream()
            .filter(column -> column.getColumnType() == PARTITION_KEY)
            .forEach(column -> partitioningColumnsDistinctValues.put(column, new HashSet<>()));
    if (tableHandle.getEnforcedPartitionConstraint().isNone() || tableHandle.getNonPartitionConstraint().isNone() || constraint.getSummary().isNone()) {
        return createZeroStatistics(columns);
    }
    Set<String> predicatedColumnNames = tableHandle.getNonPartitionConstraint().getDomains().orElseThrow().keySet().stream()
            .map(DeltaLakeColumnHandle::getName)
            .collect(toImmutableSet());
    List<ColumnMetadata> predicatedColumns = columnMetadata.stream()
            .filter(column -> predicatedColumnNames.contains(column.getName()))
            .collect(toImmutableList());
    for (AddFileEntry addEntry : transactionLogAccess.getActiveFiles(tableSnapshot, session)) {
        Optional<? extends DeltaLakeFileStatistics> fileStatistics = addEntry.getStats();
        if (fileStatistics.isEmpty()) {
            // Open source Delta Lake does not collect stats
            return TableStatistics.empty();
        }
        DeltaLakeFileStatistics stats = fileStatistics.get();
        if (!partitionMatchesPredicate(addEntry.getCanonicalPartitionValues(), tableHandle.getEnforcedPartitionConstraint().getDomains().orElseThrow())) {
            continue;
        }
        TupleDomain<DeltaLakeColumnHandle> statisticsPredicate = createStatisticsPredicate(addEntry, predicatedColumns, tableHandle.getMetadataEntry().getCanonicalPartitionColumns());
        if (!tableHandle.getNonPartitionConstraint().overlaps(statisticsPredicate)) {
            continue;
        }
        if (stats.getNumRecords().isEmpty()) {
            // Not clear if it's possible for stats to be present with no row count, but bail out if that happens
            return TableStatistics.empty();
        }
        numRecords += stats.getNumRecords().get();
        for (DeltaLakeColumnHandle column : columns) {
            if (column.getColumnType() == PARTITION_KEY) {
                Optional<String> partitionValue = addEntry.getCanonicalPartitionValues().get(column.getName());
                if (partitionValue.isEmpty()) {
                    nullCounts.merge(column, (double) stats.getNumRecords().get(), Double::sum);
                } else {
                    // NULL is not counted as a distinct value
                    // The code below assumes the values returned by addEntry.getCanonicalPartitionValues() are normalized;
                    // that may not hold for real, double, timestamp, etc. types
                    partitioningColumnsDistinctValues.get(column).add(partitionValue.get());
                }
            } else {
                Optional<Long> maybeNullCount = stats.getNullCount(column.getName());
                if (maybeNullCount.isPresent()) {
                    nullCounts.put(column, nullCounts.get(column) + maybeNullCount.get());
                } else {
                    // If any individual file fails to report null counts, fail to calculate the total for the table
                    nullCounts.put(column, NaN);
                }
            }
            // Math.min returns NaN if any operand is NaN
            stats.getMinColumnValue(column)
                    .map(parsedValue -> toStatsRepresentation(column.getType(), parsedValue))
                    .filter(OptionalDouble::isPresent)
                    .map(OptionalDouble::getAsDouble)
                    .ifPresent(parsedValueAsDouble -> minValues.merge(column, parsedValueAsDouble, Math::min));
            stats.getMaxColumnValue(column)
                    .map(parsedValue -> toStatsRepresentation(column.getType(), parsedValue))
                    .filter(OptionalDouble::isPresent)
                    .map(OptionalDouble::getAsDouble)
                    .ifPresent(parsedValueAsDouble -> maxValues.merge(column, parsedValueAsDouble, Math::max));
        }
    }
    if (numRecords == 0) {
        return createZeroStatistics(columns);
    }
    TableStatistics.Builder statsBuilder = new TableStatistics.Builder().setRowCount(Estimate.of(numRecords));
    Optional<DeltaLakeStatistics> statistics = Optional.empty();
    if (isExtendedStatisticsEnabled(session)) {
        statistics = statisticsAccess.readDeltaLakeStatistics(session, tableHandle.getLocation());
    }
    for (DeltaLakeColumnHandle column : columns) {
        ColumnStatistics.Builder columnStatsBuilder = new ColumnStatistics.Builder();
        Double nullCount = nullCounts.get(column);
        columnStatsBuilder.setNullsFraction(nullCount.isNaN() ? Estimate.unknown() : Estimate.of(nullCount / numRecords));
        Double maxValue = maxValues.get(column);
        Double minValue = minValues.get(column);
        if (isValidInRange(maxValue) && isValidInRange(minValue)) {
            columnStatsBuilder.setRange(new DoubleRange(minValue, maxValue));
        } else if (isValidInRange(maxValue)) {
            columnStatsBuilder.setRange(new DoubleRange(NEGATIVE_INFINITY, maxValue));
        } else if (isValidInRange(minValue)) {
            columnStatsBuilder.setRange(new DoubleRange(minValue, POSITIVE_INFINITY));
        }
        // extend statistics with NDV
        if (column.getColumnType() == PARTITION_KEY) {
            columnStatsBuilder.setDistinctValuesCount(Estimate.of(partitioningColumnsDistinctValues.get(column).size()));
        }
        if (statistics.isPresent()) {
            DeltaLakeColumnStatistics deltaLakeColumnStatistics = statistics.get().getColumnStatistics().get(column.getName());
            if (deltaLakeColumnStatistics != null && column.getColumnType() != PARTITION_KEY) {
                columnStatsBuilder.setDistinctValuesCount(Estimate.of(deltaLakeColumnStatistics.getNdvSummary().cardinality()));
            }
        }
        statsBuilder.setColumnStatistics(column, columnStatsBuilder.build());
    }
    return statsBuilder.build();
}
Also used : DeltaLakeStatistics(io.trino.plugin.deltalake.statistics.DeltaLakeStatistics) POSITIVE_INFINITY(java.lang.Double.POSITIVE_INFINITY) PATH_PROPERTY(io.trino.plugin.deltalake.DeltaLakeMetadata.PATH_PROPERTY) TableSnapshot(io.trino.plugin.deltalake.transactionlog.TableSnapshot) Database(io.trino.plugin.hive.metastore.Database) NEGATIVE_INFINITY(java.lang.Double.NEGATIVE_INFINITY) AddFileEntry(io.trino.plugin.deltalake.transactionlog.AddFileEntry) TransactionLogAccess(io.trino.plugin.deltalake.transactionlog.TransactionLogAccess) StatsUtil.toStatsRepresentation(io.trino.spi.statistics.StatsUtil.toStatsRepresentation) TableNotFoundException(io.trino.spi.connector.TableNotFoundException) DeltaLakeMetadata.createStatisticsPredicate(io.trino.plugin.deltalake.DeltaLakeMetadata.createStatisticsPredicate) NaN(java.lang.Double.NaN) DeltaLakeColumnHandle(io.trino.plugin.deltalake.DeltaLakeColumnHandle) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) DeltaLakeTableHandle(io.trino.plugin.deltalake.DeltaLakeTableHandle) Table(io.trino.plugin.hive.metastore.Table) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) MetadataEntry(io.trino.plugin.deltalake.transactionlog.MetadataEntry) Set(java.util.Set) DeltaLakeSplitManager.partitionMatchesPredicate(io.trino.plugin.deltalake.DeltaLakeSplitManager.partitionMatchesPredicate) DeltaLakeSchemaSupport(io.trino.plugin.deltalake.transactionlog.DeltaLakeSchemaSupport) TrinoException(io.trino.spi.TrinoException) SchemaTableName(io.trino.spi.connector.SchemaTableName) String.format(java.lang.String.format) List(java.util.List) Optional(java.util.Optional) REGULAR(io.trino.plugin.deltalake.DeltaLakeColumnType.REGULAR) DoubleRange(io.trino.spi.statistics.DoubleRange) Constraint(io.trino.spi.connector.Constraint) DeltaLakeFileStatistics(io.trino.plugin.deltalake.transactionlog.statistics.DeltaLakeFileStatistics) CachingDeltaLakeStatisticsAccess(io.trino.plugin.deltalake.statistics.CachingDeltaLakeStatisticsAccess) ColumnMetadata(io.trino.spi.connector.ColumnMetadata) DeltaLakeColumnStatistics(io.trino.plugin.deltalake.statistics.DeltaLakeColumnStatistics) DELTA_LAKE_INVALID_TABLE(io.trino.plugin.deltalake.DeltaLakeErrorCode.DELTA_LAKE_INVALID_TABLE) OptionalDouble(java.util.OptionalDouble) HashMap(java.util.HashMap) HashSet(java.util.HashSet) HiveMetastore(io.trino.plugin.hive.metastore.HiveMetastore) Objects.requireNonNull(java.util.Objects.requireNonNull) TableStatistics(io.trino.spi.statistics.TableStatistics) ImmutableSet.toImmutableSet(com.google.common.collect.ImmutableSet.toImmutableSet) DeltaLakeSessionProperties.isExtendedStatisticsEnabled(io.trino.plugin.deltalake.DeltaLakeSessionProperties.isExtendedStatisticsEnabled) Estimate(io.trino.spi.statistics.Estimate) ColumnStatistics(io.trino.spi.statistics.ColumnStatistics) PARTITION_KEY(io.trino.plugin.deltalake.DeltaLakeColumnType.PARTITION_KEY) IOException(java.io.IOException) ConnectorSession(io.trino.spi.connector.ConnectorSession) TupleDomain(io.trino.spi.predicate.TupleDomain) DELTA_LAKE_INVALID_SCHEMA(io.trino.plugin.deltalake.DeltaLakeErrorCode.DELTA_LAKE_INVALID_SCHEMA) ProtocolEntry(io.trino.plugin.deltalake.transactionlog.ProtocolEntry) PrincipalPrivileges(io.trino.plugin.hive.metastore.PrincipalPrivileges) TypeManager(io.trino.spi.type.TypeManager)
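
One detail of this method worth isolating: NaN acts as a sentinel meaning "at least one file did not report a null count", which later collapses to Estimate.unknown() rather than a misleading fraction. A minimal sketch with hypothetical values:

// NaN sentinel -> unknown estimate; otherwise nullCount / numRecords.
double numRecords = 9;
double nullCount = Double.NaN; // some file lacked null counts
Estimate nullsFraction = Double.isNaN(nullCount)
        ? Estimate.unknown()
        : Estimate.of(nullCount / numRecords);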

Aggregations

DeltaLakeColumnHandle (io.trino.plugin.deltalake.DeltaLakeColumnHandle): 9
ColumnStatistics (io.trino.spi.statistics.ColumnStatistics): 5
TableStatistics (io.trino.spi.statistics.TableStatistics): 5
Test (org.testng.annotations.Test): 5
DeltaLakeTableHandle (io.trino.plugin.deltalake.DeltaLakeTableHandle): 4
ColumnHandle (io.trino.spi.connector.ColumnHandle): 3
ArrayType (io.trino.spi.type.ArrayType): 3
MapType (io.trino.spi.type.MapType): 3
RowType (io.trino.spi.type.RowType): 3
TypeOperators (io.trino.spi.type.TypeOperators): 3
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 2
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 2
REGULAR (io.trino.plugin.deltalake.DeltaLakeColumnType.REGULAR): 2
DeltaLakeColumnStatistics (io.trino.plugin.deltalake.statistics.DeltaLakeColumnStatistics): 2
ColumnMetadata (io.trino.spi.connector.ColumnMetadata): 2
IOException (java.io.IOException): 2
URL (java.net.URL): 2
List (java.util.List): 2
Optional (java.util.Optional): 2
JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException): 1