Example 1 with DeltaLakeTransactionLogEntry

Use of io.trino.plugin.deltalake.transactionlog.DeltaLakeTransactionLogEntry in project trino by trinodb.

From class TestCheckpointEntryIterator, method testReadAllEntries.

@Test
public void testReadAllEntries() throws Exception {
    URI checkpointUri = getResource(TEST_CHECKPOINT).toURI();
    MetadataEntry metadataEntry = readMetadataEntry(checkpointUri);
    CheckpointEntryIterator checkpointEntryIterator = createCheckpointEntryIterator(
            checkpointUri,
            ImmutableSet.of(METADATA, PROTOCOL, TRANSACTION, ADD, REMOVE, COMMIT),
            Optional.of(metadataEntry));
    List<DeltaLakeTransactionLogEntry> entries = ImmutableList.copyOf(checkpointEntryIterator);
    assertThat(entries).hasSize(17);
    // MetadataEntry
    assertThat(entries).element(12).extracting(DeltaLakeTransactionLogEntry::getMetaData).isEqualTo(metadataEntry);
    // ProtocolEntry
    assertThat(entries).element(11).extracting(DeltaLakeTransactionLogEntry::getProtocol).isEqualTo(new ProtocolEntry(1, 2));
    // TransactionEntry
    // not found in the checkpoint, TODO add a test
    assertThat(entries).map(DeltaLakeTransactionLogEntry::getTxn).filteredOn(Objects::nonNull).isEmpty();
    // AddFileEntry
    assertThat(entries).element(8).extracting(DeltaLakeTransactionLogEntry::getAdd).isEqualTo(
            new AddFileEntry(
                    "age=42/part-00003-0f53cae3-3e34-4876-b651-e1db9584dbc3.c000.snappy.parquet",
                    Map.of("age", "42"),
                    2634,
                    1579190165000L,
                    false,
                    Optional.of("{" +
                            "\"numRecords\":1," +
                            "\"minValues\":{\"name\":\"Alice\",\"address\":{\"street\":\"100 Main St\",\"city\":\"Anytown\",\"state\":\"NY\",\"zip\":\"12345\"},\"income\":111000.0}," +
                            "\"maxValues\":{\"name\":\"Alice\",\"address\":{\"street\":\"100 Main St\",\"city\":\"Anytown\",\"state\":\"NY\",\"zip\":\"12345\"},\"income\":111000.0}," +
                            "\"nullCount\":{\"name\":0,\"married\":0,\"phones\":0,\"address\":{\"street\":0,\"city\":0,\"state\":0,\"zip\":0},\"income\":0}" +
                            "}"),
                    Optional.empty(),
                    null));
    // RemoveFileEntry
    assertThat(entries).element(3).extracting(DeltaLakeTransactionLogEntry::getRemove).isEqualTo(new RemoveFileEntry("age=42/part-00000-951068bd-bcf4-4094-bb94-536f3c41d31f.c000.snappy.parquet", 1579190155406L, false));
    // CommitInfoEntry
    // not found in the checkpoint, TODO add a test
    assertThat(entries).map(DeltaLakeTransactionLogEntry::getCommitInfo).filteredOn(Objects::nonNull).isEmpty();
}
Also used: ProtocolEntry(io.trino.plugin.deltalake.transactionlog.ProtocolEntry) DeltaLakeTransactionLogEntry(io.trino.plugin.deltalake.transactionlog.DeltaLakeTransactionLogEntry) AddFileEntry(io.trino.plugin.deltalake.transactionlog.AddFileEntry) MetadataEntry(io.trino.plugin.deltalake.transactionlog.MetadataEntry) RemoveFileEntry(io.trino.plugin.deltalake.transactionlog.RemoveFileEntry) URI(java.net.URI) Test(org.testng.annotations.Test)
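
Examples 1 and 3 call two helpers, readMetadataEntry and createCheckpointEntryIterator, that this excerpt never shows. The sketch below is one plausible implementation, not the actual Trino test code: it simply reuses the direct CheckpointEntryIterator construction from Example 2 (SESSION, TESTING_TYPE_MANAGER, and the entry-type constants are the same names the excerpts already use).

private static CheckpointEntryIterator createCheckpointEntryIterator(URI checkpointUri, Set<CheckpointEntryIterator.EntryType> entryTypes, Optional<MetadataEntry> metadataEntry) throws IOException {
    Path checkpointPath = new Path(checkpointUri);
    // Same HDFS wiring as Example 2 below
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    FileSystem fs = hdfsEnvironment.getFileSystem(new HdfsEnvironment.HdfsContext(SESSION), checkpointPath);
    return new CheckpointEntryIterator(
            checkpointPath,
            SESSION,
            fs.getFileStatus(checkpointPath).getLen(),
            new CheckpointSchemaManager(TESTING_TYPE_MANAGER),
            TESTING_TYPE_MANAGER,
            entryTypes,
            metadataEntry,
            hdfsEnvironment,
            new FileFormatDataSourceStats(),
            new ParquetReaderConfig().toParquetReaderOptions(),
            true);
}

private static MetadataEntry readMetadataEntry(URI checkpointUri) throws IOException {
    // A checkpoint holds exactly one MetadataEntry, so request only METADATA and take the sole element
    return getOnlyElement(createCheckpointEntryIterator(checkpointUri, ImmutableSet.of(METADATA), Optional.empty())).getMetaData();
}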

Example 2 with DeltaLakeTransactionLogEntry

Use of io.trino.plugin.deltalake.transactionlog.DeltaLakeTransactionLogEntry in project trino by trinodb.

From class TestDeltaLakeFileStatistics, method testParseParquetStatistics.

@Test
public void testParseParquetStatistics() throws Exception {
    File statsFile = new File(getClass().getResource("/databricks/pruning/parquet_struct_statistics/_delta_log/00000000000000000010.checkpoint.parquet").getFile());
    Path checkpointPath = new Path(statsFile.toURI());
    TypeManager typeManager = TESTING_TYPE_MANAGER;
    CheckpointSchemaManager checkpointSchemaManager = new CheckpointSchemaManager(typeManager);
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    FileSystem fs = hdfsEnvironment.getFileSystem(new HdfsEnvironment.HdfsContext(SESSION), checkpointPath);
    CheckpointEntryIterator metadataEntryIterator = new CheckpointEntryIterator(
            checkpointPath,
            SESSION,
            fs.getFileStatus(checkpointPath).getLen(),
            checkpointSchemaManager,
            typeManager,
            ImmutableSet.of(METADATA),
            Optional.empty(),
            hdfsEnvironment,
            new FileFormatDataSourceStats(),
            new ParquetReaderConfig().toParquetReaderOptions(),
            true);
    MetadataEntry metadataEntry = getOnlyElement(metadataEntryIterator).getMetaData();
    CheckpointEntryIterator checkpointEntryIterator = new CheckpointEntryIterator(
            checkpointPath,
            SESSION,
            fs.getFileStatus(checkpointPath).getLen(),
            checkpointSchemaManager,
            typeManager,
            ImmutableSet.of(CheckpointEntryIterator.EntryType.ADD),
            Optional.of(metadataEntry),
            hdfsEnvironment,
            new FileFormatDataSourceStats(),
            new ParquetReaderConfig().toParquetReaderOptions(),
            true);
    DeltaLakeTransactionLogEntry matchingAddFileEntry = null;
    while (checkpointEntryIterator.hasNext()) {
        DeltaLakeTransactionLogEntry entry = checkpointEntryIterator.next();
        if (entry.getAdd() != null && entry.getAdd().getPath().contains("part-00000-17951bea-0d04-43c1-979c-ea1fac19b382-c000.snappy.parquet")) {
            assertNull(matchingAddFileEntry);
            matchingAddFileEntry = entry;
        }
    }
    assertNotNull(matchingAddFileEntry);
    assertThat(matchingAddFileEntry.getAdd().getStats()).isPresent();
    testStatisticsValues(matchingAddFileEntry.getAdd().getStats().get());
}
Also used: Path(org.apache.hadoop.fs.Path) HdfsConfigurationInitializer(io.trino.plugin.hive.HdfsConfigurationInitializer) HiveHdfsConfiguration(io.trino.plugin.hive.HiveHdfsConfiguration) DeltaLakeTransactionLogEntry(io.trino.plugin.deltalake.transactionlog.DeltaLakeTransactionLogEntry) HdfsConfig(io.trino.plugin.hive.HdfsConfig) FileFormatDataSourceStats(io.trino.plugin.hive.FileFormatDataSourceStats) CheckpointEntryIterator(io.trino.plugin.deltalake.transactionlog.checkpoint.CheckpointEntryIterator) HdfsConfiguration(io.trino.plugin.hive.HdfsConfiguration) NoHdfsAuthentication(io.trino.plugin.hive.authentication.NoHdfsAuthentication) HdfsEnvironment(io.trino.plugin.hive.HdfsEnvironment) CheckpointSchemaManager(io.trino.plugin.deltalake.transactionlog.checkpoint.CheckpointSchemaManager) FileSystem(org.apache.hadoop.fs.FileSystem) TypeManager(io.trino.spi.type.TypeManager) MetadataEntry(io.trino.plugin.deltalake.transactionlog.MetadataEntry) File(java.io.File) ParquetReaderConfig(io.trino.plugin.hive.parquet.ParquetReaderConfig) Test(org.testng.annotations.Test)
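
Note the two-pass construction above: a first iterator reads only METADATA, and the resulting MetadataEntry is handed to the second iterator, which needs the table schema to decode partition values and statistics on the ADD entries. The testStatisticsValues helper is also not shown in this excerpt; a minimal hypothetical stand-in, assuming the parsed statistics type is DeltaLakeFileStatistics and that it exposes getNumRecords(), might look like:

private static void testStatisticsValues(DeltaLakeFileStatistics fileStatistics) {
    // Illustrative only: the real helper presumably asserts concrete per-column min/max/null-count values
    assertThat(fileStatistics.getNumRecords()).isPresent();
}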

Example 3 with DeltaLakeTransactionLogEntry

Use of io.trino.plugin.deltalake.transactionlog.DeltaLakeTransactionLogEntry in project trino by trinodb.

From class TestCheckpointEntryIterator, method testReadAddEntries.

@Test
public void testReadAddEntries() throws Exception {
    URI checkpointUri = getResource(TEST_CHECKPOINT).toURI();
    CheckpointEntryIterator checkpointEntryIterator = createCheckpointEntryIterator(checkpointUri, ImmutableSet.of(ADD), Optional.of(readMetadataEntry(checkpointUri)));
    List<DeltaLakeTransactionLogEntry> entries = ImmutableList.copyOf(checkpointEntryIterator);
    assertThat(entries).hasSize(9);
    assertThat(entries).element(3).extracting(DeltaLakeTransactionLogEntry::getAdd).isEqualTo(
            new AddFileEntry(
                    "age=42/part-00003-0f53cae3-3e34-4876-b651-e1db9584dbc3.c000.snappy.parquet",
                    Map.of("age", "42"),
                    2634,
                    1579190165000L,
                    false,
                    Optional.of("{" +
                            "\"numRecords\":1," +
                            "\"minValues\":{\"name\":\"Alice\",\"address\":{\"street\":\"100 Main St\",\"city\":\"Anytown\",\"state\":\"NY\",\"zip\":\"12345\"},\"income\":111000.0}," +
                            "\"maxValues\":{\"name\":\"Alice\",\"address\":{\"street\":\"100 Main St\",\"city\":\"Anytown\",\"state\":\"NY\",\"zip\":\"12345\"},\"income\":111000.0}," +
                            "\"nullCount\":{\"name\":0,\"married\":0,\"phones\":0,\"address\":{\"street\":0,\"city\":0,\"state\":0,\"zip\":0},\"income\":0}" +
                            "}"),
                    Optional.empty(),
                    null));
    assertThat(entries).element(7).extracting(DeltaLakeTransactionLogEntry::getAdd).isEqualTo(
            new AddFileEntry(
                    "age=30/part-00002-5800be2e-2373-47d8-8b86-776a8ea9d69f.c000.snappy.parquet",
                    Map.of("age", "30"),
                    2688,
                    1579190165000L,
                    false,
                    Optional.of("{" +
                            "\"numRecords\":1," +
                            "\"minValues\":{\"name\":\"Andy\",\"address\":{\"street\":\"101 Main St\",\"city\":\"Anytown\",\"state\":\"NY\",\"zip\":\"12345\"},\"income\":81000.0}," +
                            "\"maxValues\":{\"name\":\"Andy\",\"address\":{\"street\":\"101 Main St\",\"city\":\"Anytown\",\"state\":\"NY\",\"zip\":\"12345\"},\"income\":81000.0}," +
                            "\"nullCount\":{\"name\":0,\"married\":0,\"phones\":0,\"address\":{\"street\":0,\"city\":0,\"state\":0,\"zip\":0},\"income\":0}" +
                            "}"),
                    Optional.empty(),
                    null));
}
Also used: DeltaLakeTransactionLogEntry(io.trino.plugin.deltalake.transactionlog.DeltaLakeTransactionLogEntry) AddFileEntry(io.trino.plugin.deltalake.transactionlog.AddFileEntry) URI(java.net.URI) Test(org.testng.annotations.Test)

Example 4 with DeltaLakeTransactionLogEntry

Use of io.trino.plugin.deltalake.transactionlog.DeltaLakeTransactionLogEntry in project trino by trinodb.

From class TransactionLogTail, method loadNewTail.

// Load a section of the transaction log's JSON entries, optionally from a given start version (exclusive) through an end version (inclusive)
public static TransactionLogTail loadNewTail(FileSystem fileSystem, Path tableLocation, Optional<Long> startVersion, Optional<Long> endVersion) throws IOException {
    ImmutableList.Builder<DeltaLakeTransactionLogEntry> entriesBuilder = ImmutableList.builder();
    long version = startVersion.orElse(0L);
    long entryNumber = startVersion.map(start -> start + 1).orElse(0L);
    Path transactionLogDir = getTransactionLogDir(tableLocation);
    Optional<List<DeltaLakeTransactionLogEntry>> results;
    boolean endOfTail = false;
    while (!endOfTail) {
        results = getEntriesFromJson(getTransactionLogJsonEntryPath(transactionLogDir, entryNumber), fileSystem);
        if (results.isPresent()) {
            entriesBuilder.addAll(results.get());
            version = entryNumber;
            entryNumber++;
        } else {
            endOfTail = true;
        }
        if (endVersion.isPresent() && version == endVersion.get()) {
            endOfTail = true;
        }
    }
    return new TransactionLogTail(entriesBuilder.build(), version);
}
Also used: TransactionLogUtil.getTransactionLogDir(io.trino.plugin.deltalake.transactionlog.TransactionLogUtil.getTransactionLogDir) FileSystem(org.apache.hadoop.fs.FileSystem) UTF_8(java.nio.charset.StandardCharsets.UTF_8) IOException(java.io.IOException) InputStreamReader(java.io.InputStreamReader) FileNotFoundException(java.io.FileNotFoundException) TransactionLogUtil.getTransactionLogJsonEntryPath(io.trino.plugin.deltalake.transactionlog.TransactionLogUtil.getTransactionLogJsonEntryPath) DeltaLakeTransactionLogEntry(io.trino.plugin.deltalake.transactionlog.DeltaLakeTransactionLogEntry) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) TransactionLogParser.parseJson(io.trino.plugin.deltalake.transactionlog.TransactionLogParser.parseJson) Objects.requireNonNull(java.util.Objects.requireNonNull) Path(org.apache.hadoop.fs.Path) Optional(java.util.Optional) BufferedReader(java.io.BufferedReader)
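
The start-exclusive/end-inclusive contract is easiest to see in a call. A hypothetical caller might look like the following sketch (the table location is made up, and getFileEntries()/getVersion() are assumed to be the accessors TransactionLogTail is read through):

static void readRecentTail() throws IOException {
    FileSystem fileSystem = FileSystem.getLocal(new org.apache.hadoop.conf.Configuration());
    Path tableLocation = new Path("/tmp/my_delta_table"); // hypothetical table location
    // Resume after an already-processed version 10, stopping at version 15: this reads
    // 00000000000000000011.json through 00000000000000000015.json (start exclusive,
    // end inclusive), or stops earlier at the first missing file
    TransactionLogTail tail = TransactionLogTail.loadNewTail(fileSystem, tableLocation, Optional.of(10L), Optional.of(15L));
    List<DeltaLakeTransactionLogEntry> entries = tail.getFileEntries();
    long lastVersion = tail.getVersion(); // 15, if all five files existed
}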

Example 5 with DeltaLakeTransactionLogEntry

Use of io.trino.plugin.deltalake.transactionlog.DeltaLakeTransactionLogEntry in project trino by trinodb.

From class VacuumProcedure, method doVacuum.

private void doVacuum(ConnectorSession session, String schema, String table, String retention) throws IOException {
    checkProcedureArgument(schema != null, "schema_name cannot be null");
    checkProcedureArgument(!schema.isEmpty(), "schema_name cannot be empty");
    checkProcedureArgument(table != null, "table_name cannot be null");
    checkProcedureArgument(!table.isEmpty(), "table_name cannot be empty");
    checkProcedureArgument(retention != null, "retention cannot be null");
    Duration retentionDuration = Duration.valueOf(retention);
    Duration minRetention = getVacuumMinRetention(session);
    checkProcedureArgument(
            retentionDuration.compareTo(minRetention) >= 0,
            "Retention specified (%s) is shorter than the minimum retention configured in the system (%s). " +
                    "Minimum retention can be changed with %s configuration property or %s.%s session property",
            retentionDuration,
            minRetention,
            DeltaLakeConfig.VACUUM_MIN_RETENTION,
            catalogName,
            DeltaLakeSessionProperties.VACUUM_MIN_RETENTION);
    Instant threshold = Instant.now().minusMillis(retentionDuration.toMillis());
    DeltaLakeMetadata metadata = metadataFactory.create(session.getIdentity());
    SchemaTableName tableName = new SchemaTableName(schema, table);
    DeltaLakeTableHandle handle = metadata.getTableHandle(session, tableName);
    checkProcedureArgument(handle != null, "Table '%s' does not exist", tableName);
    TableSnapshot tableSnapshot = transactionLogAccess.loadSnapshot(tableName, new Path(handle.getLocation()), session);
    Path tableLocation = tableSnapshot.getTableLocation();
    Path transactionLogDir = getTransactionLogDir(tableLocation);
    FileSystem fileSystem = hdfsEnvironment.getFileSystem(new HdfsEnvironment.HdfsContext(session), tableLocation);
    String commonPathPrefix = tableLocation + "/";
    String queryId = session.getQueryId();
    // Retain all active files and every file removed by a "recent" transaction (except for the oldest "recent" one).
    // Any remaining files are not live and are not needed to read any "recent" snapshot.
    List<Long> recentVersions = transactionLogAccess.getPastTableVersions(fileSystem, transactionLogDir, threshold, tableSnapshot.getVersion());
    Set<String> retainedPaths = Stream.concat(
            transactionLogAccess.getActiveFiles(tableSnapshot, session).stream()
                    .map(AddFileEntry::getPath),
            transactionLogAccess.getJsonEntries(
                    fileSystem,
                    transactionLogDir,
                    // discard the oldest "recent" version, since we take RemoveFileEntry only, to identify files that are no longer
                    // active files, but still needed to read a "recent" snapshot
                    recentVersions.stream().sorted(naturalOrder()).skip(1).collect(toImmutableList()))
                    .map(DeltaLakeTransactionLogEntry::getRemove)
                    .filter(Objects::nonNull)
                    .map(RemoveFileEntry::getPath))
            .peek(path -> checkState(!path.startsWith(tableLocation.toString()), "Unexpected absolute path in transaction log: %s", path))
            .collect(toImmutableSet());
    log.debug("[%s] attempting to vacuum table %s [%s] with %s retention (expiry threshold %s). %s data file paths marked for retention", queryId, tableName, tableLocation, retention, threshold, retainedPaths.size());
    long nonFiles = 0;
    long allPathsChecked = 0;
    long transactionLogFiles = 0;
    long retainedKnownFiles = 0;
    long retainedUnknownFiles = 0;
    long removedFiles = 0;
    RemoteIterator<LocatedFileStatus> listing = fileSystem.listFiles(tableLocation, true);
    while (listing.hasNext()) {
        LocatedFileStatus fileStatus = listing.next();
        Path path = fileStatus.getPath();
        checkState(path.toString().startsWith(commonPathPrefix), "Unexpected path [%s] returned when listing files under [%s]", path, tableLocation);
        String relativePath = path.toString().substring(commonPathPrefix.length());
        if (relativePath.isEmpty()) {
            // A file returned for "tableLocation/"; this might be possible on S3.
            continue;
        }
        allPathsChecked++;
        // TODO Note: Databricks can delete directories during vacuum on s3. This might need to be revisited.
        if (!fileStatus.isFile()) {
            nonFiles++;
            continue;
        }
        // ignore tableLocation/_delta_log/**
        if (relativePath.equals(TRANSACTION_LOG_DIRECTORY) || relativePath.startsWith(TRANSACTION_LOG_DIRECTORY + "/")) {
            log.debug("[%s] skipping a file inside transaction log dir: %s", queryId, path);
            transactionLogFiles++;
            continue;
        }
        // skip retained files
        if (retainedPaths.contains(relativePath)) {
            log.debug("[%s] retaining a known file: %s", queryId, path);
            retainedKnownFiles++;
            continue;
        }
        // ignore recently created files
        long modificationTime = fileStatus.getModificationTime();
        Instant modificationInstant = Instant.ofEpochMilli(modificationTime);
        if (!modificationInstant.isBefore(threshold)) {
            log.debug("[%s] retaining an unknown file %s with modification time %s (%s)", queryId, path, modificationTime, modificationInstant);
            retainedUnknownFiles++;
            continue;
        }
        log.debug("[%s] deleting file [%s] with modification time %s (%s)", queryId, path, modificationTime, modificationInstant);
        if (!fileSystem.delete(path, false)) {
            throw new TrinoException(GENERIC_INTERNAL_ERROR, "Failed to delete file: " + path);
        }
        removedFiles++;
    }
    log.info("[%s] finished vacuuming table %s [%s]: files checked: %s; non-files: %s; metadata files: %s; retained known files: %s; retained unknown files: %s; removed files: %s", queryId, tableName, tableLocation, allPathsChecked, nonFiles, transactionLogFiles, retainedKnownFiles, retainedUnknownFiles, removedFiles);
}
Also used: Path(org.apache.hadoop.fs.Path) TransactionLogUtil.getTransactionLogDir(io.trino.plugin.deltalake.transactionlog.TransactionLogUtil.getTransactionLogDir) MethodHandle(java.lang.invoke.MethodHandle) Provider(javax.inject.Provider) Comparator.naturalOrder(java.util.Comparator.naturalOrder) Logger(io.airlift.log.Logger) FileSystem(org.apache.hadoop.fs.FileSystem) DeltaLakeMetadataFactory(io.trino.plugin.deltalake.DeltaLakeMetadataFactory) TableSnapshot(io.trino.plugin.deltalake.transactionlog.TableSnapshot) MethodHandleUtil.methodHandle(io.trino.spi.block.MethodHandleUtil.methodHandle) DeltaLakeSessionProperties.getVacuumMinRetention(io.trino.plugin.deltalake.DeltaLakeSessionProperties.getVacuumMinRetention) Duration(io.airlift.units.Duration) AddFileEntry(io.trino.plugin.deltalake.transactionlog.AddFileEntry) RemoveFileEntry(io.trino.plugin.deltalake.transactionlog.RemoveFileEntry) DeltaLakeTransactionLogEntry(io.trino.plugin.deltalake.transactionlog.DeltaLakeTransactionLogEntry) TransactionLogAccess(io.trino.plugin.deltalake.transactionlog.TransactionLogAccess) Inject(javax.inject.Inject) VARCHAR(io.trino.spi.type.VarcharType.VARCHAR) ImmutableList(com.google.common.collect.ImmutableList) Procedure(io.trino.spi.procedure.Procedure) DeltaLakeMetadata(io.trino.plugin.deltalake.DeltaLakeMetadata) Objects.requireNonNull(java.util.Objects.requireNonNull) ImmutableSet.toImmutableSet(com.google.common.collect.ImmutableSet.toImmutableSet) DeltaLakeTableHandle(io.trino.plugin.deltalake.DeltaLakeTableHandle) Procedures.checkProcedureArgument(io.trino.plugin.deltalake.procedure.Procedures.checkProcedureArgument) Argument(io.trino.spi.procedure.Procedure.Argument) TRANSACTION_LOG_DIRECTORY(io.trino.plugin.deltalake.transactionlog.TransactionLogUtil.TRANSACTION_LOG_DIRECTORY) HdfsEnvironment(io.trino.plugin.hive.HdfsEnvironment) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) Set(java.util.Set) TrinoException(io.trino.spi.TrinoException) IOException(java.io.IOException) ConnectorSession(io.trino.spi.connector.ConnectorSession) Instant(java.time.Instant) CatalogName(io.trino.plugin.base.CatalogName) ThreadContextClassLoader(io.trino.spi.classloader.ThreadContextClassLoader) SchemaTableName(io.trino.spi.connector.SchemaTableName) GENERIC_INTERNAL_ERROR(io.trino.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR) String.format(java.lang.String.format) Preconditions.checkState(com.google.common.base.Preconditions.checkState) Objects(java.util.Objects) List(java.util.List) Stream(java.util.stream.Stream) DeltaLakeConfig(io.trino.plugin.deltalake.DeltaLakeConfig) DeltaLakeSessionProperties(io.trino.plugin.deltalake.DeltaLakeSessionProperties) RemoteIterator(org.apache.hadoop.fs.RemoteIterator)
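
The retention gate at the top of doVacuum is plain airlift Duration arithmetic. A small standalone sketch of the same check and threshold computation follows; the 7d minimum here is an assumed configuration value, used only for illustration:

static Instant computeVacuumThreshold(String retention) {
    Duration retentionDuration = Duration.valueOf(retention); // e.g. "8d"
    Duration minRetention = Duration.valueOf("7d"); // assumed configured minimum
    // Mirrors the checkProcedureArgument gate: reject a retention shorter than the minimum
    if (retentionDuration.compareTo(minRetention) < 0) {
        throw new IllegalArgumentException("Retention " + retentionDuration + " is shorter than the minimum " + minRetention);
    }
    // Files modified at or after this instant are never deleted, even when unreferenced
    return Instant.now().minusMillis(retentionDuration.toMillis());
}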

Aggregations

DeltaLakeTransactionLogEntry (io.trino.plugin.deltalake.transactionlog.DeltaLakeTransactionLogEntry): 10 usages
Path (org.apache.hadoop.fs.Path): 6 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 5 usages
ImmutableList (com.google.common.collect.ImmutableList): 4 usages
HdfsEnvironment (io.trino.plugin.hive.HdfsEnvironment): 4 usages
IOException (java.io.IOException): 4 usages
AddFileEntry (io.trino.plugin.deltalake.transactionlog.AddFileEntry): 3 usages
TransactionLogUtil.getTransactionLogJsonEntryPath (io.trino.plugin.deltalake.transactionlog.TransactionLogUtil.getTransactionLogJsonEntryPath): 3 usages
List (java.util.List): 3 usages
Test (org.testng.annotations.Test): 3 usages
MetadataEntry (io.trino.plugin.deltalake.transactionlog.MetadataEntry): 2 usages
RemoveFileEntry (io.trino.plugin.deltalake.transactionlog.RemoveFileEntry): 2 usages
TransactionLogUtil.getTransactionLogDir (io.trino.plugin.deltalake.transactionlog.TransactionLogUtil.getTransactionLogDir): 2 usages
FileFormatDataSourceStats (io.trino.plugin.hive.FileFormatDataSourceStats): 2 usages
URI (java.net.URI): 2 usages
Objects.requireNonNull (java.util.Objects.requireNonNull): 2 usages
Preconditions.checkState (com.google.common.base.Preconditions.checkState): 1 usage
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 1 usage
ImmutableSet.toImmutableSet (com.google.common.collect.ImmutableSet.toImmutableSet): 1 usage
Logger (io.airlift.log.Logger): 1 usage