Use of io.trino.plugin.deltalake.DeltaLakeMetadata in project trino by trinodb.
From the class TestDeltaLakeGlueMetastore, the method testHideNonDeltaLakeTable:
@Test
public void testHideNonDeltaLakeTable()
        throws Exception
{
    SchemaTableName deltaLakeTable = new SchemaTableName(databaseName, "delta_lake_table_" + randomName());
    SchemaTableName nonDeltaLakeTable1 = new SchemaTableName(databaseName, "hive_table_" + randomName());
    SchemaTableName nonDeltaLakeTable2 = new SchemaTableName(databaseName, "hive_table_" + randomName());

    String deltaLakeTableLocation = tableLocation(deltaLakeTable);
    createTable(deltaLakeTable, deltaLakeTableLocation, tableBuilder -> {
        tableBuilder.setParameter(TABLE_PROVIDER_PROPERTY, TABLE_PROVIDER_VALUE);
        tableBuilder.setParameter(LOCATION_PROPERTY, deltaLakeTableLocation);
        tableBuilder.getStorageBuilder()
                .setStorageFormat(DELTA_STORAGE_FORMAT)
                .setSerdeParameters(ImmutableMap.of(DeltaLakeMetadata.PATH_PROPERTY, deltaLakeTableLocation))
                .setLocation(deltaLakeTableLocation);
    });
    createTransactionLog(deltaLakeTableLocation);

    createTable(nonDeltaLakeTable1, tableLocation(nonDeltaLakeTable1), tableBuilder -> {});
    createTable(nonDeltaLakeTable2, tableLocation(nonDeltaLakeTable2), tableBuilder ->
            tableBuilder.setParameter(TABLE_PROVIDER_PROPERTY, "foo"));

    DeltaLakeMetadata metadata = metadataFactory.create(SESSION.getIdentity());

    // Verify the tables were created as non-Delta Lake tables
    assertThatThrownBy(() -> metadata.getTableHandle(session, nonDeltaLakeTable1))
            .isInstanceOf(TrinoException.class)
            .hasMessage(format("%s is not a Delta Lake table", nonDeltaLakeTable1));
    assertThatThrownBy(() -> metadata.getTableHandle(session, nonDeltaLakeTable2))
            .isInstanceOf(TrinoException.class)
            .hasMessage(format("%s is not a Delta Lake table", nonDeltaLakeTable2));

    // TODO (https://github.com/trinodb/trino/issues/5426)
    // these assertions should use information_schema instead of metadata directly,
    // as information_schema or MetadataManager may apply additional logic

    // list all tables
    assertThat(metadata.listTables(session, Optional.empty()))
            .contains(deltaLakeTable)
            .doesNotContain(nonDeltaLakeTable1)
            .doesNotContain(nonDeltaLakeTable2);

    // list all tables in a schema
    assertThat(metadata.listTables(session, Optional.of(databaseName)))
            .contains(deltaLakeTable)
            .doesNotContain(nonDeltaLakeTable1)
            .doesNotContain(nonDeltaLakeTable2);

    // list all columns in a schema
    assertThat(listTableColumns(metadata, new SchemaTablePrefix(databaseName)))
            .contains(deltaLakeTable)
            .doesNotContain(nonDeltaLakeTable1)
            .doesNotContain(nonDeltaLakeTable2);

    // list all columns in a table
    assertThat(listTableColumns(metadata, new SchemaTablePrefix(databaseName, deltaLakeTable.getTableName())))
            .contains(deltaLakeTable)
            .doesNotContain(nonDeltaLakeTable1)
            .doesNotContain(nonDeltaLakeTable2);
    assertThat(listTableColumns(metadata, new SchemaTablePrefix(databaseName, nonDeltaLakeTable1.getTableName()))).isEmpty();
    assertThat(listTableColumns(metadata, new SchemaTablePrefix(databaseName, nonDeltaLakeTable2.getTableName()))).isEmpty();
}
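The test relies on the provider parameter (TABLE_PROVIDER_PROPERTY / TABLE_PROVIDER_VALUE) to mark a metastore table as a Delta Lake table. As a minimal sketch of that classification, assuming the Databricks-compatible parameter key spark.sql.sources.provider (the helper below is illustrative, not Trino's actual implementation):

import java.util.Map;

class DeltaTableDetection
{
    // Hypothetical helper: a table counts as Delta Lake only when its metastore
    // parameters carry provider=delta; plain Hive tables (no provider) and tables
    // with another provider (e.g. "foo", as in nonDeltaLakeTable2) are hidden.
    static boolean isDeltaLakeTable(Map<String, String> tableParameters)
    {
        return "delta".equalsIgnoreCase(tableParameters.get("spark.sql.sources.provider"));
    }
}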
Use of io.trino.plugin.deltalake.DeltaLakeMetadata in project trino by trinodb.
From the class DropExtendedStatsProcedure, the method dropStats:
public void dropStats(ConnectorSession session, String schema, String table)
{
    checkProcedureArgument(schema != null, "schema_name cannot be null");
    checkProcedureArgument(table != null, "table_name cannot be null");

    SchemaTableName name = new SchemaTableName(schema, table);
    DeltaLakeMetadata metadata = metadataFactory.create(session.getIdentity());
    if (metadata.getTableHandle(session, name) == null) {
        throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, format("Table '%s' does not exist", name));
    }
    statsAccess.deleteDeltaLakeStatistics(session, metadata.getMetastore().getTableLocation(name, session));
}
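This method backs the connector's drop_extended_stats procedure; per the Trino documentation it is called from SQL as CALL catalog_name.system.drop_extended_stats('schema_name', 'table_name'). The getTableHandle null check turns a misspelled table name into a clean INVALID_PROCEDURE_ARGUMENT error instead of a failure deep inside the statistics store.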
Use of io.trino.plugin.deltalake.DeltaLakeMetadata in project trino by trinodb.
From the class VacuumProcedure, the method doVacuum:
private void doVacuum(ConnectorSession session, String schema, String table, String retention)
        throws IOException
{
    checkProcedureArgument(schema != null, "schema_name cannot be null");
    checkProcedureArgument(!schema.isEmpty(), "schema_name cannot be empty");
    checkProcedureArgument(table != null, "table_name cannot be null");
    checkProcedureArgument(!table.isEmpty(), "table_name cannot be empty");
    checkProcedureArgument(retention != null, "retention cannot be null");

    Duration retentionDuration = Duration.valueOf(retention);
    Duration minRetention = getVacuumMinRetention(session);
    checkProcedureArgument(
            retentionDuration.compareTo(minRetention) >= 0,
            "Retention specified (%s) is shorter than the minimum retention configured in the system (%s). " +
                    "Minimum retention can be changed with %s configuration property or %s.%s session property",
            retentionDuration,
            minRetention,
            DeltaLakeConfig.VACUUM_MIN_RETENTION,
            catalogName,
            DeltaLakeSessionProperties.VACUUM_MIN_RETENTION);
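    // Worked example (assuming the connector's default minimum retention of 7 days):
    // a retention argument of "5d" fails the check above, while "8d" passes and vacuum proceeds.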
    Instant threshold = Instant.now().minusMillis(retentionDuration.toMillis());

    DeltaLakeMetadata metadata = metadataFactory.create(session.getIdentity());
    SchemaTableName tableName = new SchemaTableName(schema, table);
    DeltaLakeTableHandle handle = metadata.getTableHandle(session, tableName);
    checkProcedureArgument(handle != null, "Table '%s' does not exist", tableName);

    TableSnapshot tableSnapshot = transactionLogAccess.loadSnapshot(tableName, new Path(handle.getLocation()), session);
    Path tableLocation = tableSnapshot.getTableLocation();
    Path transactionLogDir = getTransactionLogDir(tableLocation);
    FileSystem fileSystem = hdfsEnvironment.getFileSystem(new HdfsEnvironment.HdfsContext(session), tableLocation);
    String commonPathPrefix = tableLocation + "/";
    String queryId = session.getQueryId();

    // Retain all active files and every file removed by a "recent" transaction (except for the oldest "recent").
    // Any remaining files are not live, and not needed to read any "recent" snapshot.
    List<Long> recentVersions = transactionLogAccess.getPastTableVersions(fileSystem, transactionLogDir, threshold, tableSnapshot.getVersion());
    Set<String> retainedPaths = Stream.concat(
            transactionLogAccess.getActiveFiles(tableSnapshot, session).stream()
                    .map(AddFileEntry::getPath),
            transactionLogAccess.getJsonEntries(
                    fileSystem,
                    transactionLogDir,
                    // discard the oldest "recent" version, since we take RemoveFileEntry only,
                    // to identify files that are no longer active files, but still needed to read a "recent" snapshot
                    recentVersions.stream().sorted(naturalOrder()).skip(1).collect(toImmutableList()))
                    .map(DeltaLakeTransactionLogEntry::getRemove)
                    .filter(Objects::nonNull)
                    .map(RemoveFileEntry::getPath))
            .peek(path -> checkState(!path.startsWith(tableLocation.toString()), "Unexpected absolute path in transaction log: %s", path))
            .collect(toImmutableSet());
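    // At this point retainedPaths holds table-relative paths only; the checkState above
    // rejects absolute paths, which this vacuum logic does not expect in the transaction log.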
    log.debug(
            "[%s] attempting to vacuum table %s [%s] with %s retention (expiry threshold %s). %s data file paths marked for retention",
            queryId,
            tableName,
            tableLocation,
            retention,
            threshold,
            retainedPaths.size());

    long nonFiles = 0;
    long allPathsChecked = 0;
    long transactionLogFiles = 0;
    long retainedKnownFiles = 0;
    long retainedUnknownFiles = 0;
    long removedFiles = 0;

    RemoteIterator<LocatedFileStatus> listing = fileSystem.listFiles(tableLocation, true);
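    // listFiles(tableLocation, true) walks the table location recursively, so a single
    // listing covers data files nested inside partition directories.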
    while (listing.hasNext()) {
        LocatedFileStatus fileStatus = listing.next();
        Path path = fileStatus.getPath();
        checkState(
                path.toString().startsWith(commonPathPrefix),
                "Unexpected path [%s] returned when listing files under [%s]",
                path,
                tableLocation);
        String relativePath = path.toString().substring(commonPathPrefix.length());
        if (relativePath.isEmpty()) {
            // A file returned for "tableLocation/"; might be possible on S3
            continue;
        }
        allPathsChecked++;

        // TODO Note: Databricks can delete directories during vacuum on S3. This might need to be revisited.
        if (!fileStatus.isFile()) {
            nonFiles++;
            continue;
        }

        // ignore tableLocation/_delta_log/**
        if (relativePath.equals(TRANSACTION_LOG_DIRECTORY) || relativePath.startsWith(TRANSACTION_LOG_DIRECTORY + "/")) {
            log.debug("[%s] skipping a file inside transaction log dir: %s", queryId, path);
            transactionLogFiles++;
            continue;
        }

        // skip retained files
        if (retainedPaths.contains(relativePath)) {
            log.debug("[%s] retaining a known file: %s", queryId, path);
            retainedKnownFiles++;
            continue;
        }

        // ignore recently created files
        long modificationTime = fileStatus.getModificationTime();
        Instant modificationInstant = Instant.ofEpochMilli(modificationTime);
        if (!modificationInstant.isBefore(threshold)) {
            log.debug("[%s] retaining an unknown file %s with modification time %s (%s)", queryId, path, modificationTime, modificationInstant);
            retainedUnknownFiles++;
            continue;
        }

        log.debug("[%s] deleting file [%s] with modification time %s (%s)", queryId, path, modificationTime, modificationInstant);
        if (!fileSystem.delete(path, false)) {
            throw new TrinoException(GENERIC_INTERNAL_ERROR, "Failed to delete file: " + path);
        }
        removedFiles++;
    }
    log.info(
            "[%s] finished vacuuming table %s [%s]: files checked: %s; non-files: %s; metadata files: %s; retained known files: %s; retained unknown files: %s; removed files: %s",
            queryId,
            tableName,
            tableLocation,
            allPathsChecked,
            nonFiles,
            transactionLogFiles,
            retainedKnownFiles,
            retainedUnknownFiles,
            removedFiles);
}
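doVacuum backs the connector's vacuum procedure; per the Trino documentation it is invoked from SQL as CALL catalog_name.system.vacuum('schema_name', 'table_name', '7d'), where the retention string is parsed by Airlift's Duration.valueOf (so values such as '7d' or '168h' are accepted). A file is only deleted when it is neither referenced by any "recent" snapshot nor newer than the retention threshold by modification time.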