Use of io.trino.plugin.hive.HdfsEnvironment in project trino by trinodb.
From class TestSharedGlueMetastore, method createQueryRunner.
@Override
protected QueryRunner createQueryRunner() throws Exception
{
    Session icebergSession = testSessionBuilder().setCatalog(ICEBERG_CATALOG).setSchema(schema).build();
    Session hiveSession = testSessionBuilder().setCatalog(HIVE_CATALOG).setSchema(schema).build();

    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(icebergSession).build();
    queryRunner.installPlugin(new TpchPlugin());
    queryRunner.createCatalog("tpch", "tpch");

    this.dataDirectory = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data");
    this.dataDirectory.toFile().deleteOnExit();

    // Iceberg catalog backed by the Glue metastore
    queryRunner.installPlugin(new IcebergPlugin());
    queryRunner.createCatalog(ICEBERG_CATALOG, "iceberg", ImmutableMap.of(
            "iceberg.catalog.type", "glue",
            "hive.metastore.glue.default-warehouse-dir", dataDirectory.toString()));

    // Hive catalogs sharing the same Glue metastore
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(
            new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of()),
            hdfsConfig,
            new NoHdfsAuthentication());
    this.glueMetastore = new GlueHiveMetastore(
            hdfsEnvironment,
            new GlueHiveMetastoreConfig(),
            directExecutor(),
            new DefaultGlueColumnStatisticsProviderFactory(new GlueHiveMetastoreConfig(), directExecutor(), directExecutor()),
            Optional.empty(),
            table -> true);
    queryRunner.installPlugin(new TestingHivePlugin(glueMetastore));
    queryRunner.createCatalog(HIVE_CATALOG, "hive");
    queryRunner.createCatalog("hive_with_redirections", "hive", ImmutableMap.of("hive.iceberg-catalog-name", "iceberg"));

    queryRunner.execute("CREATE SCHEMA " + schema + " WITH (location = '" + dataDirectory.toString() + "')");
    copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, icebergSession, ImmutableList.of(TpchTable.NATION));
    copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, hiveSession, ImmutableList.of(TpchTable.REGION));
    return queryRunner;
}
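A runner like this makes the same Glue-backed schema visible through both catalogs, so a test can query either side directly. A minimal sketch, assuming the runner above and the assertQuery helper inherited from AbstractTestQueryFramework (the test name is made up; the row counts are those of the TPC-H tiny tables copied above):

@Test
public void testSchemaIsSharedAcrossCatalogs()
{
    // nation was copied through the Iceberg catalog, region through the Hive catalog;
    // both resolve against the single Glue metastore configured in createQueryRunner
    assertQuery("SELECT count(*) FROM " + ICEBERG_CATALOG + "." + schema + ".nation", "VALUES 25");
    assertQuery("SELECT count(*) FROM " + HIVE_CATALOG + "." + schema + ".region", "VALUES 5");
}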
Use of io.trino.plugin.hive.HdfsEnvironment in project trino by trinodb.
From class CheckpointWriterManager, method writeCheckpoint.
public void writeCheckpoint(ConnectorSession session, TableSnapshot snapshot)
{
    try {
        SchemaTableName table = snapshot.getTable();
        long newCheckpointVersion = snapshot.getVersion();
        snapshot.getLastCheckpointVersion().ifPresent(lastCheckpoint -> checkArgument(
                newCheckpointVersion > lastCheckpoint,
                "written checkpoint %s for table %s must be greater than last checkpoint version %s",
                newCheckpointVersion,
                table,
                lastCheckpoint));

        CheckpointBuilder checkpointBuilder = new CheckpointBuilder();

        FileSystem fileSystem = hdfsEnvironment.getFileSystem(new HdfsEnvironment.HdfsContext(session), snapshot.getTableLocation());
        Optional<DeltaLakeTransactionLogEntry> checkpointMetadataLogEntry = snapshot
                .getCheckpointTransactionLogEntries(session, ImmutableSet.of(METADATA), checkpointSchemaManager, typeManager, fileSystem, hdfsEnvironment, fileFormatDataSourceStats)
                .collect(toOptional());
        if (checkpointMetadataLogEntry.isPresent()) {
            // TODO HACK: this call is required only to ensure that cachedMetadataEntry is set in the snapshot, so we can read add entries below.
            // This should be reworked so the metadata entry is passed explicitly to getCheckpointTransactionLogEntries, and `setCachedMetadata`
            // should be removed from TableSnapshot to make it immutable. It would also be more correct to reuse the metadata entry obtained above
            // when reading the other checkpoint entries, but using the newer one should do no harm.
            checkState(transactionLogAccess.getMetadataEntry(snapshot, session).isPresent(), "metadata entry in snapshot null");

            // register the metadata entry in the writer
            checkState(checkpointMetadataLogEntry.get().getMetaData() != null, "metaData not present in log entry");
            checkpointBuilder.addLogEntry(checkpointMetadataLogEntry.get());

            // read the remaining entries from the checkpoint and register them in the writer
            snapshot.getCheckpointTransactionLogEntries(session, ImmutableSet.of(PROTOCOL, TRANSACTION, ADD, REMOVE, COMMIT), checkpointSchemaManager, typeManager, fileSystem, hdfsEnvironment, fileFormatDataSourceStats)
                    .forEach(checkpointBuilder::addLogEntry);
        }
        snapshot.getJsonTransactionLogEntries().forEach(checkpointBuilder::addLogEntry);

        Path transactionLogDirectory = getTransactionLogDir(snapshot.getTableLocation());
        Path targetFile = new Path(transactionLogDirectory, String.format("%020d.checkpoint.parquet", newCheckpointVersion));
        CheckpointWriter checkpointWriter = new CheckpointWriter(typeManager, checkpointSchemaManager, hdfsEnvironment);
        CheckpointEntries checkpointEntries = checkpointBuilder.build();
        checkpointWriter.write(session, checkpointEntries, targetFile);

        // update the last checkpoint file
        LastCheckpoint newLastCheckpoint = new LastCheckpoint(newCheckpointVersion, checkpointEntries.size(), Optional.empty());
        try (OutputStream outputStream = fileSystem.create(new Path(transactionLogDirectory, LAST_CHECKPOINT_FILENAME), true)) {
            outputStream.write(lastCheckpointCodec.toJsonBytes(newLastCheckpoint));
        }
    }
    catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
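For reference, the checkpoint written above lands next to the JSON commit files in the table's _delta_log directory, with the version zero-padded to 20 digits. A small illustrative sketch of the resulting layout (the table location and version number below are made up; LAST_CHECKPOINT_FILENAME refers to Delta Lake's _last_checkpoint file):

// Illustrative only: file names produced for checkpoint version 10 of a table at s3://bucket/table
Path tableLocation = new Path("s3://bucket/table");
Path transactionLogDir = new Path(tableLocation, "_delta_log");
Path checkpointFile = new Path(transactionLogDir, String.format("%020d.checkpoint.parquet", 10));
// -> s3://bucket/table/_delta_log/00000000000000000010.checkpoint.parquet
Path lastCheckpointFile = new Path(transactionLogDir, "_last_checkpoint");
// the JSON-serialized LastCheckpoint (version and entry count) is written here, replacing any previous one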
Use of io.trino.plugin.hive.HdfsEnvironment in project trino by trinodb.
From class AbstractTestDeltaLakeCreateTableStatistics, method getAddFileEntries.
protected List<AddFileEntry> getAddFileEntries(String tableName)
        throws IOException
{
    TestingConnectorContext context = new TestingConnectorContext();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    TransactionLogAccess transactionLogAccess = new TransactionLogAccess(
            context.getTypeManager(), new CheckpointSchemaManager(context.getTypeManager()), new DeltaLakeConfig(),
            new FileFormatDataSourceStats(), hdfsEnvironment, new ParquetReaderConfig(), new DeltaLakeConfig());
    return transactionLogAccess.getActiveFiles(
            transactionLogAccess.loadSnapshot(new SchemaTableName(SCHEMA, tableName), new Path(format("s3://%s/%s", bucketName, tableName)), SESSION),
            SESSION);
}
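A subclass test might use this helper roughly as follows. The table name is hypothetical, and the sketch assumes AddFileEntry exposes its parsed per-file statistics through getStats() and that AssertJ is on the classpath:

// Hypothetical usage: check that every active file recorded in the Delta log carries statistics
List<AddFileEntry> addFileEntries = getAddFileEntries("test_stats_table");
assertThat(addFileEntries).isNotEmpty();
for (AddFileEntry entry : addFileEntries) {
    assertThat(entry.getStats()).isPresent();
}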
Use of io.trino.plugin.hive.HdfsEnvironment in project trino by trinodb.
From class IcebergPageSourceProvider, method createOrcPageSource.
private static ReaderPageSource createOrcPageSource(
        HdfsEnvironment hdfsEnvironment,
        ConnectorIdentity identity,
        Configuration configuration,
        Path path,
        long start,
        long length,
        long fileSize,
        List<IcebergColumnHandle> columns,
        TupleDomain<IcebergColumnHandle> effectivePredicate,
        OrcReaderOptions options,
        FileFormatDataSourceStats stats,
        TypeManager typeManager,
        Optional<NameMapping> nameMapping)
{
    OrcDataSource orcDataSource = null;
    try {
        // open the split through the HDFS environment, running as the given identity
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(identity, path, configuration);
        FSDataInputStream inputStream = hdfsEnvironment.doAs(identity, () -> fileSystem.open(path));
        orcDataSource = new HdfsOrcDataSource(new OrcDataSourceId(path.toString()), fileSize, options, inputStream, stats);

        OrcReader reader = OrcReader.createOrcReader(orcDataSource, options)
                .orElseThrow(() -> new TrinoException(ICEBERG_BAD_DATA, "ORC file is zero length"));

        // assign Iceberg field ids from the name mapping when the file was written without them
        List<OrcColumn> fileColumns = reader.getRootColumn().getNestedColumns();
        if (nameMapping.isPresent() && !hasIds(reader.getRootColumn())) {
            fileColumns = fileColumns.stream()
                    .map(orcColumn -> setMissingFieldIds(orcColumn, nameMapping.get(), ImmutableList.of(orcColumn.getColumnName())))
                    .collect(toImmutableList());
        }

        Map<Integer, OrcColumn> fileColumnsByIcebergId = mapIdsToOrcFileColumns(fileColumns);

        TupleDomainOrcPredicateBuilder predicateBuilder = TupleDomainOrcPredicate.builder()
                .setBloomFiltersEnabled(options.isBloomFiltersEnabled());
        Map<IcebergColumnHandle, Domain> effectivePredicateDomains = effectivePredicate.getDomains()
                .orElseThrow(() -> new IllegalArgumentException("Effective predicate is none"));

        Optional<ReaderColumns> columnProjections = projectColumns(columns);
        Map<Integer, List<List<Integer>>> projectionsByFieldId = columns.stream()
                .collect(groupingBy(
                        column -> column.getBaseColumnIdentity().getId(),
                        mapping(IcebergColumnHandle::getPath, toUnmodifiableList())));

        List<IcebergColumnHandle> readColumns = columnProjections
                .map(readerColumns -> (List<IcebergColumnHandle>) readerColumns.get().stream().map(IcebergColumnHandle.class::cast).collect(toImmutableList()))
                .orElse(columns);
        List<OrcColumn> fileReadColumns = new ArrayList<>(readColumns.size());
        List<Type> fileReadTypes = new ArrayList<>(readColumns.size());
        List<ProjectedLayout> projectedLayouts = new ArrayList<>(readColumns.size());
        List<ColumnAdaptation> columnAdaptations = new ArrayList<>(readColumns.size());
        for (IcebergColumnHandle column : readColumns) {
            verify(column.isBaseColumn(), "Column projections must be based from a root column");
            OrcColumn orcColumn = fileColumnsByIcebergId.get(column.getId());
            if (orcColumn != null) {
                Type readType = getOrcReadType(column.getType(), typeManager);
                if (column.getType() == UUID && !"UUID".equals(orcColumn.getAttributes().get(ICEBERG_BINARY_TYPE))) {
                    throw new TrinoException(ICEBERG_BAD_DATA, format("Expected ORC column for UUID data to be annotated with %s=UUID: %s", ICEBERG_BINARY_TYPE, orcColumn));
                }
                List<List<Integer>> fieldIdProjections = projectionsByFieldId.get(column.getId());
                ProjectedLayout projectedLayout = IcebergOrcProjectedLayout.createProjectedLayout(orcColumn, fieldIdProjections);
                int sourceIndex = fileReadColumns.size();
                columnAdaptations.add(ColumnAdaptation.sourceColumn(sourceIndex));
                fileReadColumns.add(orcColumn);
                fileReadTypes.add(readType);
                projectedLayouts.add(projectedLayout);
                // push down any predicate domain that applies to this base column
                for (Map.Entry<IcebergColumnHandle, Domain> domainEntry : effectivePredicateDomains.entrySet()) {
                    IcebergColumnHandle predicateColumn = domainEntry.getKey();
                    OrcColumn predicateOrcColumn = fileColumnsByIcebergId.get(predicateColumn.getId());
                    if (predicateOrcColumn != null && column.getColumnIdentity().equals(predicateColumn.getBaseColumnIdentity())) {
                        predicateBuilder.addColumn(predicateOrcColumn.getColumnId(), domainEntry.getValue());
                    }
                }
            }
            else {
                // the column does not exist in this file, so read it as nulls
                columnAdaptations.add(ColumnAdaptation.nullColumn(column.getType()));
            }
        }

        AggregatedMemoryContext memoryUsage = newSimpleAggregatedMemoryContext();
        OrcDataSourceId orcDataSourceId = orcDataSource.getId();
        OrcRecordReader recordReader = reader.createRecordReader(
                fileReadColumns,
                fileReadTypes,
                projectedLayouts,
                predicateBuilder.build(),
                start,
                length,
                UTC,
                memoryUsage,
                INITIAL_BATCH_SIZE,
                exception -> handleException(orcDataSourceId, exception),
                new IdBasedFieldMapperFactory(readColumns));

        return new ReaderPageSource(
                new OrcPageSource(recordReader, columnAdaptations, orcDataSource, Optional.empty(), Optional.empty(), memoryUsage, stats),
                columnProjections);
    }
    catch (Exception e) {
        if (orcDataSource != null) {
            try {
                orcDataSource.close();
            }
            catch (IOException ignored) {
            }
        }
        if (e instanceof TrinoException) {
            throw (TrinoException) e;
        }
        String message = format("Error opening Iceberg split %s (offset=%s, length=%s): %s", path, start, length, e.getMessage());
        if (e instanceof BlockMissingException) {
            throw new TrinoException(ICEBERG_MISSING_DATA, message, e);
        }
        throw new TrinoException(ICEBERG_CANNOT_OPEN_SPLIT, message, e);
    }
}
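The fileColumnsByIcebergId lookup above is what ties Iceberg field ids to physical ORC columns. As a rough sketch of what such a mapping can look like (this is not the actual mapIdsToOrcFileColumns helper; it assumes each ORC column stores its Iceberg field id in its attributes under the "iceberg.id" key and it ignores nested columns):

// Sketch only: map top-level ORC columns to the Iceberg field ids recorded in their attributes
private static Map<Integer, OrcColumn> mapTopLevelColumnsByFieldId(List<OrcColumn> fileColumns)
{
    ImmutableMap.Builder<Integer, OrcColumn> columnsById = ImmutableMap.builder();
    for (OrcColumn column : fileColumns) {
        String fieldId = column.getAttributes().get("iceberg.id"); // assumed attribute key
        if (fieldId != null) {
            columnsById.put(Integer.parseInt(fieldId), column);
        }
    }
    return columnsById.build();
}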
Use of io.trino.plugin.hive.HdfsEnvironment in project trino by trinodb.
From class IcebergFileWriterFactory, method createParquetWriter.
private IcebergFileWriter createParquetWriter(Path outputPath, Schema icebergSchema, JobConf jobConf, ConnectorSession session, HdfsContext hdfsContext)
{
    List<String> fileColumnNames = icebergSchema.columns().stream().map(Types.NestedField::name).collect(toImmutableList());
    List<Type> fileColumnTypes = icebergSchema.columns().stream().map(column -> toTrinoType(column.type(), typeManager)).collect(toImmutableList());
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(session.getIdentity(), outputPath, jobConf);
        Callable<Void> rollbackAction = () -> {
            fileSystem.delete(outputPath, false);
            return null;
        };
        ParquetWriterOptions parquetWriterOptions = ParquetWriterOptions.builder()
                .setMaxPageSize(getParquetWriterPageSize(session))
                .setMaxBlockSize(getParquetWriterBlockSize(session))
                .setBatchSize(getParquetWriterBatchSize(session))
                .build();
        return new IcebergParquetFileWriter(
                hdfsEnvironment.doAs(session.getIdentity(), () -> fileSystem.create(outputPath)),
                rollbackAction, fileColumnTypes, convert(icebergSchema, "table"), makeTypeMap(fileColumnTypes, fileColumnNames),
                parquetWriterOptions, IntStream.range(0, fileColumnNames.size()).toArray(),
                getCompressionCodec(session).getParquetCompressionCodec(), nodeVersion.toString(),
                outputPath, hdfsEnvironment, hdfsContext);
    }
    catch (IOException e) {
        throw new TrinoException(ICEBERG_WRITER_OPEN_ERROR, "Error creating Parquet file", e);
    }
}
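A writer factory like this typically exposes one create method per supported file format and dispatches on the table's format. A hedged sketch of such a dispatch (method names other than createParquetWriter are assumed for illustration and may not match the real IcebergFileWriterFactory):

// Illustrative dispatch; only the PARQUET branch corresponds to the method shown above
public IcebergFileWriter createDataFileWriter(Path outputPath, Schema icebergSchema, JobConf jobConf, ConnectorSession session, HdfsContext hdfsContext, FileFormat fileFormat)
{
    if (fileFormat == FileFormat.PARQUET) {
        return createParquetWriter(outputPath, icebergSchema, jobConf, session, hdfsContext);
    }
    throw new TrinoException(NOT_SUPPORTED, "File format not supported for writing: " + fileFormat);
}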