Use of io.trino.plugin.hive.HdfsEnvironment in project trino by trinodb.
The class FileHiveMetastore, method createTestingFileHiveMetastore.
@VisibleForTesting
public static FileHiveMetastore createTestingFileHiveMetastore(File catalogDirectory)
{
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(
            new HdfsConfigurationInitializer(hdfsConfig),
            ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    return new FileHiveMetastore(
            new NodeVersion("testversion"),
            hdfsEnvironment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig()
                    .setCatalogDirectory(catalogDirectory.toURI().toString())
                    .setMetastoreUser("test"));
}
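A minimal usage sketch for this factory, assuming a throwaway catalog directory and an illustrative database name (neither appears in the Trino sources; the Database builder mirrors the one used in TestConnectorPushdownRulesWithHive below):

// Sketch only: the directory and database name are illustrative.
File catalogDirectory = java.nio.file.Files.createTempDirectory("file-metastore").toFile();
FileHiveMetastore metastore = createTestingFileHiveMetastore(catalogDirectory);
metastore.createDatabase(Database.builder()
        .setDatabaseName("test_schema")
        .setOwnerName(Optional.of("public"))
        .setOwnerType(Optional.of(PrincipalType.ROLE))
        .build());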
Use of io.trino.plugin.hive.HdfsEnvironment in project trino by trinodb.
The class TestCheckpointWriter, method readCheckpoint.
private CheckpointEntries readCheckpoint(Path checkpointPath, MetadataEntry metadataEntry, boolean rowStatisticsEnabled)
        throws IOException
{
    FileSystem fileSystem = hdfsEnvironment.getFileSystem(new HdfsEnvironment.HdfsContext(session), checkpointPath);
    FileStatus fileStatus = fileSystem.getFileStatus(checkpointPath);
    Iterator<DeltaLakeTransactionLogEntry> checkpointEntryIterator = new CheckpointEntryIterator(
            checkpointPath,
            session,
            fileStatus.getLen(),
            checkpointSchemaManager,
            typeManager,
            ImmutableSet.of(METADATA, PROTOCOL, TRANSACTION, ADD, REMOVE),
            Optional.of(metadataEntry),
            hdfsEnvironment,
            new FileFormatDataSourceStats(),
            new ParquetReaderConfig().toParquetReaderOptions(),
            rowStatisticsEnabled);
    CheckpointBuilder checkpointBuilder = new CheckpointBuilder();
    while (checkpointEntryIterator.hasNext()) {
        DeltaLakeTransactionLogEntry entry = checkpointEntryIterator.next();
        checkpointBuilder.addLogEntry(entry);
    }
    return checkpointBuilder.build();
}
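A hypothetical call site for this helper; the checkpoint path is illustrative, and the getMetadataEntry accessor on CheckpointEntries is an assumption rather than a confirmed API:

// Assumed usage: the path and the accessor name are illustrative.
Path checkpointPath = new Path("/tmp/delta/_delta_log/000010.checkpoint.parquet");
CheckpointEntries entries = readCheckpoint(checkpointPath, metadataEntry, true);
assertEquals(entries.getMetadataEntry(), metadataEntry);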
Use of io.trino.plugin.hive.HdfsEnvironment in project trino by trinodb.
The class TestTableSnapshot, method setUp.
@BeforeMethod
public void setUp()
        throws IOException, URISyntaxException
{
    checkpointSchemaManager = new CheckpointSchemaManager(typeManager);
    URI deltaLogPath = getClass().getClassLoader().getResource("databricks/person").toURI();
    tableLocation = new Path(deltaLogPath);
    Configuration conf = new Configuration(false);
    FileSystem filesystem = tableLocation.getFileSystem(conf);
    accessTrackingFileSystem = new AccessTrackingFileSystem(filesystem);
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(
            new HdfsConfigurationInitializer(hdfsConfig),
            ImmutableSet.of());
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
}
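The HdfsConfig, HiveHdfsConfiguration, HdfsEnvironment construction is identical in every snippet on this page; a test suite could factor it into a helper like the sketch below (the method name is ours, not Trino's):

// The helper name is an assumption; the body mirrors the construction above.
private static HdfsEnvironment createTestingHdfsEnvironment()
{
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(
            new HdfsConfigurationInitializer(hdfsConfig),
            ImmutableSet.of());
    return new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
}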
Use of io.trino.plugin.hive.HdfsEnvironment in project trino by trinodb.
The class TestTransactionLogAccess, method setupTransactionLogAccess.
private void setupTransactionLogAccess(String tableName, Path tableLocation)
        throws IOException
{
    TestingConnectorContext context = new TestingConnectorContext();
    TypeManager typeManager = context.getTypeManager();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(
            new HdfsConfigurationInitializer(hdfsConfig),
            ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    FileFormatDataSourceStats fileFormatDataSourceStats = new FileFormatDataSourceStats();
    transactionLogAccess = new TrackingTransactionLogAccess(
            tableName,
            tableLocation,
            SESSION,
            typeManager,
            new CheckpointSchemaManager(typeManager),
            new DeltaLakeConfig(),
            fileFormatDataSourceStats,
            hdfsEnvironment,
            new ParquetReaderConfig());
    DeltaLakeTableHandle tableHandle = new DeltaLakeTableHandle(
            "schema",
            tableName,
            "location", // ignored
            Optional.empty(),
            TupleDomain.none(),
            TupleDomain.none(),
            Optional.empty(),
            Optional.empty(),
            Optional.empty(),
            Optional.empty(),
            Optional.empty(),
            0);
    tableSnapshot = transactionLogAccess.loadSnapshot(tableHandle.getSchemaTableName(), tableLocation, SESSION);
}
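Once loadSnapshot has returned, a test can read table state through the snapshot. The getActiveFiles call below is an assumption about this Trino version's TransactionLogAccess API, shown only to illustrate how the fields set up here are typically consumed:

// Assumed follow-up: the getActiveFiles signature is not confirmed by this page.
List<AddFileEntry> activeFiles = transactionLogAccess.getActiveFiles(tableSnapshot, SESSION);
assertFalse(activeFiles.isEmpty());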
Use of io.trino.plugin.hive.HdfsEnvironment in project trino by trinodb.
The class TestConnectorPushdownRulesWithHive, method createLocalQueryRunner.
@Override
protected Optional<LocalQueryRunner> createLocalQueryRunner()
{
    baseDir = Files.createTempDir();
    HdfsConfig config = new HdfsConfig();
    HdfsConfiguration configuration = new HiveHdfsConfiguration(
            new HdfsConfigurationInitializer(config),
            ImmutableSet.of());
    HdfsEnvironment environment = new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());
    metastore = new FileHiveMetastore(
            new NodeVersion("test_version"),
            environment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig()
                    .setCatalogDirectory(baseDir.toURI().toString())
                    .setMetastoreUser("test"));
    Database database = Database.builder()
            .setDatabaseName(SCHEMA_NAME)
            .setOwnerName(Optional.of("public"))
            .setOwnerType(Optional.of(PrincipalType.ROLE))
            .build();
    metastore.createDatabase(database);
    LocalQueryRunner queryRunner = LocalQueryRunner.create(HIVE_SESSION);
    queryRunner.createCatalog(HIVE_CATALOG_NAME, new TestingHiveConnectorFactory(metastore), ImmutableMap.of());
    return Optional.of(queryRunner);
}
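With the catalog registered, the returned runner can execute SQL directly; a minimal sketch, assuming HIVE_SESSION sets the hive catalog and schema as session defaults (the table name is made up):

// Illustrative only: the table name and session defaults are assumptions.
queryRunner.execute("CREATE TABLE test_table (a bigint, b varchar)");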