Use of io.trino.plugin.deltalake.metastore.HiveMetastoreBackedDeltaLakeMetastore in project trino by trinodb.
The class DeltaLakeMetadataFactory, method create.
public DeltaLakeMetadata create(ConnectorIdentity identity)
{
    // create per-transaction cache over hive metastore interface
    CachingHiveMetastore cachingHiveMetastore = memoizeMetastore(
            hiveMetastoreFactory.createMetastore(Optional.of(identity)),
            perTransactionMetastoreCacheMaximumSize);
    HiveMetastoreBackedDeltaLakeMetastore deltaLakeMetastore = new HiveMetastoreBackedDeltaLakeMetastore(
            cachingHiveMetastore, transactionLogAccess, typeManager, statisticsAccess);
    return new DeltaLakeMetadata(
            deltaLakeMetastore, hdfsEnvironment, typeManager, domainCompactionThreshold, hideNonDeltaLakeTables,
            unsafeWritesEnabled, dataFileInfoCodec, deleteResultJsonCodec, transactionLogWriterFactory, nodeManager,
            checkpointWriterManager, checkpointWritingInterval, ignoreCheckpointWriteFailures,
            deleteSchemaLocationsFallback, statisticsAccess);
}
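For context, the factory is intended to be called once per transaction, so each transaction gets its own DeltaLakeMetadata and its own memoizing metastore cache for the given identity. The following is a minimal sketch of such a caller; the helper class is hypothetical and not part of the Trino source, while ConnectorIdentity.ofUser is a Trino SPI factory method.

import io.trino.plugin.deltalake.DeltaLakeMetadata;
import io.trino.plugin.deltalake.DeltaLakeMetadataFactory;
import io.trino.spi.security.ConnectorIdentity;

// Hypothetical helper: builds a fresh DeltaLakeMetadata for one transaction.
// Each call produces a new per-transaction metastore cache, mirroring the factory method above.
final class PerTransactionMetadataExample
{
    private PerTransactionMetadataExample() {}

    static DeltaLakeMetadata metadataFor(DeltaLakeMetadataFactory factory, String user)
    {
        ConnectorIdentity identity = ConnectorIdentity.ofUser(user);
        return factory.create(identity);
    }
}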
Use of io.trino.plugin.deltalake.metastore.HiveMetastoreBackedDeltaLakeMetastore in project trino by trinodb.
The class TestDeltaLakeMetadata, method setUp.
@BeforeClass
public void setUp()
        throws IOException
{
    temporaryCatalogDirectory = createTempDirectory("HiveCatalog").toFile();
    Map<String, String> config = ImmutableMap.<String, String>builder()
            .put("hive.metastore", "file")
            .put("hive.metastore.catalog.dir", temporaryCatalogDirectory.getPath())
            .buildOrThrow();

    Bootstrap app = new Bootstrap(
            // connector dependencies
            new JsonModule(),
            binder -> {
                ConnectorContext context = new TestingConnectorContext();
                binder.bind(NodeVersion.class).toInstance(new NodeVersion(context.getNodeManager().getCurrentNode().getVersion()));
                binder.bind(CatalogName.class).toInstance(new CatalogName("test"));
                binder.bind(TypeManager.class).toInstance(context.getTypeManager());
                binder.bind(NodeManager.class).toInstance(context.getNodeManager());
                binder.bind(PageIndexerFactory.class).toInstance(context.getPageIndexerFactory());
            },
            // connector modules
            new DeltaLakeMetastoreModule(),
            new DeltaLakeModule(),
            // test setup
            binder -> {
                binder.bind(HdfsEnvironment.class).toInstance(HDFS_ENVIRONMENT);
            },
            new AbstractModule() {
                @Provides
                public DeltaLakeMetastore getDeltaLakeMetastore(
                        @RawHiveMetastoreFactory HiveMetastoreFactory hiveMetastoreFactory,
                        TransactionLogAccess transactionLogAccess,
                        TypeManager typeManager,
                        CachingDeltaLakeStatisticsAccess statistics)
                {
                    return new HiveMetastoreBackedDeltaLakeMetastore(
                            hiveMetastoreFactory.createMetastore(Optional.empty()),
                            transactionLogAccess, typeManager, statistics);
                }
            });

    Injector injector = app
            .doNotInitializeLogging()
            .setRequiredConfigurationProperties(config)
            .initialize();

    deltaLakeMetadataFactory = injector.getInstance(DeltaLakeMetadataFactory.class);
    injector.getInstance(DeltaLakeMetastore.class).createDatabase(Database.builder()
            .setDatabaseName(DATABASE_NAME)
            .setOwnerName(Optional.of("test"))
            .setOwnerType(Optional.of(USER))
            .setLocation(Optional.empty())
            .build());
}
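With this wiring in place, a test method can ask the factory for a DeltaLakeMetadata instance and exercise it against the file-backed metastore. The sketch below is hypothetical, not a method of TestDeltaLakeMetadata; it assumes the deltaLakeMetadataFactory and DATABASE_NAME fields from the setup above, plus a SESSION ConnectorSession constant built with TestingConnectorSession and AssertJ on the test classpath.

// Hypothetical follow-up test, continuing the TestDeltaLakeMetadata class above.
// Assumes: deltaLakeMetadataFactory and DATABASE_NAME from setUp(), a SESSION constant
// built via TestingConnectorSession.builder(), and AssertJ's assertThat statically imported.
@Test
public void testCreatedSchemaIsVisible()
{
    DeltaLakeMetadata metadata = deltaLakeMetadataFactory.create(ConnectorIdentity.ofUser("test"));
    // The schema created in setUp() should be listed through the ConnectorMetadata SPI.
    assertThat(metadata.listSchemaNames(SESSION)).contains(DATABASE_NAME);
}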