Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
The class TestHiveGlueMetastore, method doGetPartitionsFilterTest.
/**
 * @param columnMetadata column definitions for the dummy partitioned table
 * @param partitionColumnNames names of the partition columns
 * @param partitionValues partitions to create before applying the filters
 * @param filterList filters to run; must be the same size as expectedValuesList
 * @param expectedValuesList expected partition values for each filter, in the same order as filterList
 * @throws Exception if table creation or the partition lookup fails
 */
private void doGetPartitionsFilterTest(
        List<ColumnMetadata> columnMetadata,
        List<String> partitionColumnNames,
        List<PartitionValues> partitionValues,
        List<TupleDomain<String>> filterList,
        List<List<PartitionValues>> expectedValuesList)
        throws Exception
{
    try (CloseableSchamaTableName closeableTableName = new CloseableSchamaTableName(temporaryTable("get_partitions"))) {
        SchemaTableName tableName = closeableTableName.getSchemaTableName();
        createDummyPartitionedTable(tableName, columnMetadata, partitionColumnNames, partitionValues);
        HiveMetastore metastoreClient = getMetastoreClient();
        for (int i = 0; i < filterList.size(); i++) {
            TupleDomain<String> filter = filterList.get(i);
            List<PartitionValues> expectedValues = expectedValuesList.get(i);
            List<String> expectedResults = expectedValues.stream()
                    .map(expectedPartitionValues -> makePartName(partitionColumnNames, expectedPartitionValues.getValues()))
                    .collect(toImmutableList());
            Optional<List<String>> partitionNames = metastoreClient.getPartitionNamesByFilter(
                    tableName.getSchemaName(),
                    tableName.getTableName(),
                    partitionColumnNames,
                    filter);
            assertTrue(partitionNames.isPresent());
            assertEquals(
                    partitionNames.get(),
                    expectedResults,
                    format("lists \nactual: %s\nexpected: %s\nmismatch for filter %s (input index %d)\n", partitionNames.get(), expectedResults, filter, i));
        }
    }
}
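For reference, a hedged sketch of how a test might invoke this helper. The values, the PartitionValues.make factory, and the predicate imports are illustrative assumptions, not taken from the Trino source: two partitions are created and a single equality filter on the "ds" column is expected to match one of them.

// Hypothetical invocation. Assumes io.trino.spi.predicate.Domain,
// io.trino.spi.type.VarcharType.VARCHAR, and io.airlift.slice.Slices.utf8Slice;
// PartitionValues.make is assumed to wrap raw partition value strings.
doGetPartitionsFilterTest(
        CREATE_TABLE_COLUMNS_PARTITIONED,
        ImmutableList.of("ds"),
        ImmutableList.of(PartitionValues.make("2016-01-01"), PartitionValues.make("2016-01-02")),
        ImmutableList.of(TupleDomain.withColumnDomains(
                ImmutableMap.of("ds", Domain.singleValue(VARCHAR, utf8Slice("2016-01-01"))))),
        ImmutableList.of(ImmutableList.of(PartitionValues.make("2016-01-01"))));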
Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
The class TestHiveGlueMetastore, method testGetPartitions.
@Override
public void testGetPartitions()
        throws Exception
{
    SchemaTableName tableName = temporaryTable("get_partitions");
    try {
        createDummyPartitionedTable(tableName, CREATE_TABLE_COLUMNS_PARTITIONED);
        HiveMetastore metastoreClient = getMetastoreClient();
        Optional<List<String>> partitionNames = metastoreClient.getPartitionNamesByFilter(
                tableName.getSchemaName(),
                tableName.getTableName(),
                ImmutableList.of("ds"),
                TupleDomain.all());
        assertTrue(partitionNames.isPresent());
        assertEquals(partitionNames.get(), ImmutableList.of("ds=2016-01-01", "ds=2016-01-02"));
    }
    finally {
        // clean up the temporary partitioned table created above
        dropTable(tableName);
    }
}
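TupleDomain.all() asks for every partition name of the table. A hedged variant restricting the same lookup to a date range is sketched below; it is not from the source test, and it assumes Trino's standard predicate SPI (Domain, ValueSet, Range) plus Slices.utf8Slice for varchar values.

// Not in the source test: ds in [2016-01-01, 2016-01-02) should match only
// the first of the two partitions created above.
TupleDomain<String> dsRange = TupleDomain.withColumnDomains(ImmutableMap.of(
        "ds", Domain.create(
                ValueSet.ofRanges(Range.range(VARCHAR, utf8Slice("2016-01-01"), true, utf8Slice("2016-01-02"), false)),
                false)));
Optional<List<String>> filtered = metastoreClient.getPartitionNamesByFilter(
        tableName.getSchemaName(), tableName.getTableName(), ImmutableList.of("ds"), dsRange);
// expected: ["ds=2016-01-01"]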
Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
The class TestHivePlans, method createLocalQueryRunner.
@Override
protected LocalQueryRunner createLocalQueryRunner()
{
    baseDir = Files.createTempDir();
    HdfsConfig config = new HdfsConfig();
    HdfsConfiguration configuration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config), ImmutableSet.of());
    HdfsEnvironment environment = new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());
    HiveMetastore metastore = new FileHiveMetastore(
            new NodeVersion("test_version"),
            environment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig()
                    .setCatalogDirectory(baseDir.toURI().toString())
                    .setMetastoreUser("test"));
    Database database = Database.builder()
            .setDatabaseName(SCHEMA_NAME)
            .setOwnerName(Optional.of("public"))
            .setOwnerType(Optional.of(PrincipalType.ROLE))
            .build();
    metastore.createDatabase(database);
    return createQueryRunner(HIVE_SESSION, metastore);
}
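The trailing createQueryRunner(HIVE_SESSION, metastore) call delegates to a helper in the test class. A plausible shape for that helper, mirroring the explicit wiring shown in the next example, might be the following (hypothetical; the real TestHivePlans method may differ):

// Assumed helper: register a metastore-backed Hive catalog on a fresh runner.
private static LocalQueryRunner createQueryRunner(Session session, HiveMetastore metastore)
{
    LocalQueryRunner queryRunner = LocalQueryRunner.create(session);
    queryRunner.createCatalog("hive", new TestingHiveConnectorFactory(metastore), ImmutableMap.of());
    return queryRunner;
}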
Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
The class TestHiveProjectionPushdownIntoTableScan, method createLocalQueryRunner.
@Override
protected LocalQueryRunner createLocalQueryRunner()
{
    baseDir = Files.createTempDir();
    HdfsConfig config = new HdfsConfig();
    HdfsConfiguration configuration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config), ImmutableSet.of());
    HdfsEnvironment environment = new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());
    HiveMetastore metastore = new FileHiveMetastore(
            new NodeVersion("test_version"),
            environment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig()
                    .setCatalogDirectory(baseDir.toURI().toString())
                    .setMetastoreUser("test"));
    Database database = Database.builder()
            .setDatabaseName(SCHEMA_NAME)
            .setOwnerName(Optional.of("public"))
            .setOwnerType(Optional.of(PrincipalType.ROLE))
            .build();
    metastore.createDatabase(database);
    LocalQueryRunner queryRunner = LocalQueryRunner.create(HIVE_SESSION);
    queryRunner.createCatalog(HIVE_CATALOG_NAME, new TestingHiveConnectorFactory(metastore), ImmutableMap.of());
    return queryRunner;
}
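Once built, the runner executes SQL against the registered catalog, which is how the plan tests exercise projection pushdown. A hedged usage sketch follows; the table name, query, and plan assertion are illustrative, not from the test class.

// Illustrative only: materialize a row-typed column, then assert the plan shape
// for a single-subfield projection (anyTree/tableScan come from PlanMatchPattern).
getQueryRunner().execute(
        "CREATE TABLE test_proj AS SELECT CAST(ROW(1, 2) AS ROW(x BIGINT, y BIGINT)) AS col");
assertPlan(
        "SELECT col.x FROM test_proj",
        anyTree(tableScan("test_proj")));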
Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
The class InternalIcebergConnectorFactory, method createConnector.
public static Connector createConnector(
        String catalogName,
        Map<String, String> config,
        ConnectorContext context,
        Module module,
        Optional<HiveMetastore> metastore,
        Optional<FileIoProvider> fileIoProvider)
{
    ClassLoader classLoader = InternalIcebergConnectorFactory.class.getClassLoader();
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        Bootstrap app = new Bootstrap(
                new EventModule(),
                new MBeanModule(),
                new ConnectorObjectNameGeneratorModule(catalogName, "io.trino.plugin.iceberg", "trino.plugin.iceberg"),
                new JsonModule(),
                new IcebergModule(),
                new IcebergSecurityModule(),
                new IcebergCatalogModule(metastore),
                new HiveHdfsModule(),
                new HiveS3Module(),
                new HiveGcsModule(),
                new HiveAzureModule(),
                new HdfsAuthenticationModule(),
                new MBeanServerModule(),
                fileIoProvider
                        .<Module>map(provider -> binder -> binder.bind(FileIoProvider.class).toInstance(provider))
                        .orElse(binder -> binder.bind(FileIoProvider.class).to(HdfsFileIoProvider.class).in(SINGLETON)),
                binder -> {
                    binder.bind(NodeVersion.class).toInstance(new NodeVersion(context.getNodeManager().getCurrentNode().getVersion()));
                    binder.bind(NodeManager.class).toInstance(context.getNodeManager());
                    binder.bind(TypeManager.class).toInstance(context.getTypeManager());
                    binder.bind(PageIndexerFactory.class).toInstance(context.getPageIndexerFactory());
                    binder.bind(CatalogName.class).toInstance(new CatalogName(catalogName));
                },
                module);

        Injector injector = app
                .doNotInitializeLogging()
                .setRequiredConfigurationProperties(config)
                .initialize();

        LifeCycleManager lifeCycleManager = injector.getInstance(LifeCycleManager.class);
        IcebergTransactionManager transactionManager = injector.getInstance(IcebergTransactionManager.class);
        ConnectorSplitManager splitManager = injector.getInstance(ConnectorSplitManager.class);
        ConnectorPageSourceProvider connectorPageSource = injector.getInstance(ConnectorPageSourceProvider.class);
        ConnectorPageSinkProvider pageSinkProvider = injector.getInstance(ConnectorPageSinkProvider.class);
        ConnectorNodePartitioningProvider connectorDistributionProvider = injector.getInstance(ConnectorNodePartitioningProvider.class);
        Set<SessionPropertiesProvider> sessionPropertiesProviders = injector.getInstance(Key.get(new TypeLiteral<Set<SessionPropertiesProvider>>() {}));
        IcebergTableProperties icebergTableProperties = injector.getInstance(IcebergTableProperties.class);
        Set<Procedure> procedures = injector.getInstance(Key.get(new TypeLiteral<Set<Procedure>>() {}));
        Set<TableProcedureMetadata> tableProcedures = injector.getInstance(Key.get(new TypeLiteral<Set<TableProcedureMetadata>>() {}));
        Optional<ConnectorAccessControl> accessControl = injector.getInstance(Key.get(new TypeLiteral<Optional<ConnectorAccessControl>>() {}));

        return new IcebergConnector(
                lifeCycleManager,
                transactionManager,
                new ClassLoaderSafeConnectorSplitManager(splitManager, classLoader),
                new ClassLoaderSafeConnectorPageSourceProvider(connectorPageSource, classLoader),
                new ClassLoaderSafeConnectorPageSinkProvider(pageSinkProvider, classLoader),
                new ClassLoaderSafeNodePartitioningProvider(connectorDistributionProvider, classLoader),
                sessionPropertiesProviders,
                IcebergSchemaProperties.SCHEMA_PROPERTIES,
                icebergTableProperties.getTableProperties(),
                accessControl,
                procedures,
                tableProcedures);
    }
}
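A hedged sketch of a call site follows; the argument choices are assumptions rather than what the public Iceberg connector factory actually passes. Empty Optionals let IcebergCatalogModule wire the configured metastore and HdfsFileIoProvider supply file IO, while the metastore parameter exists so tests can inject their own HiveMetastore.

// Hypothetical call site; Modules.EMPTY_MODULE (com.google.inject.util.Modules)
// fills the extension-module slot.
Connector connector = InternalIcebergConnectorFactory.createConnector(
        "iceberg",
        ImmutableMap.of("hive.metastore.uri", "thrift://example:9083"),
        context,           // ConnectorContext provided by the engine
        Modules.EMPTY_MODULE,
        Optional.empty(),  // no injected HiveMetastore
        Optional.empty()); // default HdfsFileIoProvider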