Use of io.trino.plugin.base.CatalogName in project trino by trinodb.
The class PhoenixConnectorFactory, method create.
@Override
public Connector create(String catalogName, Map<String, String> requiredConfig, ConnectorContext context)
{
    requireNonNull(requiredConfig, "requiredConfig is null");
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        Bootstrap app = new Bootstrap(
                new JsonModule(),
                new PhoenixClientModule(),
                binder -> {
                    // make the catalog name available for injection into connector components
                    binder.bind(CatalogName.class).toInstance(new CatalogName(catalogName));
                    binder.bind(ClassLoader.class).toInstance(PhoenixConnectorFactory.class.getClassLoader());
                    binder.bind(TypeManager.class).toInstance(context.getTypeManager());
                });
        Injector injector = app
                .doNotInitializeLogging()
                .setRequiredConfigurationProperties(requiredConfig)
                .initialize();
        return injector.getInstance(PhoenixConnector.class);
    }
}
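Once bound this way, any class constructed by the injector can receive the catalog name through constructor injection. A minimal sketch, assuming a hypothetical consumer class (PhoenixMetadataFactory is not from the snippet, and the @Inject annotation is assumed to be the javax.inject one used with Guice in Trino of this vintage):

import io.trino.plugin.base.CatalogName;
import javax.inject.Inject;

import static java.util.Objects.requireNonNull;

// Hypothetical consumer: illustrates how the CatalogName bound above
// reaches connector components via Guice constructor injection.
public class PhoenixMetadataFactory
{
    private final CatalogName catalogName;

    @Inject
    public PhoenixMetadataFactory(CatalogName catalogName)
    {
        this.catalogName = requireNonNull(catalogName, "catalogName is null");
    }
}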
Use of io.trino.plugin.base.CatalogName in project trino by trinodb.
The class TestRubixCaching, method testCoordinatorNotJoining.
@Test
public void testCoordinatorNotJoining()
{
    RubixConfig rubixConfig = new RubixConfig().setCacheLocation("/tmp/not/existing/dir");
    HdfsConfigurationInitializer configurationInitializer = new HdfsConfigurationInitializer(config, ImmutableSet.of());
    // only a worker node is registered, so no coordinator is available
    InternalNode workerNode = new InternalNode("worker", URI.create("http://127.0.0.2:8080"), UNKNOWN, false);
    RubixInitializer rubixInitializer = new RubixInitializer(
            retry().maxAttempts(1),
            rubixConfig.setStartServerOnCoordinator(true),
            new TestingNodeManager(ImmutableList.of(workerNode)),
            new CatalogName("catalog"),
            configurationInitializer,
            new DefaultRubixHdfsInitializer(new HdfsAuthenticationConfig()));
    assertThatThrownBy(rubixInitializer::initializeRubix).hasMessage("No coordinator node available");
}
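For contrast, a minimal sketch of the passing case, assuming the same constructor shapes as in the test above: registering a node whose coordinator flag is set lets initialization find a coordinator.

// Assumption: the fourth InternalNode constructor argument marks the node as coordinator,
// mirroring the worker-only setup in the test above.
InternalNode coordinatorNode = new InternalNode("coordinator", URI.create("http://127.0.0.1:8080"), UNKNOWN, true);
TestingNodeManager nodeManager = new TestingNodeManager(ImmutableList.of(coordinatorNode));
// With a coordinator registered, initializeRubix() no longer fails with
// "No coordinator node available".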
Use of io.trino.plugin.base.CatalogName in project trino by trinodb.
The class TestRubixCaching, method initializeRubix.
private void initializeRubix(RubixConfig rubixConfig, List<Node> nodes)
        throws Exception
{
    tempDirectory = createTempDirectory(getClass().getSimpleName());
    // create cache directories
    List<java.nio.file.Path> cacheDirectories = ImmutableList.of(
            tempDirectory.resolve("cache1"),
            tempDirectory.resolve("cache2"));
    for (java.nio.file.Path directory : cacheDirectories) {
        createDirectories(directory);
    }
    // initialize rubix in master-only mode
    rubixConfig.setStartServerOnCoordinator(true);
    rubixConfig.setCacheLocation(Joiner.on(",").join(
            cacheDirectories.stream()
                    .map(java.nio.file.Path::toString)
                    .collect(toImmutableList())));
    HdfsConfigurationInitializer configurationInitializer = new HdfsConfigurationInitializer(config, ImmutableSet.of(
            // fetch data immediately in async mode
            config -> setRemoteFetchProcessInterval(config, 0)));
    TestingNodeManager nodeManager = new TestingNodeManager(nodes);
    rubixInitializer = new RubixInitializer(
            rubixConfig,
            nodeManager,
            new CatalogName("catalog"),
            configurationInitializer,
            new DefaultRubixHdfsInitializer(new HdfsAuthenticationConfig()));
    rubixConfigInitializer = new RubixConfigurationInitializer(rubixInitializer);
    rubixInitializer.initializeRubix();
    retry().run("wait for rubix to startup", () -> {
        if (!rubixInitializer.isServerUp()) {
            throw new IllegalStateException("Rubix server has not started");
        }
        return null;
    });
}
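The final retry().run(...) call polls until the Rubix server reports up. A plain-Java approximation of that polling loop, assuming illustrative attempt and sleep values rather than the real retry driver's defaults:

// Approximate semantics of retry().run("wait for rubix to startup", callable);
// this is a sketch of the idea, not the actual retry implementation.
private static void waitForServer(RubixInitializer rubixInitializer)
        throws InterruptedException
{
    int maxAttempts = 10;    // assumption: illustrative value
    long sleepMillis = 1000; // assumption: illustrative value
    for (int attempt = 1; ; attempt++) {
        if (rubixInitializer.isServerUp()) {
            return;
        }
        if (attempt >= maxAttempts) {
            throw new IllegalStateException("Rubix server has not started");
        }
        Thread.sleep(sleepMillis);
    }
}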
Use of io.trino.plugin.base.CatalogName in project trino by trinodb.
The class TestIcebergSplitSource, method createQueryRunner.
@Override
protected QueryRunner createQueryRunner()
        throws Exception
{
    HdfsConfig config = new HdfsConfig();
    HdfsConfiguration configuration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());
    File tempDir = Files.createTempDirectory("test_iceberg_split_source").toFile();
    this.metastoreDir = new File(tempDir, "iceberg_data");
    HiveMetastore metastore = createTestingFileHiveMetastore(metastoreDir);
    IcebergTableOperationsProvider operationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
    this.catalog = new TrinoHiveCatalog(
            new CatalogName("hive"),
            memoizeMetastore(metastore, 1000),
            hdfsEnvironment,
            new TestingTypeManager(),
            operationsProvider,
            "test",
            false, false, false);
    return createIcebergQueryRunner(ImmutableMap.of(), ImmutableMap.of(), ImmutableList.of(NATION), Optional.of(metastoreDir));
}
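Here memoizeMetastore(metastore, 1000) puts a size-bounded cache in front of the file metastore so repeated metadata lookups during the test avoid re-reading from disk. A minimal sketch of the memoization idea using a Guava cache (illustrative only; MemoizingLookup is hypothetical, and the real CachingHiveMetastore keeps separate caches per metastore call):

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;

// Illustrative memoizer: a bounded cache in front of an expensive lookup,
// mirroring the role of memoizeMetastore(metastore, 1000) above.
public class MemoizingLookup<K, V>
{
    private final Cache<K, V> cache = CacheBuilder.newBuilder()
            .maximumSize(1000) // mirrors the 1000 passed to memoizeMetastore
            .build();

    public V get(K key, Callable<V> loader)
            throws ExecutionException
    {
        return cache.get(key, loader); // compute on miss, reuse on hit
    }
}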
Use of io.trino.plugin.base.CatalogName in project trino by trinodb.
The class TestIcebergV2, method updateTableToV2.
private Table updateTableToV2(String tableName)
{
    IcebergTableOperationsProvider tableOperationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
    TrinoCatalog catalog = new TrinoHiveCatalog(
            new CatalogName("hive"),
            CachingHiveMetastore.memoizeMetastore(metastore, 1000),
            hdfsEnvironment,
            new TestingTypeManager(),
            tableOperationsProvider,
            "test",
            false, false, false);
    BaseTable table = (BaseTable) loadIcebergTable(catalog, tableOperationsProvider, SESSION, new SchemaTableName("tpch", tableName));
    // commit a copy of the current metadata upgraded to Iceberg format version 2
    TableOperations operations = table.operations();
    TableMetadata currentMetadata = operations.current();
    operations.commit(currentMetadata, currentMetadata.upgradeToFormatVersion(2));
    return table;
}
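A hedged usage sketch of this helper: a test creates a v1 table, upgrades it, and checks the reported format version. The table name is an assumption for illustration; assertUpdate comes from Trino's query test framework and formatVersion() from Iceberg's TableMetadata.

// Illustrative test flow; the table name test_v2_upgrade is hypothetical.
assertUpdate("CREATE TABLE test_v2_upgrade AS SELECT * FROM tpch.tiny.nation", 25);
Table icebergTable = updateTableToV2("test_v2_upgrade");
// After the commit in updateTableToV2, the metadata reports format version 2.
assertEquals(((BaseTable) icebergTable).operations().current().formatVersion(), 2);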