Use of io.trino.testing.TestingNodeManager in the trino project by trinodb.
Example from the class AbstractTestHive, method setup.
/**
 * Wires up the Hive connector components used by the tests (metadata factory,
 * transaction manager, split manager, page sink/source providers and node
 * partitioning provider), all backed by the supplied metastore and HDFS environment.
 */
protected final void setup(String databaseName, HiveConfig hiveConfig, HiveMetastore hiveMetastore, HdfsEnvironment hdfsConfiguration) {
    setupHive(databaseName);

    metastoreClient = hiveMetastore;
    hdfsEnvironment = hdfsConfiguration;

    HivePartitionManager partitionManager = new HivePartitionManager(hiveConfig);
    locationService = new HiveLocationService(hdfsEnvironment);
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);

    metadataFactory = new HiveMetadataFactory(
            new CatalogName("hive"),
            HiveMetastoreFactory.ofInstance(metastoreClient),
            hdfsEnvironment,
            partitionManager,
            10,
            10,
            10,
            false,
            false,
            false,
            true,
            true,
            false,
            1000,
            Optional.empty(),
            true,
            TESTING_TYPE_MANAGER,
            NOOP_METADATA_PROVIDER,
            locationService,
            partitionUpdateCodec,
            newFixedThreadPool(2),
            heartbeatService,
            TEST_SERVER_VERSION,
            // Redirect only the designated tester table to a mock target
            (connectorSession, handle) -> {
                if (handle.getTableName().contains("apply_redirection_tester")) {
                    return Optional.of(new TableScanRedirectApplicationResult(new CatalogSchemaTableName("hive", databaseName, "mock_redirection_target"), ImmutableMap.of(), TupleDomain.all()));
                }
                return Optional.empty();
            },
            ImmutableSet.of(new PartitionsSystemTableProvider(partitionManager, TESTING_TYPE_MANAGER), new PropertiesSystemTableProvider()),
            // Serve a dummy materialized view definition only for the designated tester view
            usedMetastore -> new NoneHiveMaterializedViewMetadata() {
                @Override
                public Optional<ConnectorMaterializedViewDefinition> getMaterializedView(ConnectorSession session, SchemaTableName viewName) {
                    if (viewName.getTableName().contains("materialized_view_tester")) {
                        return Optional.of(new ConnectorMaterializedViewDefinition("dummy_view_sql", Optional.empty(), Optional.empty(), Optional.empty(), ImmutableList.of(new ConnectorMaterializedViewDefinition.Column("abc", TypeId.of("type"))), Optional.empty(), Optional.of("alice"), ImmutableMap.of()));
                    }
                    return Optional.empty();
                }
            },
            SqlStandardAccessControlMetadata::new,
            NO_REDIRECTIONS,
            TableInvalidationCallback.NOOP);

    transactionManager = new HiveTransactionManager(metadataFactory);

    splitManager = new HiveSplitManager(
            transactionManager,
            partitionManager,
            new NamenodeStats(),
            hdfsEnvironment,
            new CachingDirectoryLister(hiveConfig),
            directExecutor(),
            new CounterStat(),
            100,
            hiveConfig.getMaxOutstandingSplitsSize(),
            hiveConfig.getMinPartitionBatchSize(),
            hiveConfig.getMaxPartitionBatchSize(),
            hiveConfig.getMaxInitialSplits(),
            hiveConfig.getSplitLoaderConcurrency(),
            hiveConfig.getMaxSplitsPerSecond(),
            false,
            TESTING_TYPE_MANAGER);

    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(hiveConfig, hdfsEnvironment),
            hdfsEnvironment,
            PAGE_SORTER,
            HiveMetastoreFactory.ofInstance(metastoreClient),
            new GroupByHashPageIndexerFactory(JOIN_COMPILER, BLOCK_TYPE_OPERATORS),
            TESTING_TYPE_MANAGER,
            getHiveConfig(),
            locationService,
            partitionUpdateCodec,
            new TestingNodeManager("fake-environment"),
            new HiveEventClient(),
            getHiveSessionProperties(hiveConfig),
            new HiveWriterStats());

    pageSourceProvider = new HivePageSourceProvider(
            TESTING_TYPE_MANAGER,
            hdfsEnvironment,
            hiveConfig,
            getDefaultHivePageSourceFactories(hdfsEnvironment, hiveConfig),
            getDefaultHiveRecordCursorProviders(hiveConfig, hdfsEnvironment),
            new GenericHiveRecordCursorProvider(hdfsEnvironment, hiveConfig),
            Optional.empty());

    nodePartitioningProvider = new HiveNodePartitioningProvider(new TestingNodeManager("fake-environment"), TESTING_TYPE_MANAGER);
}
Use of io.trino.testing.TestingNodeManager in the trino project by trinodb.
Example from the class TestRubixCaching, method testCoordinatorNotJoining.
@Test
public void testCoordinatorNotJoining() {
    // Rubix start-up must fail fast when the cluster contains only worker nodes.
    RubixConfig rubixConfig = new RubixConfig()
            .setCacheLocation("/tmp/not/existing/dir")
            .setStartServerOnCoordinator(true);
    HdfsConfigurationInitializer configurationInitializer = new HdfsConfigurationInitializer(config, ImmutableSet.of());
    InternalNode worker = new InternalNode("worker", URI.create("http://127.0.0.2:8080"), UNKNOWN, false);

    RubixInitializer initializer = new RubixInitializer(
            retry().maxAttempts(1),
            rubixConfig,
            new TestingNodeManager(ImmutableList.of(worker)),
            new CatalogName("catalog"),
            configurationInitializer,
            new DefaultRubixHdfsInitializer(new HdfsAuthenticationConfig()));

    assertThatThrownBy(initializer::initializeRubix)
            .hasMessage("No coordinator node available");
}
Use of io.trino.testing.TestingNodeManager in the trino project by trinodb.
Example from the class TestRubixCaching, method initializeRubix.
/**
 * Starts a Rubix server in master-only mode with two freshly created cache
 * directories under a temp dir, then waits until the server reports up.
 */
private void initializeRubix(RubixConfig rubixConfig, List<Node> nodes) throws Exception {
    tempDirectory = createTempDirectory(getClass().getSimpleName());

    // Create the two cache directories Rubix will use
    java.nio.file.Path firstCacheDirectory = tempDirectory.resolve("cache1");
    java.nio.file.Path secondCacheDirectory = tempDirectory.resolve("cache2");
    List<java.nio.file.Path> cacheDirectories = ImmutableList.of(firstCacheDirectory, secondCacheDirectory);
    for (java.nio.file.Path cacheDirectory : cacheDirectories) {
        createDirectories(cacheDirectory);
    }

    // Run in master-only mode with a comma-separated cache location list
    rubixConfig.setStartServerOnCoordinator(true);
    rubixConfig.setCacheLocation(String.join(",", cacheDirectories.stream().map(java.nio.file.Path::toString).collect(toImmutableList())));

    HdfsConfigurationInitializer configurationInitializer = new HdfsConfigurationInitializer(
            config,
            ImmutableSet.of(
                    // fetch data immediately in async mode
                    initializedConfig -> setRemoteFetchProcessInterval(initializedConfig, 0)));

    TestingNodeManager nodeManager = new TestingNodeManager(nodes);
    rubixInitializer = new RubixInitializer(rubixConfig, nodeManager, new CatalogName("catalog"), configurationInitializer, new DefaultRubixHdfsInitializer(new HdfsAuthenticationConfig()));
    rubixConfigInitializer = new RubixConfigurationInitializer(rubixInitializer);
    rubixInitializer.initializeRubix();

    retry().run("wait for rubix to startup", () -> {
        if (rubixInitializer.isServerUp()) {
            return null;
        }
        throw new IllegalStateException("Rubix server has not started");
    });
}
Use of io.trino.testing.TestingNodeManager in the trino project by trinodb.
Example from the class TestRaptorMetadata, method setupDatabase.
@BeforeMethod
public void setupDatabase() {
    // Fresh in-memory database per test, with the Raptor schema installed
    dbi = createTestingJdbi();
    dummyHandle = dbi.open();
    createTablesWithRetry(dbi);

    // Shard manager backed by a testing node manager's worker set
    NodeManager testingNodeManager = new TestingNodeManager();
    NodeSupplier workerSupplier = testingNodeManager::getWorkerNodes;
    shardManager = createShardManager(dbi, workerSupplier, systemTicker());
    metadata = new RaptorMetadata(dbi, shardManager);
}
Use of io.trino.testing.TestingNodeManager in the trino project by trinodb.
Example from the class TestRaptorStorageManager, method createRaptorStorageManager.
/**
 * Builds a {@code RaptorStorageManager} for tests, with file-based shard storage
 * ("data") and a file-based backup store ("backup") rooted under {@code temporary}.
 */
public static RaptorStorageManager createRaptorStorageManager(Jdbi dbi, File temporary, int maxShardRows) {
    // Local shard storage under <temporary>/data
    StorageService storageService = new FileStorageService(new File(temporary, "data"));
    storageService.start();

    // File-based backup store under <temporary>/backup
    FileBackupStore fileBackupStore = new FileBackupStore(new File(temporary, "backup"));
    fileBackupStore.start();
    Optional<BackupStore> backupStore = Optional.of(fileBackupStore);

    ShardManager shardManager = createShardManager(dbi);
    ShardRecoveryManager recoveryManager = new ShardRecoveryManager(storageService, backupStore, new TestingNodeManager(), shardManager, MISSING_SHARD_DISCOVERY, 10);

    return createRaptorStorageManager(storageService, backupStore, recoveryManager, new InMemoryShardRecorder(), maxShardRows, MAX_FILE_SIZE);
}
Aggregations