Use of io.trino.testing.TestingNodeManager in project trino by trinodb.
From class TestHivePageSink, method createPageSink:
private static ConnectorPageSink createPageSink(HiveTransactionHandle transaction, HiveConfig config, HiveMetastore metastore, Path outputPath, HiveWriterStats stats) {
LocationHandle locationHandle = new LocationHandle(outputPath, outputPath, false, DIRECT_TO_TARGET_NEW_DIRECTORY);
HiveOutputTableHandle handle = new HiveOutputTableHandle(
        SCHEMA_NAME,
        TABLE_NAME,
        getColumnHandles(),
        new HivePageSinkMetadata(
                new SchemaTableName(SCHEMA_NAME, TABLE_NAME),
                metastore.getTable(SCHEMA_NAME, TABLE_NAME),
                ImmutableMap.of()),
        locationHandle,
        config.getHiveStorageFormat(),
        config.getHiveStorageFormat(),
        ImmutableList.of(),
        Optional.empty(),
        "test",
        ImmutableMap.of(),
        NO_ACID_TRANSACTION,
        false,
        false);
JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
TypeOperators typeOperators = new TypeOperators();
BlockTypeOperators blockTypeOperators = new BlockTypeOperators(typeOperators);
HivePageSinkProvider provider = new HivePageSinkProvider(
        getDefaultHiveFileWriterFactories(config, HDFS_ENVIRONMENT),
        HDFS_ENVIRONMENT,
        PAGE_SORTER,
        HiveMetastoreFactory.ofInstance(metastore),
        new GroupByHashPageIndexerFactory(new JoinCompiler(typeOperators), blockTypeOperators),
        TESTING_TYPE_MANAGER,
        config,
        new HiveLocationService(HDFS_ENVIRONMENT),
        partitionUpdateCodec,
        new TestingNodeManager("fake-environment"),
        new HiveEventClient(),
        getHiveSessionProperties(config),
        stats);
return provider.createPageSink(transaction, getHiveSession(config), handle);
}
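Note (not part of the original snippet): the returned ConnectorPageSink is typically driven by appending pages and collecting the serialized PartitionUpdate fragments from finish(). A minimal hedged sketch, assuming a Page value named somePage and Airlift's MoreFutures.getFutureValue helper:
ConnectorPageSink pageSink = createPageSink(transaction, config, metastore, outputPath, stats);
pageSink.appendPage(somePage); // somePage is a hypothetical io.trino.spi.Page built by the test
// finish() returns CompletableFuture<Collection<Slice>>; each Slice is a JSON-encoded PartitionUpdate
Collection<Slice> fragments = getFutureValue(pageSink.finish());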
Use of io.trino.testing.TestingNodeManager in project trino by trinodb.
From class AbstractTestHiveFileSystem, method setup:
protected void setup(String host, int port, String databaseName, boolean s3SelectPushdownEnabled, HdfsConfiguration hdfsConfiguration) {
database = databaseName;
table = new SchemaTableName(database, "trino_test_external_fs");
tableWithHeader = new SchemaTableName(database, "trino_test_external_fs_with_header");
tableWithHeaderAndFooter = new SchemaTableName(database, "trino_test_external_fs_with_header_and_footer");
String random = randomUUID().toString().toLowerCase(ENGLISH).replace("-", "");
temporaryCreateTable = new SchemaTableName(database, "tmp_trino_test_create_" + random);
config = new HiveConfig().setS3SelectPushdownEnabled(s3SelectPushdownEnabled);
Optional<HostAndPort> proxy = Optional.ofNullable(System.getProperty("hive.metastore.thrift.client.socks-proxy")).map(HostAndPort::fromString);
MetastoreLocator metastoreLocator = new TestingMetastoreLocator(proxy, HostAndPort.fromParts(host, port));
HivePartitionManager hivePartitionManager = new HivePartitionManager(config);
hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, new HdfsConfig(), new NoHdfsAuthentication());
MetastoreConfig metastoreConfig = new MetastoreConfig();
metastoreClient = new TestingHiveMetastore(
        new BridgingHiveMetastore(
                new ThriftHiveMetastore(metastoreLocator, new HiveConfig(), metastoreConfig, new ThriftMetastoreConfig(), hdfsEnvironment, false),
                new HiveIdentity(getHiveSession(config).getIdentity())),
        getBasePath(),
        hdfsEnvironment);
locationService = new HiveLocationService(hdfsEnvironment);
JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
metadataFactory = new HiveMetadataFactory(
        new CatalogName("hive"),
        config,
        metastoreConfig,
        HiveMetastoreFactory.ofInstance(metastoreClient),
        hdfsEnvironment,
        hivePartitionManager,
        newDirectExecutorService(),
        heartbeatService,
        TESTING_TYPE_MANAGER,
        NOOP_METADATA_PROVIDER,
        locationService,
        partitionUpdateCodec,
        new NodeVersion("test_version"),
        new NoneHiveRedirectionsProvider(),
        ImmutableSet.of(
                new PartitionsSystemTableProvider(hivePartitionManager, TESTING_TYPE_MANAGER),
                new PropertiesSystemTableProvider()),
        new DefaultHiveMaterializedViewMetadataFactory(),
        SqlStandardAccessControlMetadata::new,
        NO_REDIRECTIONS,
        TableInvalidationCallback.NOOP);
transactionManager = new HiveTransactionManager(metadataFactory);
splitManager = new HiveSplitManager(
        transactionManager,
        hivePartitionManager,
        new NamenodeStats(),
        hdfsEnvironment,
        new CachingDirectoryLister(new HiveConfig()),
        new BoundedExecutor(executor, config.getMaxSplitIteratorThreads()),
        new CounterStat(),
        config.getMaxOutstandingSplits(),
        config.getMaxOutstandingSplitsSize(),
        config.getMinPartitionBatchSize(),
        config.getMaxPartitionBatchSize(),
        config.getMaxInitialSplits(),
        config.getSplitLoaderConcurrency(),
        config.getMaxSplitsPerSecond(),
        config.getRecursiveDirWalkerEnabled(),
        TESTING_TYPE_MANAGER);
TypeOperators typeOperators = new TypeOperators();
BlockTypeOperators blockTypeOperators = new BlockTypeOperators(typeOperators);
pageSinkProvider = new HivePageSinkProvider(
        getDefaultHiveFileWriterFactories(config, hdfsEnvironment),
        hdfsEnvironment,
        PAGE_SORTER,
        HiveMetastoreFactory.ofInstance(metastoreClient),
        new GroupByHashPageIndexerFactory(new JoinCompiler(typeOperators), blockTypeOperators),
        TESTING_TYPE_MANAGER,
        config,
        locationService,
        partitionUpdateCodec,
        new TestingNodeManager("fake-environment"),
        new HiveEventClient(),
        getHiveSessionProperties(config),
        new HiveWriterStats());
pageSourceProvider = new HivePageSourceProvider(
        TESTING_TYPE_MANAGER,
        hdfsEnvironment,
        config,
        getDefaultHivePageSourceFactories(hdfsEnvironment, config),
        getDefaultHiveRecordCursorProviders(config, hdfsEnvironment),
        new GenericHiveRecordCursorProvider(hdfsEnvironment, config),
        Optional.empty());
onSetupComplete();
}
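A note on the node manager used above: TestingNodeManager("fake-environment") reports the given environment name and serves a local node as the current node. A minimal sketch of its contract, using only calls that appear elsewhere on this page (the worker identifier and URI are illustrative):
TestingNodeManager nodeManager = new TestingNodeManager("fake-environment");
nodeManager.addNode(new InternalNode("worker-1", URI.create("http://127.0.0.1:8080"), NodeVersion.UNKNOWN, false));
Set<Node> workers = nodeManager.getWorkerNodes(); // the NodeManager SPI also exposes getAllNodes() and getCurrentNode()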
Use of io.trino.testing.TestingNodeManager in project trino by trinodb.
From class TestShardEjector, method createNodeManager:
private static NodeManager createNodeManager(String current, String... others) {
Node currentNode = createTestingNode(current);
TestingNodeManager nodeManager = new TestingNodeManager(currentNode);
for (String other : others) {
nodeManager.addNode(createTestingNode(other));
}
return nodeManager;
}
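The createTestingNode helper is not shown in this snippet. A plausible implementation, modeled on the InternalNode construction used in the TestRaptorSplitManager snippet below (the URI is illustrative):
private static Node createTestingNode(String nodeIdentifier)
{
    return new InternalNode(nodeIdentifier, URI.create("http://test"), NodeVersion.UNKNOWN, false);
}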
Use of io.trino.testing.TestingNodeManager in project trino by trinodb.
From class TestRaptorConnector, method setup:
@BeforeMethod
public void setup() {
Jdbi dbi = createTestingJdbi();
dummyHandle = dbi.open();
metadataDao = dbi.onDemand(MetadataDao.class);
createTablesWithRetry(dbi);
dataDir = Files.createTempDir();
CatalogName connectorId = new CatalogName("test");
NodeManager nodeManager = new TestingNodeManager();
NodeSupplier nodeSupplier = nodeManager::getWorkerNodes;
ShardManager shardManager = createShardManager(dbi);
StorageManager storageManager = createRaptorStorageManager(dbi, dataDir);
StorageManagerConfig config = new StorageManagerConfig();
connector = new RaptorConnector(
        new LifeCycleManager(ImmutableList.of(), null),
        new TestingNodeManager(),
        new RaptorMetadataFactory(dbi, shardManager),
        new RaptorSplitManager(connectorId, nodeSupplier, shardManager, false),
        new RaptorPageSourceProvider(storageManager),
        new RaptorPageSinkProvider(storageManager, new PagesIndexPageSorter(new PagesIndex.TestingFactory(false)), config),
        new RaptorNodePartitioningProvider(nodeSupplier),
        new RaptorSessionProperties(config),
        new RaptorTableProperties(TESTING_TYPE_MANAGER),
        ImmutableSet.of(),
        Optional.empty(),
        dbi);
}
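setup() opens a JDBI handle and creates a temporary data directory, so a matching cleanup is needed. A hedged sketch of the corresponding teardown, assuming Guava's MoreFiles.deleteRecursively and RecursiveDeleteOption.ALLOW_INSECURE; the actual test's teardown may differ:
@AfterMethod(alwaysRun = true)
public void tearDown()
        throws Exception
{
    dummyHandle.close();
    deleteRecursively(dataDir.toPath(), ALLOW_INSECURE); // Guava MoreFiles helper
}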
Use of io.trino.testing.TestingNodeManager in project trino by trinodb.
From class TestRaptorSplitManager, method setup:
@BeforeMethod
public void setup() throws Exception {
Jdbi dbi = createTestingJdbi();
dummyHandle = dbi.open();
createTablesWithRetry(dbi);
temporary = createTempDirectory(null);
AssignmentLimiter assignmentLimiter = new AssignmentLimiter(ImmutableSet::of, systemTicker(), new MetadataConfig());
shardManager = new DatabaseShardManager(dbi, new DaoSupplier<>(dbi, ShardDao.class), ImmutableSet::of, assignmentLimiter, systemTicker(), new Duration(0, MINUTES));
TestingNodeManager nodeManager = new TestingNodeManager();
NodeSupplier nodeSupplier = nodeManager::getWorkerNodes;
String nodeName = UUID.randomUUID().toString();
nodeManager.addNode(new InternalNode(nodeName, new URI("http://127.0.0.1/"), NodeVersion.UNKNOWN, false));
CatalogName connectorId = new CatalogName("raptor");
metadata = new RaptorMetadata(dbi, shardManager);
metadata.createTable(SESSION, TEST_TABLE, false);
tableHandle = metadata.getTableHandle(SESSION, TEST_TABLE.getTable());
List<ShardInfo> shards = ImmutableList.<ShardInfo>builder()
        .add(shardInfo(UUID.randomUUID(), nodeName))
        .add(shardInfo(UUID.randomUUID(), nodeName))
        .add(shardInfo(UUID.randomUUID(), nodeName))
        .add(shardInfo(UUID.randomUUID(), nodeName))
        .build();
tableId = ((RaptorTableHandle) tableHandle).getTableId();
List<ColumnInfo> columns = metadata.getColumnHandles(SESSION, tableHandle).values().stream()
        .map(RaptorColumnHandle.class::cast)
        .map(ColumnInfo::fromHandle)
        .collect(toList());
long transactionId = shardManager.beginTransaction();
shardManager.commitShards(transactionId, tableId, columns, shards, Optional.empty(), 0);
raptorSplitManager = new RaptorSplitManager(connectorId, nodeSupplier, shardManager, false);
}
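Usage sketch (hedged, not part of the original snippet): reading the four committed shards back as splits through the split manager. The getSplits overload below (transaction, session, table, scheduling strategy, dynamic filter) and the getNextBatch(ConnectorPartitionHandle, int) signature match Trino releases of this vintage but have changed in later versions; NOT_PARTITIONED and UNGROUPED_SCHEDULING are SPI constants:
ConnectorSplitSource splitSource = raptorSplitManager.getSplits(
        new RaptorTransactionHandle(), SESSION, tableHandle, UNGROUPED_SCHEDULING, DynamicFilter.EMPTY);
int splitCount = 0;
while (!splitSource.isFinished()) {
    splitCount += getFutureValue(splitSource.getNextBatch(NOT_PARTITIONED, 1000)).getSplits().size();
}
// one shard per split is expected here, so splitCount should be 4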