Use of com.facebook.presto.testing.TestingNodeManager in project presto by prestodb.
From the class TestRaptorSplitManager, method setup:
@BeforeMethod
public void setup()
        throws Exception
{
    FunctionAndTypeManager functionAndTypeManager = createTestFunctionAndTypeManager();
    DBI dbi = new DBI("jdbc:h2:mem:test" + System.nanoTime() + "_" + ThreadLocalRandom.current().nextInt());
    dbi.registerMapper(new TableColumn.Mapper(functionAndTypeManager));
    dummyHandle = dbi.open();
    createTablesWithRetry(dbi);
    temporary = createTempDir();

    AssignmentLimiter assignmentLimiter = new AssignmentLimiter(ImmutableSet::of, systemTicker(), new MetadataConfig());
    shardManager = new DatabaseShardManager(dbi, new DaoSupplier<>(dbi, ShardDao.class), ImmutableSet::of, assignmentLimiter, systemTicker(), new Duration(0, MINUTES));

    // Register a single worker node and expose it to Raptor through a NodeSupplier
    TestingNodeManager nodeManager = new TestingNodeManager();
    NodeSupplier nodeSupplier = nodeManager::getWorkerNodes;
    String nodeName = UUID.randomUUID().toString();
    nodeManager.addNode(new InternalNode(nodeName, new URI("http://127.0.0.1/"), NodeVersion.UNKNOWN, false));

    RaptorConnectorId connectorId = new RaptorConnectorId("raptor");
    metadata = new RaptorMetadata(connectorId.toString(), dbi, shardManager, createTestFunctionAndTypeManager());
    metadata.createTable(SESSION, TEST_TABLE, false);
    tableHandle = metadata.getTableHandle(SESSION, TEST_TABLE.getTable());

    // Commit four shards, all assigned to the single registered node
    List<ShardInfo> shards = ImmutableList.<ShardInfo>builder()
            .add(shardInfo(UUID.randomUUID(), nodeName))
            .add(shardInfo(UUID.randomUUID(), nodeName))
            .add(shardInfo(UUID.randomUUID(), nodeName))
            .add(shardInfo(UUID.randomUUID(), nodeName))
            .build();
    tableId = ((RaptorTableHandle) tableHandle).getTableId();
    List<ColumnInfo> columns = metadata.getColumnHandles(SESSION, tableHandle).values().stream()
            .map(RaptorColumnHandle.class::cast)
            .map(ColumnInfo::fromHandle)
            .collect(toList());
    long transactionId = shardManager.beginTransaction();
    shardManager.commitShards(transactionId, tableId, columns, shards, Optional.empty(), 0);

    raptorSplitManager = new RaptorSplitManager(connectorId, nodeSupplier, shardManager, false);
}
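The TestingNodeManager wiring above can be exercised on its own. The following is a minimal sketch assuming the same test dependencies as TestRaptorSplitManager; the class name TestNodeSupplierWiring and the assertion are illustrative, not part of the Presto sources.

// Hypothetical companion test: TestingNodeManager feeding a Raptor NodeSupplier.
import com.facebook.presto.client.NodeVersion;
import com.facebook.presto.metadata.InternalNode;
import com.facebook.presto.raptor.NodeSupplier;
import com.facebook.presto.testing.TestingNodeManager;
import org.testng.annotations.Test;

import java.net.URI;
import java.util.UUID;

import static org.testng.Assert.assertTrue;

public class TestNodeSupplierWiring
{
    @Test
    public void testWorkerNodeVisibleThroughSupplier()
            throws Exception
    {
        TestingNodeManager nodeManager = new TestingNodeManager();
        String nodeName = UUID.randomUUID().toString();
        nodeManager.addNode(new InternalNode(nodeName, new URI("http://127.0.0.1/"), NodeVersion.UNKNOWN, false));

        // NodeSupplier is a functional interface, so the method reference simply
        // re-exposes the manager's worker set to Raptor components
        NodeSupplier nodeSupplier = nodeManager::getWorkerNodes;
        assertTrue(nodeSupplier.getWorkerNodes().stream()
                .anyMatch(node -> node.getNodeIdentifier().equals(nodeName)));
    }
}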
Use of com.facebook.presto.testing.TestingNodeManager in project presto by prestodb.
From the class TestOrcStorageManager, method createOrcStorageManager:
public static OrcStorageManager createOrcStorageManager(IDBI dbi, File temporary, int maxShardRows)
{
    URI directory = new File(temporary, "data").toURI();
    StorageService storageService = new LocalFileStorageService(new LocalOrcDataEnvironment(), directory);
    storageService.start();

    File backupDirectory = new File(temporary, "backup");
    FileBackupStore fileBackupStore = new FileBackupStore(backupDirectory);
    fileBackupStore.start();
    Optional<BackupStore> backupStore = Optional.of(fileBackupStore);

    ShardManager shardManager = createShardManager(dbi);
    // A default TestingNodeManager serves as the recovery manager's source of cluster membership
    ShardRecoveryManager recoveryManager = new ShardRecoveryManager(
            storageService,
            backupStore,
            new LocalOrcDataEnvironment(),
            new TestingNodeManager(),
            shardManager,
            MISSING_SHARD_DISCOVERY,
            10);
    return createOrcStorageManager(storageService, backupStore, recoveryManager, new InMemoryShardRecorder(), maxShardRows, MAX_FILE_SIZE);
}
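A caller might invoke this factory as sketched below. The in-memory H2 URL and temp-directory setup mirror the TestRaptorSplitManager snippet above, and the shard-row limit of 100 is an arbitrary illustrative value.

// Hypothetical caller of the factory above, mirroring the DBI setup used elsewhere in this listing
DBI dbi = new DBI("jdbc:h2:mem:test" + System.nanoTime() + "_" + ThreadLocalRandom.current().nextInt());
createTablesWithRetry(dbi);
File temporary = createTempDir();
OrcStorageManager storageManager = createOrcStorageManager(dbi, temporary, 100);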
Use of com.facebook.presto.testing.TestingNodeManager in project presto by prestodb.
From the class TestShardEjector, method createNodeManager:
private static NodeManager createNodeManager(String current, String... others)
{
    // The node passed to the constructor serves as the current node; the rest are added as regular nodes
    Node currentNode = createTestingNode(current);
    TestingNodeManager nodeManager = new TestingNodeManager(currentNode);
    for (String other : others) {
        nodeManager.addNode(createTestingNode(other));
    }
    return nodeManager;
}
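A minimal sketch of how the helper behaves, assuming createTestingNode(name) uses its argument as the node identifier (as the name suggests) and that the current node is included in getAllNodes(); both are assumptions about this test class, not documented API guarantees.

// Hypothetical usage: the first argument becomes the current node, the rest are additional nodes
NodeManager nodeManager = createNodeManager("node1", "node2", "node3");
assertEquals(nodeManager.getCurrentNode().getNodeIdentifier(), "node1");
assertEquals(nodeManager.getAllNodes().size(), 3);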
Use of com.facebook.presto.testing.TestingNodeManager in project presto by prestodb.
From the class TestHivePageSink, method createPageSink:
private static ConnectorPageSink createPageSink(HiveTransactionHandle transaction, HiveClientConfig config, MetastoreClientConfig metastoreClientConfig, ExtendedHiveMetastore metastore, Path outputPath, HiveWriterStats stats)
{
    LocationHandle locationHandle = new LocationHandle(outputPath, outputPath, Optional.empty(), NEW, DIRECT_TO_TARGET_NEW_DIRECTORY);
    HiveOutputTableHandle handle = new HiveOutputTableHandle(
            SCHEMA_NAME, TABLE_NAME, getColumnHandles(),
            new HivePageSinkMetadata(new SchemaTableName(SCHEMA_NAME, TABLE_NAME), metastore.getTable(METASTORE_CONTEXT, SCHEMA_NAME, TABLE_NAME), ImmutableMap.of()),
            locationHandle,
            config.getHiveStorageFormat(), config.getHiveStorageFormat(), config.getHiveStorageFormat(),
            config.getCompressionCodec(),
            ImmutableList.of(), Optional.empty(), ImmutableList.of(),
            "test", ImmutableMap.of(), Optional.empty());
    HdfsEnvironment hdfsEnvironment = createTestHdfsEnvironment(config, metastoreClientConfig);
    // TestingNodeManager("fake-environment") stands in for the cluster's node manager
    HivePageSinkProvider provider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config, metastoreClientConfig), hdfsEnvironment, PAGE_SORTER, metastore,
            new GroupByHashPageIndexerFactory(new JoinCompiler(MetadataManager.createTestMetadataManager(), new FeaturesConfig())),
            FUNCTION_AND_TYPE_MANAGER, config, metastoreClientConfig,
            new HiveLocationService(hdfsEnvironment),
            HiveTestUtils.PARTITION_UPDATE_CODEC, HiveTestUtils.PARTITION_UPDATE_SMILE_CODEC,
            new TestingNodeManager("fake-environment"), new HiveEventClient(),
            new HiveSessionProperties(config, new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig()),
            stats, getDefaultOrcFileWriterFactory(config, metastoreClientConfig),
            HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER);
    return provider.createPageSink(transaction, getSession(config), handle, TEST_HIVE_PAGE_SINK_CONTEXT);
}
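Once created, the sink is driven through the standard ConnectorPageSink lifecycle. A minimal sketch, assuming the arguments come from the surrounding test; generatePage(rowCount) is a hypothetical helper standing in for whatever builds a test Page.

// Hypothetical driver for the sink above
ConnectorPageSink pageSink = createPageSink(transaction, config, metastoreClientConfig, metastore, outputPath, stats);
pageSink.appendPage(generatePage(rowCount));
// finish() returns a future of the written fragments, which a real test would hand to the metadata commit
Collection<Slice> fragments = getFutureValue(pageSink.finish());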
Use of com.facebook.presto.testing.TestingNodeManager in project presto by prestodb.
From the class AbstractTestHiveFileSystem, method setup:
protected void setup(String host, int port, String databaseName, BiFunction<HiveClientConfig, MetastoreClientConfig, HdfsConfiguration> hdfsConfigurationProvider, boolean s3SelectPushdownEnabled)
{
    database = databaseName;
    table = new SchemaTableName(database, "presto_test_external_fs");
    String random = UUID.randomUUID().toString().toLowerCase(ENGLISH).replace("-", "");
    temporaryCreateTable = new SchemaTableName(database, "tmp_presto_test_create_" + random);
    config = new HiveClientConfig().setS3SelectPushdownEnabled(s3SelectPushdownEnabled);
    cacheConfig = new CacheConfig();
    metastoreClientConfig = new MetastoreClientConfig();
    String proxy = System.getProperty("hive.metastore.thrift.client.socks-proxy");
    if (proxy != null) {
        metastoreClientConfig.setMetastoreSocksProxy(HostAndPort.fromString(proxy));
    }
    HiveCluster hiveCluster = new TestingHiveCluster(metastoreClientConfig, host, port);
    ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("hive-%s"));
    HivePartitionManager hivePartitionManager = new HivePartitionManager(FUNCTION_AND_TYPE_MANAGER, config);
    HdfsConfiguration hdfsConfiguration = hdfsConfigurationProvider.apply(config, metastoreClientConfig);
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());
    ColumnConverterProvider columnConverterProvider = HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER;
    metastoreClient = new TestingHiveMetastore(new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster, metastoreClientConfig), new HivePartitionMutator()), executor, metastoreClientConfig, getBasePath(), hdfsEnvironment);
    locationService = new HiveLocationService(hdfsEnvironment);
    metadataFactory = new HiveMetadataFactory(
            config, metastoreClientConfig, metastoreClient, hdfsEnvironment, hivePartitionManager,
            newDirectExecutorService(), FUNCTION_AND_TYPE_MANAGER, locationService,
            FUNCTION_RESOLUTION, ROW_EXPRESSION_SERVICE, FILTER_STATS_CALCULATOR_SERVICE,
            new TableParameterCodec(), HiveTestUtils.PARTITION_UPDATE_CODEC, HiveTestUtils.PARTITION_UPDATE_SMILE_CODEC,
            new HiveTypeTranslator(),
            new HiveStagingFileCommitter(hdfsEnvironment, listeningDecorator(executor)),
            new HiveZeroRowFileCreator(hdfsEnvironment, new OutputStreamDataSinkFactory(), listeningDecorator(executor)),
            new NodeVersion("test_version"), new HivePartitionObjectBuilder(),
            new HiveEncryptionInformationProvider(ImmutableList.of()),
            new HivePartitionStats(), new HiveFileRenamer(), columnConverterProvider);
    transactionManager = new HiveTransactionManager();
    splitManager = new HiveSplitManager(
            transactionManager, new NamenodeStats(), hdfsEnvironment,
            new CachingDirectoryLister(new HadoopDirectoryLister(), new HiveClientConfig()),
            new BoundedExecutor(executor, config.getMaxSplitIteratorThreads()),
            new HiveCoercionPolicy(FUNCTION_AND_TYPE_MANAGER), new CounterStat(),
            config.getMaxOutstandingSplits(), config.getMaxOutstandingSplitsSize(),
            config.getMinPartitionBatchSize(), config.getMaxPartitionBatchSize(),
            config.getSplitLoaderConcurrency(), config.getRecursiveDirWalkerEnabled(),
            new ConfigBasedCacheQuotaRequirementProvider(cacheConfig),
            new HiveEncryptionInformationProvider(ImmutableSet.of()));
    // TestingNodeManager("fake-environment") again stands in for the cluster's node manager
    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config, metastoreClientConfig), hdfsEnvironment, PAGE_SORTER, metastoreClient,
            new GroupByHashPageIndexerFactory(new JoinCompiler(MetadataManager.createTestMetadataManager(), new FeaturesConfig())),
            FUNCTION_AND_TYPE_MANAGER, config, metastoreClientConfig, locationService,
            HiveTestUtils.PARTITION_UPDATE_CODEC, HiveTestUtils.PARTITION_UPDATE_SMILE_CODEC,
            new TestingNodeManager("fake-environment"), new HiveEventClient(),
            new HiveSessionProperties(config, new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig()),
            new HiveWriterStats(), getDefaultOrcFileWriterFactory(config, metastoreClientConfig), columnConverterProvider);
    pageSourceProvider = new HivePageSourceProvider(
            config, hdfsEnvironment,
            getDefaultHiveRecordCursorProvider(config, metastoreClientConfig),
            getDefaultHiveBatchPageSourceFactories(config, metastoreClientConfig),
            getDefaultHiveSelectivePageSourceFactories(config, metastoreClientConfig),
            FUNCTION_AND_TYPE_MANAGER, ROW_EXPRESSION_SERVICE);
}
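The "fake-environment" string passed to TestingNodeManager is what connector code observes through the standard NodeManager interface. A minimal sketch; the assertions reflect the NodeManager SPI contract rather than anything specific to this test class.

// Minimal sketch of what connectors see from the node manager constructed above
TestingNodeManager nodeManager = new TestingNodeManager("fake-environment");
assertEquals(nodeManager.getEnvironment(), "fake-environment");
// the manager always reports a current (local) node for components that need one
assertNotNull(nodeManager.getCurrentNode());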