Use of io.prestosql.testing.TestingNodeManager in project hetu-core by openlookeng.
Class TestHiveWriterFactory, method testSortingPath().
@Test
public void testSortingPath() {
setUp();
String targetPath = "/tmp";
String writePath = "/tmp/table";
Optional<WriteIdInfo> writeIdInfo = Optional.of(new WriteIdInfo(1, 1, 0));
StorageFormat storageFormat = StorageFormat.fromHiveStorageFormat(ORC);
Storage storage = new Storage(storageFormat, "", Optional.empty(), false, ImmutableMap.of());
Table table = new Table("schema", "table", "user", "MANAGED_TABLE", storage, ImmutableList.of(new Column("col_1", HiveType.HIVE_INT, Optional.empty())), ImmutableList.of(), ImmutableMap.of("transactional", "true"), Optional.of("original"), Optional.of("expanded"));
HiveConfig hiveConfig = getHiveConfig();
HivePageSinkMetadata hivePageSinkMetadata = new HivePageSinkMetadata(new SchemaTableName("schema", "table"), Optional.of(table), ImmutableMap.of());
PageSorter pageSorter = new PagesIndexPageSorter(new PagesIndex.TestingFactory(false));
Metadata metadata = createTestMetadataManager();
TypeManager typeManager = new InternalTypeManager(metadata.getFunctionAndTypeManager());
HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveConfig), ImmutableSet.of());
HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveConfig, new NoHdfsAuthentication());
LocationService locationService = new HiveLocationService(hdfsEnvironment);
ConnectorSession session = newSession();
HiveWriterFactory hiveWriterFactory = new HiveWriterFactory(
        getDefaultHiveFileWriterFactories(hiveConfig),
        "schema",
        "table",
        false,
        HiveACIDWriteType.DELETE,
        ImmutableList.of(new HiveColumnHandle("col_1", HiveType.HIVE_INT, new TypeSignature("integer", ImmutableList.of()), 0, HiveColumnHandle.ColumnType.REGULAR, Optional.empty())),
        ORC,
        ORC,
        ImmutableMap.of(),
        OptionalInt.empty(),
        ImmutableList.of(),
        new LocationHandle(targetPath, writePath, false, LocationHandle.WriteMode.STAGE_AND_MOVE_TO_TARGET_DIRECTORY, writeIdInfo),
        locationService,
        session.getQueryId(),
        new HivePageSinkMetadataProvider(hivePageSinkMetadata, CachingHiveMetastore.memoizeMetastore(metastore, 1000), new HiveIdentity(session)),
        typeManager,
        hdfsEnvironment,
        pageSorter,
        hiveConfig.getWriterSortBufferSize(),
        hiveConfig.getMaxOpenSortFiles(),
        false,
        UTC,
        session,
        new TestingNodeManager("fake-environment"),
        new HiveEventClient(),
        new HiveSessionProperties(hiveConfig, new OrcFileWriterConfig(), new ParquetFileWriterConfig()),
        new HiveWriterStats(),
        getDefaultOrcFileWriterFactory(hiveConfig));
HiveWriter hiveWriter = hiveWriterFactory.createWriter(ImmutableList.of(), OptionalInt.empty(), Optional.empty());
assertEquals(((SortingFileWriter) hiveWriter.getFileWriter()).getTempFilePrefix().getName(), ".tmp-sort.bucket_00000");
}
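For context, the TestingNodeManager passed to HiveWriterFactory above is a stand-in for the engine's NodeManager SPI: it supplies a local node and an environment name without a running cluster. Below is a minimal, hypothetical sketch of querying it directly; the accessor names come from the io.prestosql.spi.NodeManager and Node interfaces, and the class itself is illustrative only, not part of either project.
import io.prestosql.spi.Node;
import io.prestosql.testing.TestingNodeManager;

public final class TestingNodeManagerSketch {
    private TestingNodeManagerSketch() {}

    public static void main(String[] args) {
        // Same constructor argument as in testSortingPath above: the string is the environment name.
        TestingNodeManager nodeManager = new TestingNodeManager("fake-environment");

        // NodeManager SPI calls that connector components such as HiveWriterFactory may issue.
        Node current = nodeManager.getCurrentNode();
        System.out.println("environment: " + nodeManager.getEnvironment());
        System.out.println("local node: " + current.getNodeIdentifier() + " @ " + current.getHost());
        System.out.println("node count: " + nodeManager.getAllNodes().size());
    }
}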
Use of io.prestosql.testing.TestingNodeManager in project boostkit-bigdata by kunpengcompute.
Class AbstractTestHiveFileSystem, method setup().
protected void setup(String host, int port, String databaseName, Function<HiveConfig, HdfsConfiguration> hdfsConfigurationProvider, boolean s3SelectPushdownEnabled) {
database = databaseName;
table = new SchemaTableName(database, "presto_test_external_fs");
String random = UUID.randomUUID().toString().toLowerCase(ENGLISH).replace("-", "");
temporaryCreateTable = new SchemaTableName(database, "tmp_presto_test_create_" + random);
config = new HiveConfig().setS3SelectPushdownEnabled(s3SelectPushdownEnabled);
String proxy = System.getProperty("hive.metastore.thrift.client.socks-proxy");
if (proxy != null) {
config.setMetastoreSocksProxy(HostAndPort.fromString(proxy));
}
MetastoreLocator metastoreLocator = new TestingMetastoreLocator(config, host, port);
ExecutorService executors = newCachedThreadPool(daemonThreadsNamed("hive-%s"));
ExecutorService executorRefresh = newCachedThreadPool(daemonThreadsNamed("hive-refresh-%s"));
HivePartitionManager hivePartitionManager = new HivePartitionManager(TYPE_MANAGER, config);
HdfsConfiguration hdfsConfiguration = hdfsConfigurationProvider.apply(config);
hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, config, new NoHdfsAuthentication());
metastoreClient = new TestingHiveMetastore(new BridgingHiveMetastore(new ThriftHiveMetastore(metastoreLocator, new ThriftHiveMetastoreConfig())), executors, executorRefresh, config, getBasePath(), hdfsEnvironment);
locationService = new HiveLocationService(hdfsEnvironment);
JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
metadataFactory = new HiveMetadataFactory(config, metastoreClient, hdfsEnvironment, hivePartitionManager, newDirectExecutorService(), vacuumExecutorService, heartbeatService, vacuumExecutorService, TYPE_MANAGER, locationService, partitionUpdateCodec, new HiveTypeTranslator(), new NodeVersion("test_version"), SqlStandardAccessControlMetadata::new);
transactionManager = new HiveTransactionManager();
splitManager = new HiveSplitManager(
        transactionHandle -> ((HiveMetadata) transactionManager.get(transactionHandle)).getMetastore(),
        hivePartitionManager,
        new NamenodeStats(),
        hdfsEnvironment,
        new CachingDirectoryLister(new HiveConfig()),
        new BoundedExecutor(executors, config.getMaxSplitIteratorThreads()),
        new HiveCoercionPolicy(TYPE_MANAGER),
        new CounterStat(),
        config.getMaxOutstandingSplits(),
        config.getMaxOutstandingSplitsSize(),
        config.getMinPartitionBatchSize(),
        config.getMaxPartitionBatchSize(),
        config.getMaxInitialSplits(),
        config.getSplitLoaderConcurrency(),
        config.getMaxSplitsPerSecond(),
        config.getRecursiveDirWalkerEnabled(),
        null,
        config);
pageSinkProvider = new HivePageSinkProvider(
        getDefaultHiveFileWriterFactories(config),
        hdfsEnvironment,
        PAGE_SORTER,
        metastoreClient,
        new GroupByHashPageIndexerFactory(new JoinCompiler(createTestMetadataManager())),
        TYPE_MANAGER,
        config,
        locationService,
        partitionUpdateCodec,
        new TestingNodeManager("fake-environment"),
        new HiveEventClient(),
        new HiveSessionProperties(config, new OrcFileWriterConfig(), new ParquetFileWriterConfig()),
        new HiveWriterStats(),
        getDefaultOrcFileWriterFactory(config));
pageSourceProvider = new HivePageSourceProvider(config, hdfsEnvironment, getDefaultHiveRecordCursorProvider(config), getDefaultHiveDataStreamFactories(config), TYPE_MANAGER, getNoOpIndexCache(), getDefaultHiveSelectiveFactories(config));
}
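The abstract setup() above is normally driven by a small concrete subclass that supplies metastore coordinates and an HDFS configuration provider. A hypothetical sketch under stated assumptions: the TestNG parameter names and the warehouse path are invented for illustration, and getBasePath() is assumed to be the abstract hook consulted by TestingHiveMetastore in the snippet. The HdfsConfiguration lambda mirrors the wiring shown in the testSortingPath snippet earlier on this page.
import com.google.common.collect.ImmutableSet;
import org.apache.hadoop.fs.Path;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Parameters;

public class TestHiveFileSystemExample extends AbstractTestHiveFileSystem {
    @Parameters({"hive.metastore.host", "hive.metastore.port", "hive.database.name"})
    @BeforeClass
    public void initialize(String host, int port, String databaseName) {
        // Provide the HdfsConfiguration the same way the other snippets construct it.
        super.setup(host, port, databaseName,
                hiveConfig -> new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveConfig), ImmutableSet.of()),
                false);
    }

    @Override
    protected Path getBasePath() {
        // Assumption: a plain HDFS warehouse root; real subclasses point this at their test cluster.
        return new Path("hdfs://hadoop-master:9000/user/hive/warehouse");
    }
}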
Use of io.prestosql.testing.TestingNodeManager in project hetu-core by openlookeng.
Class TestDataSourceTableSplitManager, method setUp().
@BeforeClass
public void setUp() throws Exception {
initSplitDatabase();
nodeManager = new TestingNodeManager();
catalogName = connection.getCatalog();
initTable();
}
Use of io.prestosql.testing.TestingNodeManager in project hetu-core by openlookeng.
Class TestHivePageSink, method createPageSink().
private static ConnectorPageSink createPageSink(HiveTransactionHandle transaction, HiveConfig config, HiveMetastore metastore, Path outputPath, HiveWriterStats stats) {
ConnectorSession session = getSession(config);
HiveIdentity identity = new HiveIdentity(session);
LocationHandle locationHandle = new LocationHandle(outputPath, outputPath, false, DIRECT_TO_TARGET_NEW_DIRECTORY, Optional.empty());
HiveOutputTableHandle handle = new HiveOutputTableHandle(
        SCHEMA_NAME,
        TABLE_NAME,
        getColumnHandles(),
        new HivePageSinkMetadata(new SchemaTableName(SCHEMA_NAME, TABLE_NAME), metastore.getTable(identity, SCHEMA_NAME, TABLE_NAME), ImmutableMap.of()),
        locationHandle,
        config.getHiveStorageFormat(),
        config.getHiveStorageFormat(),
        ImmutableList.of(),
        Optional.empty(),
        "test",
        ImmutableMap.of());
JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
HdfsEnvironment hdfsEnvironment = HiveTestUtils.createTestHdfsEnvironment(config);
HivePageSinkProvider provider = new HivePageSinkProvider(
        HiveTestUtils.getDefaultHiveFileWriterFactories(config),
        hdfsEnvironment,
        HiveTestUtils.PAGE_SORTER,
        metastore,
        new GroupByHashPageIndexerFactory(new JoinCompiler(createTestMetadataManager())),
        HiveTestUtils.TYPE_MANAGER,
        config,
        new HiveLocationService(hdfsEnvironment),
        partitionUpdateCodec,
        new TestingNodeManager("fake-environment"),
        new HiveEventClient(),
        new HiveSessionProperties(config, new OrcFileWriterConfig(), new ParquetFileWriterConfig()),
        stats,
        HiveTestUtils.getDefaultOrcFileWriterFactory(config));
return provider.createPageSink(transaction, getSession(config), handle);
}
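The object returned above is an SPI ConnectorPageSink. Below is a minimal hypothetical sketch of how a caller in the same test class might drive it; the single-INTEGER-column page is an assumption for illustration and must in practice match the getColumnHandles() layout used to build the HiveOutputTableHandle.
import static io.prestosql.spi.type.IntegerType.INTEGER;
import io.airlift.slice.Slice;
import io.prestosql.spi.Page;
import io.prestosql.spi.block.BlockBuilder;
import io.prestosql.spi.connector.ConnectorPageSink;
import java.util.Collection;

private static Collection<Slice> writeOneRow(HiveTransactionHandle transaction, HiveConfig config, HiveMetastore metastore, Path outputPath, HiveWriterStats stats) {
    ConnectorPageSink sink = createPageSink(transaction, config, metastore, outputPath, stats);

    // Build a one-row, one-column page (assumes the output table has a single INTEGER column).
    BlockBuilder blockBuilder = INTEGER.createBlockBuilder(null, 1);
    INTEGER.writeLong(blockBuilder, 42);
    Page page = new Page(blockBuilder.build());

    sink.appendPage(page);
    // finish() returns the serialized PartitionUpdate fragments that the metadata layer later commits.
    return sink.finish().join();
}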
Use of io.prestosql.testing.TestingNodeManager in project hetu-core by openlookeng.
Class AbstractTestHiveFileSystem, method setup() — the body is identical to the boostkit-bigdata snippet above, so it is not repeated here.