Use of com.facebook.presto.hive.authentication.NoHdfsAuthentication in project presto by prestodb.
From class AbstractTestHiveClient, method setup:
protected final void setup(String databaseName, HiveClientConfig hiveClientConfig, CacheConfig cacheConfig,
        MetastoreClientConfig metastoreClientConfig, ExtendedHiveMetastore hiveMetastore)
{
    HiveConnectorId connectorId = new HiveConnectorId("hive-test");
    setupHive(connectorId.toString(), databaseName, hiveClientConfig.getTimeZone());
    hivePartitionManager = new HivePartitionManager(FUNCTION_AND_TYPE_MANAGER, hiveClientConfig);
    metastoreClient = hiveMetastore;
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(
            new HdfsConfigurationInitializer(hiveClientConfig, metastoreClientConfig), ImmutableSet.of());
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());
    locationService = new HiveLocationService(hdfsEnvironment);
    metadataFactory = new HiveMetadataFactory(metastoreClient, hdfsEnvironment, hivePartitionManager, timeZone,
            true, false, false, false, true, true,
            getHiveClientConfig().getMaxPartitionBatchSize(), getHiveClientConfig().getMaxPartitionsPerScan(), false,
            FUNCTION_AND_TYPE_MANAGER, locationService, FUNCTION_RESOLUTION, ROW_EXPRESSION_SERVICE,
            FILTER_STATS_CALCULATOR_SERVICE, new TableParameterCodec(),
            HiveTestUtils.PARTITION_UPDATE_CODEC, HiveTestUtils.PARTITION_UPDATE_SMILE_CODEC,
            listeningDecorator(executor), new HiveTypeTranslator(),
            new HiveStagingFileCommitter(hdfsEnvironment, listeningDecorator(executor)),
            new HiveZeroRowFileCreator(hdfsEnvironment, new OutputStreamDataSinkFactory(), listeningDecorator(executor)),
            TEST_SERVER_VERSION, new HivePartitionObjectBuilder(),
            new HiveEncryptionInformationProvider(ImmutableList.of()), new HivePartitionStats(),
            new HiveFileRenamer(), DEFAULT_COLUMN_CONVERTER_PROVIDER);
    transactionManager = new HiveTransactionManager();
    encryptionInformationProvider = new HiveEncryptionInformationProvider(ImmutableList.of());
    splitManager = new HiveSplitManager(transactionManager, new NamenodeStats(), hdfsEnvironment,
            new CachingDirectoryLister(new HadoopDirectoryLister(), new HiveClientConfig()), directExecutor(),
            new HiveCoercionPolicy(FUNCTION_AND_TYPE_MANAGER), new CounterStat(), 100,
            hiveClientConfig.getMaxOutstandingSplitsSize(), hiveClientConfig.getMinPartitionBatchSize(),
            hiveClientConfig.getMaxPartitionBatchSize(), hiveClientConfig.getSplitLoaderConcurrency(), false,
            new ConfigBasedCacheQuotaRequirementProvider(cacheConfig), encryptionInformationProvider);
    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(hiveClientConfig, metastoreClientConfig),
            hdfsEnvironment, PAGE_SORTER, metastoreClient, new GroupByHashPageIndexerFactory(JOIN_COMPILER),
            FUNCTION_AND_TYPE_MANAGER, getHiveClientConfig(), getMetastoreClientConfig(), locationService,
            HiveTestUtils.PARTITION_UPDATE_CODEC, HiveTestUtils.PARTITION_UPDATE_SMILE_CODEC,
            new TestingNodeManager("fake-environment"), new HiveEventClient(),
            new HiveSessionProperties(hiveClientConfig, new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig()),
            new HiveWriterStats(), getDefaultOrcFileWriterFactory(hiveClientConfig, metastoreClientConfig),
            DEFAULT_COLUMN_CONVERTER_PROVIDER);
    pageSourceProvider = new HivePageSourceProvider(hiveClientConfig, hdfsEnvironment,
            getDefaultHiveRecordCursorProvider(hiveClientConfig, metastoreClientConfig),
            getDefaultHiveBatchPageSourceFactories(hiveClientConfig, metastoreClientConfig),
            getDefaultHiveSelectivePageSourceFactories(hiveClientConfig, metastoreClientConfig),
            FUNCTION_AND_TYPE_MANAGER, ROW_EXPRESSION_SERVICE);
}
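All three snippets on this page share the same wiring: an HdfsConfigurationInitializer wrapped in a HiveHdfsConfiguration, handed to an HdfsEnvironment together with a NoHdfsAuthentication instance, so filesystem access runs as the current JVM user with no Kerberos involved. Below is a minimal sketch of just that wiring, assuming the presto-hive test dependencies are on the classpath; the class and method names are illustrative, not from the Presto codebase:

import com.facebook.presto.hive.HdfsConfiguration;
import com.facebook.presto.hive.HdfsConfigurationInitializer;
import com.facebook.presto.hive.HdfsEnvironment;
import com.facebook.presto.hive.HiveClientConfig;
import com.facebook.presto.hive.HiveHdfsConfiguration;
import com.facebook.presto.hive.MetastoreClientConfig;
import com.facebook.presto.hive.authentication.NoHdfsAuthentication;
import com.google.common.collect.ImmutableSet;

public final class NoAuthHdfsEnvironments
{
    private NoAuthHdfsEnvironments() {}

    // Builds an HdfsEnvironment with authentication disabled: every HDFS call
    // runs as the current JVM user, which is why NoHdfsAuthentication appears
    // in test setup code rather than in production (Kerberos) wiring.
    public static HdfsEnvironment createTestingHdfsEnvironment()
    {
        HiveClientConfig hiveClientConfig = new HiveClientConfig();
        MetastoreClientConfig metastoreClientConfig = new MetastoreClientConfig();
        HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(
                new HdfsConfigurationInitializer(hiveClientConfig, metastoreClientConfig),
                ImmutableSet.of()); // no extra dynamic configuration providers
        return new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());
    }
}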
Use of com.facebook.presto.hive.authentication.NoHdfsAuthentication in project presto by prestodb.
From class HiveQueryRunner, method createQueryRunner:
public static DistributedQueryRunner createQueryRunner(
        Iterable<TpchTable<?>> tables,
        Map<String, String> extraProperties,
        Map<String, String> extraCoordinatorProperties,
        String security,
        Map<String, String> extraHiveProperties,
        Optional<Integer> workerCount,
        Optional<Path> baseDataDir,
        Optional<BiFunction<Integer, URI, Process>> externalWorkerLauncher)
        throws Exception
{
    assertEquals(DateTimeZone.getDefault(), TIME_ZONE,
            "Timezone not configured correctly. Add -Duser.timezone=America/Bahia_Banderas to your JVM arguments");
    setupLogging();
    Map<String, String> systemProperties = ImmutableMap.<String, String>builder()
            .put("task.writer-count", "2")
            .put("task.partitioned-writer-count", "4")
            .put("tracing.tracer-type", "simple")
            .put("tracing.enable-distributed-tracing", "simple")
            .putAll(extraProperties)
            .build();
    DistributedQueryRunner queryRunner =
            DistributedQueryRunner.builder(createSession(Optional.of(new SelectedRole(ROLE, Optional.of("admin")))))
                    .setNodeCount(workerCount.orElse(4))
                    .setExtraProperties(systemProperties)
                    .setCoordinatorProperties(extraCoordinatorProperties)
                    .setBaseDataDir(baseDataDir)
                    .setExternalWorkerLauncher(externalWorkerLauncher)
                    .build();
    try {
        queryRunner.installPlugin(new TpchPlugin());
        queryRunner.installPlugin(new TestingHiveEventListenerPlugin());
        queryRunner.createCatalog("tpch", "tpch");
        File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toFile();
        HiveClientConfig hiveClientConfig = new HiveClientConfig();
        MetastoreClientConfig metastoreClientConfig = new MetastoreClientConfig();
        HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(
                new HdfsConfigurationInitializer(hiveClientConfig, metastoreClientConfig), ImmutableSet.of());
        HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());
        FileHiveMetastore metastore = new FileHiveMetastore(hdfsEnvironment, baseDir.toURI().toString(), "test");
        queryRunner.installPlugin(new HivePlugin(HIVE_CATALOG, Optional.of(metastore)));
        Map<String, String> hiveProperties = ImmutableMap.<String, String>builder()
                .putAll(extraHiveProperties)
                .put("hive.time-zone", TIME_ZONE.getID())
                .put("hive.security", security)
                .put("hive.max-partitions-per-scan", "1000")
                .put("hive.assume-canonical-partition-keys", "true")
                .put("hive.collect-column-statistics-on-write", "true")
                .put("hive.temporary-table-schema", TEMPORARY_TABLE_SCHEMA)
                .build();
        // Only force the TEXTFILE/NONE defaults when the caller did not pick a storage format.
        Map<String, String> storageProperties = extraHiveProperties.containsKey("hive.storage-format")
                ? ImmutableMap.copyOf(hiveProperties)
                : ImmutableMap.<String, String>builder()
                        .putAll(hiveProperties)
                        .put("hive.storage-format", "TEXTFILE")
                        .put("hive.compression-codec", "NONE")
                        .build();
        Map<String, String> hiveBucketedProperties = ImmutableMap.<String, String>builder()
                .putAll(storageProperties)
                // so that each bucket has multiple splits
                .put("hive.max-initial-split-size", "10kB")
                // so that each bucket has multiple splits
                .put("hive.max-split-size", "10kB")
                .build();
        queryRunner.createCatalog(HIVE_CATALOG, HIVE_CATALOG, hiveProperties);
        queryRunner.createCatalog(HIVE_BUCKETED_CATALOG, HIVE_CATALOG, hiveBucketedProperties);
        if (!metastore.getDatabase(METASTORE_CONTEXT, TPCH_SCHEMA).isPresent()) {
            metastore.createDatabase(METASTORE_CONTEXT, createDatabaseMetastoreObject(TPCH_SCHEMA));
            copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession(Optional.empty()), tables);
        }
        if (!metastore.getDatabase(METASTORE_CONTEXT, TPCH_BUCKETED_SCHEMA).isPresent()) {
            metastore.createDatabase(METASTORE_CONTEXT, createDatabaseMetastoreObject(TPCH_BUCKETED_SCHEMA));
            copyTpchTablesBucketed(queryRunner, "tpch", TINY_SCHEMA_NAME, createBucketedSession(Optional.empty()), tables);
        }
        if (!metastore.getDatabase(METASTORE_CONTEXT, TEMPORARY_TABLE_SCHEMA).isPresent()) {
            metastore.createDatabase(METASTORE_CONTEXT, createDatabaseMetastoreObject(TEMPORARY_TABLE_SCHEMA));
        }
        return queryRunner;
    }
    catch (Exception e) {
        queryRunner.close();
        throw e;
    }
}
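A hedged usage sketch for the factory above: a throwaway main that calls the overload shown, runs one query, and shuts down. The class name, the "sql-standard" security mode, and the empty override maps are assumptions for illustration; note the -Duser.timezone=America/Bahia_Banderas JVM flag that the assertion at the top of createQueryRunner demands.

import com.facebook.presto.hive.HiveQueryRunner;
import com.facebook.presto.tests.DistributedQueryRunner;
import com.google.common.collect.ImmutableMap;
import io.airlift.tpch.TpchTable;

import java.util.Optional;

public final class HiveQueryRunnerExample
{
    public static void main(String[] args) throws Exception
    {
        // Requires -Duser.timezone=America/Bahia_Banderas on the JVM command line,
        // or the assertEquals at the top of createQueryRunner fails immediately.
        try (DistributedQueryRunner queryRunner = HiveQueryRunner.createQueryRunner(
                TpchTable.getTables(),  // copy every tiny TPCH table into the hive catalog
                ImmutableMap.of(),      // extraProperties
                ImmutableMap.of(),      // extraCoordinatorProperties
                "sql-standard",         // security: one valid hive.security mode (assumption)
                ImmutableMap.of(),      // extraHiveProperties
                Optional.empty(),       // workerCount: defaults to 4 above
                Optional.empty(),       // baseDataDir
                Optional.empty())) {    // externalWorkerLauncher
            System.out.println(queryRunner.execute("SELECT count(*) FROM hive.tpch.nation"));
        }
    }
}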
Use of com.facebook.presto.hive.authentication.NoHdfsAuthentication in project presto by prestodb.
From class TestHiveLogicalPlanner, method replicateHiveMetastore:
private ExtendedHiveMetastore replicateHiveMetastore(DistributedQueryRunner queryRunner)
{
    URI baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toUri();
    HiveClientConfig hiveClientConfig = new HiveClientConfig();
    MetastoreClientConfig metastoreClientConfig = new MetastoreClientConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(
            new HdfsConfigurationInitializer(hiveClientConfig, metastoreClientConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());
    return new FileHiveMetastore(hdfsEnvironment, baseDir.toString(), "test");
}
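Because FileHiveMetastore is just a view over the catalog files under hive_data, the replica created here observes exactly the schemas and tables the query runner's own metastore writes, so a test can assert on metastore state without issuing a query. A minimal fragment from inside such a test method, assuming the METASTORE_CONTEXT constant used above, a TestNG assertTrue static import, and an illustrative schema name:

// Illustrative: the replica reads the same on-disk catalog the runner writes,
// so metastore state can be verified out-of-band.
ExtendedHiveMetastore replica = replicateHiveMetastore(queryRunner);
assertTrue(replica.getDatabase(METASTORE_CONTEXT, "tpch").isPresent());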