Search in sources :

Example 11 with NoHdfsAuthentication

use of com.facebook.presto.hive.authentication.NoHdfsAuthentication in project presto by prestodb.

the class AbstractTestHiveClient method setup.

protected final void setup(String databaseName, HiveClientConfig hiveClientConfig, CacheConfig cacheConfig, MetastoreClientConfig metastoreClientConfig, ExtendedHiveMetastore hiveMetastore) {
    HiveConnectorId connectorId = new HiveConnectorId("hive-test");
    setupHive(connectorId.toString(), databaseName, hiveClientConfig.getTimeZone());
    hivePartitionManager = new HivePartitionManager(FUNCTION_AND_TYPE_MANAGER, hiveClientConfig);
    metastoreClient = hiveMetastore;
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveClientConfig, metastoreClientConfig), ImmutableSet.of());
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());
    locationService = new HiveLocationService(hdfsEnvironment);
    metadataFactory = new HiveMetadataFactory(metastoreClient, hdfsEnvironment, hivePartitionManager, timeZone, true, false, false, false, true, true, getHiveClientConfig().getMaxPartitionBatchSize(), getHiveClientConfig().getMaxPartitionsPerScan(), false, FUNCTION_AND_TYPE_MANAGER, locationService, FUNCTION_RESOLUTION, ROW_EXPRESSION_SERVICE, FILTER_STATS_CALCULATOR_SERVICE, new TableParameterCodec(), HiveTestUtils.PARTITION_UPDATE_CODEC, HiveTestUtils.PARTITION_UPDATE_SMILE_CODEC, listeningDecorator(executor), new HiveTypeTranslator(), new HiveStagingFileCommitter(hdfsEnvironment, listeningDecorator(executor)), new HiveZeroRowFileCreator(hdfsEnvironment, new OutputStreamDataSinkFactory(), listeningDecorator(executor)), TEST_SERVER_VERSION, new HivePartitionObjectBuilder(), new HiveEncryptionInformationProvider(ImmutableList.of()), new HivePartitionStats(), new HiveFileRenamer(), DEFAULT_COLUMN_CONVERTER_PROVIDER);
    transactionManager = new HiveTransactionManager();
    encryptionInformationProvider = new HiveEncryptionInformationProvider(ImmutableList.of());
    splitManager = new HiveSplitManager(transactionManager, new NamenodeStats(), hdfsEnvironment, new CachingDirectoryLister(new HadoopDirectoryLister(), new HiveClientConfig()), directExecutor(), new HiveCoercionPolicy(FUNCTION_AND_TYPE_MANAGER), new CounterStat(), 100, hiveClientConfig.getMaxOutstandingSplitsSize(), hiveClientConfig.getMinPartitionBatchSize(), hiveClientConfig.getMaxPartitionBatchSize(), hiveClientConfig.getSplitLoaderConcurrency(), false, new ConfigBasedCacheQuotaRequirementProvider(cacheConfig), encryptionInformationProvider);
    pageSinkProvider = new HivePageSinkProvider(getDefaultHiveFileWriterFactories(hiveClientConfig, metastoreClientConfig), hdfsEnvironment, PAGE_SORTER, metastoreClient, new GroupByHashPageIndexerFactory(JOIN_COMPILER), FUNCTION_AND_TYPE_MANAGER, getHiveClientConfig(), getMetastoreClientConfig(), locationService, HiveTestUtils.PARTITION_UPDATE_CODEC, HiveTestUtils.PARTITION_UPDATE_SMILE_CODEC, new TestingNodeManager("fake-environment"), new HiveEventClient(), new HiveSessionProperties(hiveClientConfig, new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig()), new HiveWriterStats(), getDefaultOrcFileWriterFactory(hiveClientConfig, metastoreClientConfig), DEFAULT_COLUMN_CONVERTER_PROVIDER);
    pageSourceProvider = new HivePageSourceProvider(hiveClientConfig, hdfsEnvironment, getDefaultHiveRecordCursorProvider(hiveClientConfig, metastoreClientConfig), getDefaultHiveBatchPageSourceFactories(hiveClientConfig, metastoreClientConfig), getDefaultHiveSelectivePageSourceFactories(hiveClientConfig, metastoreClientConfig), FUNCTION_AND_TYPE_MANAGER, ROW_EXPRESSION_SERVICE);
}
Also used : OutputStreamDataSinkFactory(com.facebook.presto.hive.datasink.OutputStreamDataSinkFactory) CounterStat(com.facebook.airlift.stats.CounterStat) NoHdfsAuthentication(com.facebook.presto.hive.authentication.NoHdfsAuthentication) TestingNodeManager(com.facebook.presto.testing.TestingNodeManager) CacheConfig(com.facebook.presto.cache.CacheConfig) GroupByHashPageIndexerFactory(com.facebook.presto.GroupByHashPageIndexerFactory)
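
All three examples on this page share the same bootstrap: wrap a HiveHdfsConfiguration in an HdfsEnvironment built with NoHdfsAuthentication, so file-system access runs as the current OS user with no Kerberos involved. Below is a minimal sketch of just that shared pattern, distilled from the snippets on this page; the wrapper class and method names are my own, and the import locations are assumed to match this Presto version.

import com.facebook.presto.hive.HdfsConfiguration;
import com.facebook.presto.hive.HdfsConfigurationInitializer;
import com.facebook.presto.hive.HdfsEnvironment;
import com.facebook.presto.hive.HiveClientConfig;
import com.facebook.presto.hive.HiveHdfsConfiguration;
import com.facebook.presto.hive.MetastoreClientConfig;
import com.facebook.presto.hive.authentication.NoHdfsAuthentication;
import com.google.common.collect.ImmutableSet;

public final class TestingHdfsEnvironments
{
    private TestingHdfsEnvironments() {}

    // Builds an HdfsEnvironment from default configs and NoHdfsAuthentication,
    // mirroring the setup used by AbstractTestHiveClient, HiveQueryRunner,
    // and TestHiveLogicalPlanner in the examples on this page.
    public static HdfsEnvironment createNoAuthHdfsEnvironment()
    {
        HiveClientConfig hiveClientConfig = new HiveClientConfig();
        MetastoreClientConfig metastoreClientConfig = new MetastoreClientConfig();
        HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(
                new HdfsConfigurationInitializer(hiveClientConfig, metastoreClientConfig),
                ImmutableSet.of());
        return new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());
    }
}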

Example 12 with NoHdfsAuthentication

use of com.facebook.presto.hive.authentication.NoHdfsAuthentication in project presto by prestodb.

the class HiveQueryRunner method createQueryRunner.

public static DistributedQueryRunner createQueryRunner(Iterable<TpchTable<?>> tables, Map<String, String> extraProperties, Map<String, String> extraCoordinatorProperties, String security, Map<String, String> extraHiveProperties, Optional<Integer> workerCount, Optional<Path> baseDataDir, Optional<BiFunction<Integer, URI, Process>> externalWorkerLauncher) throws Exception {
    assertEquals(DateTimeZone.getDefault(), TIME_ZONE, "Timezone not configured correctly. Add -Duser.timezone=America/Bahia_Banderas to your JVM arguments");
    setupLogging();
    Map<String, String> systemProperties = ImmutableMap.<String, String>builder().put("task.writer-count", "2").put("task.partitioned-writer-count", "4").put("tracing.tracer-type", "simple").put("tracing.enable-distributed-tracing", "simple").putAll(extraProperties).build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(createSession(Optional.of(new SelectedRole(ROLE, Optional.of("admin"))))).setNodeCount(workerCount.orElse(4)).setExtraProperties(systemProperties).setCoordinatorProperties(extraCoordinatorProperties).setBaseDataDir(baseDataDir).setExternalWorkerLauncher(externalWorkerLauncher).build();
    try {
        queryRunner.installPlugin(new TpchPlugin());
        queryRunner.installPlugin(new TestingHiveEventListenerPlugin());
        queryRunner.createCatalog("tpch", "tpch");
        File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toFile();
        HiveClientConfig hiveClientConfig = new HiveClientConfig();
        MetastoreClientConfig metastoreClientConfig = new MetastoreClientConfig();
        HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveClientConfig, metastoreClientConfig), ImmutableSet.of());
        HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());
        FileHiveMetastore metastore = new FileHiveMetastore(hdfsEnvironment, baseDir.toURI().toString(), "test");
        queryRunner.installPlugin(new HivePlugin(HIVE_CATALOG, Optional.of(metastore)));
        Map<String, String> hiveProperties = ImmutableMap.<String, String>builder().putAll(extraHiveProperties).put("hive.time-zone", TIME_ZONE.getID()).put("hive.security", security).put("hive.max-partitions-per-scan", "1000").put("hive.assume-canonical-partition-keys", "true").put("hive.collect-column-statistics-on-write", "true").put("hive.temporary-table-schema", TEMPORARY_TABLE_SCHEMA).build();
        Map<String, String> storageProperties = extraHiveProperties.containsKey("hive.storage-format") ? ImmutableMap.copyOf(hiveProperties) : ImmutableMap.<String, String>builder().putAll(hiveProperties).put("hive.storage-format", "TEXTFILE").put("hive.compression-codec", "NONE").build();
        Map<String, String> hiveBucketedProperties = ImmutableMap.<String, String>builder()
                .putAll(storageProperties)
                // so that each bucket has multiple splits
                .put("hive.max-initial-split-size", "10kB")
                // so that each bucket has multiple splits
                .put("hive.max-split-size", "10kB")
                .build();
        queryRunner.createCatalog(HIVE_CATALOG, HIVE_CATALOG, hiveProperties);
        queryRunner.createCatalog(HIVE_BUCKETED_CATALOG, HIVE_CATALOG, hiveBucketedProperties);
        if (!metastore.getDatabase(METASTORE_CONTEXT, TPCH_SCHEMA).isPresent()) {
            metastore.createDatabase(METASTORE_CONTEXT, createDatabaseMetastoreObject(TPCH_SCHEMA));
            copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession(Optional.empty()), tables);
        }
        if (!metastore.getDatabase(METASTORE_CONTEXT, TPCH_BUCKETED_SCHEMA).isPresent()) {
            metastore.createDatabase(METASTORE_CONTEXT, createDatabaseMetastoreObject(TPCH_BUCKETED_SCHEMA));
            copyTpchTablesBucketed(queryRunner, "tpch", TINY_SCHEMA_NAME, createBucketedSession(Optional.empty()), tables);
        }
        if (!metastore.getDatabase(METASTORE_CONTEXT, TEMPORARY_TABLE_SCHEMA).isPresent()) {
            metastore.createDatabase(METASTORE_CONTEXT, createDatabaseMetastoreObject(TEMPORARY_TABLE_SCHEMA));
        }
        return queryRunner;
    } catch (Exception e) {
        queryRunner.close();
        throw e;
    }
}
Also used : DistributedQueryRunner(com.facebook.presto.tests.DistributedQueryRunner) SelectedRole(com.facebook.presto.spi.security.SelectedRole) TpchPlugin(com.facebook.presto.tpch.TpchPlugin) NoHdfsAuthentication(com.facebook.presto.hive.authentication.NoHdfsAuthentication) FileHiveMetastore(com.facebook.presto.hive.metastore.file.FileHiveMetastore) TestingHiveEventListenerPlugin(com.facebook.presto.hive.TestHiveEventListenerPlugin.TestingHiveEventListenerPlugin) File(java.io.File)
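
A hedged sketch of calling the factory above from a standalone launcher, handy for spinning up a local Hive-backed cluster for ad-hoc queries. Only the parameter order comes from the signature shown above; the argument values (empty property maps, "sql-standard" security, default worker count), the TpchTable.getTables() helper, and the HiveQueryRunner import location are assumptions.

import com.facebook.presto.hive.HiveQueryRunner;
import com.facebook.presto.tests.DistributedQueryRunner;
import com.google.common.collect.ImmutableMap;
import io.airlift.tpch.TpchTable;

import java.util.Optional;

public final class HiveQueryRunnerLauncher
{
    private HiveQueryRunnerLauncher() {}

    public static void main(String[] args)
            throws Exception
    {
        // Start the JVM with -Duser.timezone=America/Bahia_Banderas,
        // otherwise the assertEquals at the top of createQueryRunner fails.
        DistributedQueryRunner queryRunner = HiveQueryRunner.createQueryRunner(
                TpchTable.getTables(),   // load all TPC-H tables (assumed helper)
                ImmutableMap.of(),       // extraProperties
                ImmutableMap.of(),       // extraCoordinatorProperties
                "sql-standard",          // security (assumed valid hive.security value)
                ImmutableMap.of(),       // extraHiveProperties
                Optional.empty(),        // workerCount, defaults to 4
                Optional.empty(),        // baseDataDir
                Optional.empty());       // externalWorkerLauncher
        System.out.println("Hive query runner started; data dir: " + queryRunner.getCoordinator().getBaseDataDir());
    }
}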

Example 13 with NoHdfsAuthentication

use of com.facebook.presto.hive.authentication.NoHdfsAuthentication in project presto by prestodb.

the class TestHiveLogicalPlanner method replicateHiveMetastore.

private ExtendedHiveMetastore replicateHiveMetastore(DistributedQueryRunner queryRunner) {
    URI baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toUri();
    HiveClientConfig hiveClientConfig = new HiveClientConfig();
    MetastoreClientConfig metastoreClientConfig = new MetastoreClientConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveClientConfig, metastoreClientConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());
    return new FileHiveMetastore(hdfsEnvironment, baseDir.toString(), "test");
}
Also used : FileHiveMetastore(com.facebook.presto.hive.metastore.file.FileHiveMetastore) URI(java.net.URI) NoHdfsAuthentication(com.facebook.presto.hive.authentication.NoHdfsAuthentication)

Aggregations

NoHdfsAuthentication (com.facebook.presto.hive.authentication.NoHdfsAuthentication) 13
FileHiveMetastore (com.facebook.presto.hive.metastore.file.FileHiveMetastore) 6
GroupByHashPageIndexerFactory (com.facebook.presto.GroupByHashPageIndexerFactory) 4
SchemaTableName (com.facebook.presto.spi.SchemaTableName) 4
ExecutorService (java.util.concurrent.ExecutorService) 4
CounterStat (com.facebook.airlift.stats.CounterStat) 3
CacheConfig (com.facebook.presto.cache.CacheConfig) 3
OutputStreamDataSinkFactory (com.facebook.presto.hive.datasink.OutputStreamDataSinkFactory) 3
ExtendedHiveMetastore (com.facebook.presto.hive.metastore.ExtendedHiveMetastore) 3
ColumnHandle (com.facebook.presto.spi.ColumnHandle) 3
ConnectorSplitSource (com.facebook.presto.spi.ConnectorSplitSource) 3
JoinCompiler (com.facebook.presto.sql.gen.JoinCompiler) 3
TestingConnectorSession (com.facebook.presto.testing.TestingConnectorSession) 3
MoreExecutors.newDirectExecutorService (com.google.common.util.concurrent.MoreExecutors.newDirectExecutorService) 3
File (java.io.File) 3
TYPE_MANAGER (com.facebook.presto.hive.HiveTestUtils.TYPE_MANAGER) 2
HiveTestUtils.getDefaultHiveDataStreamFactories (com.facebook.presto.hive.HiveTestUtils.getDefaultHiveDataStreamFactories) 2
HiveTestUtils.getDefaultHiveFileWriterFactories (com.facebook.presto.hive.HiveTestUtils.getDefaultHiveFileWriterFactories) 2
HiveTestUtils.getDefaultHiveRecordCursorProvider (com.facebook.presto.hive.HiveTestUtils.getDefaultHiveRecordCursorProvider) 2
HiveTestUtils.getTypes (com.facebook.presto.hive.HiveTestUtils.getTypes) 2