
Example 16 with CacheConfig

Use of com.facebook.presto.cache.CacheConfig in project presto by prestodb: class TestHiveSessionProperties, method testEmptyNodeSelectionStrategyConfig.

@Test
public void testEmptyNodeSelectionStrategyConfig() {
    ConnectorSession connectorSession = new TestingConnectorSession(
            new HiveSessionProperties(new HiveClientConfig(), new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig())
                    .getSessionProperties());
    assertEquals(getNodeSelectionStrategy(connectorSession), NO_PREFERENCE);
}
Also used : TestingConnectorSession(com.facebook.presto.testing.TestingConnectorSession) ConnectorSession(com.facebook.presto.spi.ConnectorSession) CacheConfig(com.facebook.presto.cache.CacheConfig) Test(org.testng.annotations.Test)
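
The session construction in this test is repeated, with different HiveClientConfig settings, in every example below. As a minimal sketch (the sessionFor helper is hypothetical, not part of the Presto test sources), the shared pattern can be factored into one method:

// Hypothetical helper, not from the Presto tests: builds a TestingConnectorSession from a
// HiveClientConfig, using default writer configs and a default CacheConfig as the tests above do.
private static ConnectorSession sessionFor(HiveClientConfig hiveClientConfig)
{
    return new TestingConnectorSession(
            new HiveSessionProperties(hiveClientConfig, new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig())
                    .getSessionProperties());
}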

Example 17 with CacheConfig

Use of com.facebook.presto.cache.CacheConfig in project presto by prestodb: class TestHiveSessionProperties, method testEmptyConfigNodeSelectionStrategyConfig.

@Test
public void testEmptyConfigNodeSelectionStrategyConfig() {
    ConnectorSession connectorSession = new TestingConnectorSession(
            new HiveSessionProperties(
                    new HiveClientConfig().setNodeSelectionStrategy(NodeSelectionStrategy.valueOf("NO_PREFERENCE")),
                    new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig())
                    .getSessionProperties());
    assertEquals(getNodeSelectionStrategy(connectorSession), NO_PREFERENCE);
}
Also used : TestingConnectorSession(com.facebook.presto.testing.TestingConnectorSession) ConnectorSession(com.facebook.presto.spi.ConnectorSession) CacheConfig(com.facebook.presto.cache.CacheConfig) Test(org.testng.annotations.Test)
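
Because NO_PREFERENCE is also the HiveClientConfig default (as Example 16 shows), the two tests above assert the same value whether it comes from defaults or from an explicit setting. A hedged sketch of the explicit variant, passing the enum constant directly instead of NodeSelectionStrategy.valueOf("NO_PREFERENCE") and reusing the hypothetical sessionFor helper sketched under Example 16:

// Sketch only: assumes the same static import of NO_PREFERENCE used by the tests above.
ConnectorSession session = sessionFor(new HiveClientConfig().setNodeSelectionStrategy(NO_PREFERENCE));
assertEquals(getNodeSelectionStrategy(session), NO_PREFERENCE);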

Example 18 with CacheConfig

Use of com.facebook.presto.cache.CacheConfig in project presto by prestodb: class TestHiveMetadataFileFormatEncryptionSettings, method testFailureWithInsertIntoPartitionedTableWithNonDwrfPartition.

@Test(expectedExceptions = PrestoException.class, expectedExceptionsMessageRegExp = "For encrypted tables, partition format \\(ORC\\) should match table format \\(DWRF\\).*")
public void testFailureWithInsertIntoPartitionedTableWithNonDwrfPartition() {
    String tableName = "test_enc_with_insert_partitioned_table_non_dwrf_partition";
    ConnectorTableMetadata table = getConnectorTableMetadata(tableName, ImmutableMap.of(ENCRYPT_COLUMNS, fromTableProperty(ImmutableList.of("key1:t_struct.str")), DWRF_ENCRYPTION_ALGORITHM, "test_algo", DWRF_ENCRYPTION_PROVIDER, "test_provider"), true);
    try {
        HiveMetadata createHiveMetadata = metadataFactory.get();
        createHiveMetadata.createTable(SESSION, table, false);
        createHiveMetadata.commit();
        HiveMetadata insertHiveMetadata = metadataFactory.get();
        ConnectorSession newSession = new TestingConnectorSession(
                new HiveSessionProperties(
                        new HiveClientConfig().setRespectTableFormat(false).setHiveStorageFormat(ORC),
                        new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig())
                        .getSessionProperties());
        insertHiveMetadata.beginInsert(newSession, new HiveTableHandle(TEST_DB_NAME, tableName));
    } finally {
        dropTable(tableName);
    }
}
Also used : TestingConnectorSession(com.facebook.presto.testing.TestingConnectorSession) ConnectorSession(com.facebook.presto.spi.ConnectorSession) CacheConfig(com.facebook.presto.cache.CacheConfig) ConnectorTableMetadata(com.facebook.presto.spi.ConnectorTableMetadata) Test(org.testng.annotations.Test)
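
The failure here is driven entirely by the session: setRespectTableFormat(false) plus setHiveStorageFormat(ORC) forces new partitions to ORC while the encrypted table itself is DWRF. As a contrast, and assuming HiveClientConfig's default of respecting the table format, a default-configured session (sketched below with the hypothetical sessionFor helper from Example 16) would keep partitions in DWRF and not trip this check:

// Sketch only, not from the Presto tests: default settings respect the table's DWRF format,
// so beginInsert would not hit the partition/table format mismatch exercised above.
ConnectorSession matchingSession = sessionFor(new HiveClientConfig());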

Example 19 with CacheConfig

Use of com.facebook.presto.cache.CacheConfig in project presto by prestodb: class TestOrcBatchPageSourceMemoryTracking, method testMaxReadBytes.

@Test(dataProvider = "rowCount")
public void testMaxReadBytes(int rowCount) throws Exception {
    int maxReadBytes = 1_000;
    HiveClientConfig config = new HiveClientConfig();
    config.setOrcMaxReadBlockSize(new DataSize(maxReadBytes, BYTE));
    ConnectorSession session = new TestingConnectorSession(
            new HiveSessionProperties(config, new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig())
                    .getSessionProperties());
    FileFormatDataSourceStats stats = new FileFormatDataSourceStats();
    // Build a table where every row gets larger, so we can test that the "batchSize" is reduced as rows grow
    int numColumns = 5;
    int step = 250;
    ImmutableList.Builder<TestColumn> columnBuilder = ImmutableList.<TestColumn>builder().add(new TestColumn("p_empty_string", javaStringObjectInspector, () -> "", true));
    GrowingTestColumn[] dataColumns = new GrowingTestColumn[numColumns];
    for (int i = 0; i < numColumns; i++) {
        dataColumns[i] = new GrowingTestColumn("p_string", javaStringObjectInspector, () -> Long.toHexString(random.nextLong()), false, step * (i + 1));
        columnBuilder.add(dataColumns[i]);
    }
    List<TestColumn> testColumns = columnBuilder.build();
    File tempFile = File.createTempFile("presto_test_orc_page_source_max_read_bytes", "orc");
    tempFile.delete();
    TestPreparer testPreparer = new TestPreparer(tempFile.getAbsolutePath(), testColumns, rowCount, rowCount);
    ConnectorPageSource pageSource = testPreparer.newPageSource(stats, session);
    try {
        int positionCount = 0;
        while (true) {
            Page page = pageSource.getNextPage();
            if (pageSource.isFinished()) {
                break;
            }
            assertNotNull(page);
            page = page.getLoadedPage();
            positionCount += page.getPositionCount();
            // ignore the first MAX_BATCH_SIZE rows given the sizes are set when loading the blocks
            if (positionCount > MAX_BATCH_SIZE) {
                // either the block is bounded by maxReadBytes or we just load one single large block
                // an error margin MAX_BATCH_SIZE / step is needed given the block sizes are increasing
                assertTrue(page.getSizeInBytes() < maxReadBytes * (MAX_BATCH_SIZE / step) || 1 == page.getPositionCount());
            }
        }
        // verify the stats are correctly recorded
        Distribution distribution = stats.getMaxCombinedBytesPerRow().getAllTime();
        assertEquals((int) distribution.getCount(), 1);
        // the block is VariableWidthBlock that contains valueIsNull and offsets arrays as overhead
        assertEquals((int) distribution.getMax(), Arrays.stream(dataColumns).mapToInt(GrowingTestColumn::getMaxSize).sum() + (Integer.BYTES + Byte.BYTES) * numColumns);
        pageSource.close();
    } finally {
        tempFile.delete();
    }
}
Also used : ImmutableList(com.google.common.collect.ImmutableList) TestingConnectorSession(com.facebook.presto.testing.TestingConnectorSession) Page(com.facebook.presto.common.Page) ConnectorPageSource(com.facebook.presto.spi.ConnectorPageSource) DataSize(io.airlift.units.DataSize) Distribution(com.facebook.airlift.stats.Distribution) ConnectorSession(com.facebook.presto.spi.ConnectorSession) CacheConfig(com.facebook.presto.cache.CacheConfig) SequenceFile(org.apache.hadoop.io.SequenceFile) File(java.io.File) OrcFile(org.apache.hadoop.hive.ql.io.orc.OrcFile) Test(org.testng.annotations.Test)
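
The read loop in this test is the usual way these tests drain a ConnectorPageSource. A minimal sketch (the helper name is hypothetical) of that loop as a reusable method, mirroring the test's isFinished and null handling:

// Hypothetical helper: drains a page source and returns the total position count,
// loading each page before counting, as the loop in testMaxReadBytes does.
private static int countPositions(ConnectorPageSource pageSource)
{
    int positionCount = 0;
    while (true) {
        Page page = pageSource.getNextPage();
        if (pageSource.isFinished()) {
            break;
        }
        if (page != null) {
            positionCount += page.getLoadedPage().getPositionCount();
        }
    }
    return positionCount;
}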

Example 20 with CacheConfig

Use of com.facebook.presto.cache.CacheConfig in project presto by prestodb: class TestHivePageSourceProvider, method testNotUseRecordReaderWithInputFormatAnnotationWithoutCustomSplit.

@Test
public void testNotUseRecordReaderWithInputFormatAnnotationWithoutCustomSplit() {
    StorageFormat storageFormat = StorageFormat.create(ParquetHiveSerDe.class.getName(), HoodieParquetInputFormat.class.getName(), "");
    Storage storage = new Storage(storageFormat, "test", Optional.empty(), true, ImmutableMap.of(), ImmutableMap.of());
    HiveRecordCursorProvider recordCursorProvider = new MockHiveRecordCursorProvider();
    HiveBatchPageSourceFactory hiveBatchPageSourceFactory = new MockHiveBatchPageSourceFactory();
    Optional<ConnectorPageSource> pageSource = HivePageSourceProvider.createHivePageSource(
            ImmutableSet.of(recordCursorProvider),
            ImmutableSet.of(hiveBatchPageSourceFactory),
            new Configuration(),
            new TestingConnectorSession(
                    new HiveSessionProperties(
                            new HiveClientConfig().setUseRecordPageSourceForCustomSplit(true),
                            new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig())
                            .getSessionProperties()),
            new Path("/test/"),
            OptionalInt.empty(), 0, 100, 200,
            Instant.now().toEpochMilli(),
            storage,
            TupleDomain.none(),
            ImmutableList.of(), ImmutableMap.of(), ImmutableList.of(),
            DateTimeZone.UTC,
            new TestingTypeManager(),
            new SchemaTableName("test", "test"),
            ImmutableList.of(), ImmutableList.of(), ImmutableMap.of(),
            0,
            TableToPartitionMapping.empty(),
            Optional.empty(),
            false, null, null, false, null,
            Optional.empty(),
            ImmutableMap.of());
    assertTrue(pageSource.isPresent());
    assertTrue(pageSource.get() instanceof HivePageSource);
}
Also used : Path(org.apache.hadoop.fs.Path) ParquetHiveSerDe(org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe) Configuration(org.apache.hadoop.conf.Configuration) TestingConnectorSession(com.facebook.presto.testing.TestingConnectorSession) StorageFormat(com.facebook.presto.hive.metastore.StorageFormat) HoodieParquetInputFormat(org.apache.hudi.hadoop.HoodieParquetInputFormat) ConnectorPageSource(com.facebook.presto.spi.ConnectorPageSource) SchemaTableName(com.facebook.presto.spi.SchemaTableName) Storage(com.facebook.presto.hive.metastore.Storage) CacheConfig(com.facebook.presto.cache.CacheConfig) TestingTypeManager(com.facebook.presto.common.type.TestingTypeManager) Test(org.testng.annotations.Test)
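
Most of the long createHivePageSource call is fixed test scaffolding; the behavior under test hinges on a single session property and on the split not being a custom (Hudi) split. A hedged sketch isolating that property, using the hypothetical sessionFor helper from Example 16:

// Sketch only: the session flag exercised by this test; with no custom split involved,
// the provider still returns a regular HivePageSource rather than a record-cursor-backed source.
ConnectorSession session = sessionFor(new HiveClientConfig().setUseRecordPageSourceForCustomSplit(true));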

Aggregations

CacheConfig (com.facebook.presto.cache.CacheConfig): 45
Test (org.testng.annotations.Test): 33
TestingConnectorSession (com.facebook.presto.testing.TestingConnectorSession): 31
ConnectorSession (com.facebook.presto.spi.ConnectorSession): 18
Configuration (org.apache.hadoop.conf.Configuration): 12
SchemaTableName (com.facebook.presto.spi.SchemaTableName): 9
ConnectorPageSource (com.facebook.presto.spi.ConnectorPageSource): 8
HiveClientConfig (com.facebook.presto.hive.HiveClientConfig): 7
HiveSessionProperties (com.facebook.presto.hive.HiveSessionProperties): 7
OrcFileWriterConfig (com.facebook.presto.hive.OrcFileWriterConfig): 7
ParquetFileWriterConfig (com.facebook.presto.hive.ParquetFileWriterConfig): 7
TestingCacheUtils.stressTest (com.facebook.presto.cache.TestingCacheUtils.stressTest): 6
Storage (com.facebook.presto.hive.metastore.Storage): 6
File (java.io.File): 6
StorageFormat (com.facebook.presto.hive.metastore.StorageFormat): 5
OutputStreamDataSinkFactory (com.facebook.presto.hive.datasink.OutputStreamDataSinkFactory): 4
RcFilePageSourceFactory (com.facebook.presto.hive.rcfile.RcFilePageSourceFactory): 4
DataSize (io.airlift.units.DataSize): 4
BeforeClass (org.testng.annotations.BeforeClass): 4
CounterStat (com.facebook.airlift.stats.CounterStat): 3