Use of com.facebook.presto.cache.CacheConfig in project presto by prestodb.
From the class TestHiveSessionProperties, the method testEmptyNodeSelectionStrategyConfig:
@Test
public void testEmptyNodeSelectionStrategyConfig()
{
    // With no explicit configuration, the session falls back to the default strategy.
    ConnectorSession connectorSession = new TestingConnectorSession(
            new HiveSessionProperties(new HiveClientConfig(), new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig())
                    .getSessionProperties());
    assertEquals(getNodeSelectionStrategy(connectorSession), NO_PREFERENCE);
}
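The getNodeSelectionStrategy helper is a thin wrapper over the generic session-property lookup. A minimal sketch of the same check through the raw ConnectorSession SPI, assuming the property is registered under the name "node_selection_strategy" (the property name is an assumption here, not confirmed by the snippet above):

// Sketch only: "node_selection_strategy" is an assumed property name.
NodeSelectionStrategy strategy = connectorSession.getProperty("node_selection_strategy", NodeSelectionStrategy.class);
assertEquals(strategy, NO_PREFERENCE);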
Use of com.facebook.presto.cache.CacheConfig in project presto by prestodb.
From the class TestHiveSessionProperties, the method testEmptyConfigNodeSelectionStrategyConfig:
@Test
public void testEmptyConfigNodeSelectionStrategyConfig()
{
    // Setting the strategy by name on HiveClientConfig yields the same result as the default.
    ConnectorSession connectorSession = new TestingConnectorSession(
            new HiveSessionProperties(
                    new HiveClientConfig().setNodeSelectionStrategy(NodeSelectionStrategy.valueOf("NO_PREFERENCE")),
                    new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig())
                    .getSessionProperties());
    assertEquals(getNodeSelectionStrategy(connectorSession), NO_PREFERENCE);
}
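Both tests above land on the default strategy. A companion sketch for a non-default value, assuming HARD_AFFINITY is available on the NodeSelectionStrategy enum and that the session property faithfully reflects the config override:

@Test
public void testNonDefaultNodeSelectionStrategyConfig()
{
    // Sketch: override the config default and expect the session to reflect it.
    ConnectorSession connectorSession = new TestingConnectorSession(
            new HiveSessionProperties(
                    new HiveClientConfig().setNodeSelectionStrategy(HARD_AFFINITY),
                    new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig())
                    .getSessionProperties());
    assertEquals(getNodeSelectionStrategy(connectorSession), HARD_AFFINITY);
}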
Use of com.facebook.presto.cache.CacheConfig in project presto by prestodb.
From the class TestHiveMetadataFileFormatEncryptionSettings, the method testFailureWithInsertIntoPartitionedTableWithNonDwrfPartition:
@Test(expectedExceptions = PrestoException.class, expectedExceptionsMessageRegExp = "For encrypted tables, partition format \\(ORC\\) should match table format \\(DWRF\\).*")
public void testFailureWithInsertIntoPartitionedTableWithNonDwrfPartition()
{
    String tableName = "test_enc_with_insert_partitioned_table_non_dwrf_partition";
    ConnectorTableMetadata table = getConnectorTableMetadata(
            tableName,
            ImmutableMap.of(
                    ENCRYPT_COLUMNS, fromTableProperty(ImmutableList.of("key1:t_struct.str")),
                    DWRF_ENCRYPTION_ALGORITHM, "test_algo",
                    DWRF_ENCRYPTION_PROVIDER, "test_provider"),
            true);
    try {
        // Create an encrypted DWRF table, then attempt an insert with a session that
        // forces ORC as the partition storage format; the mismatch must be rejected.
        HiveMetadata createHiveMetadata = metadataFactory.get();
        createHiveMetadata.createTable(SESSION, table, false);
        createHiveMetadata.commit();
        HiveMetadata insertHiveMetadata = metadataFactory.get();
        ConnectorSession newSession = new TestingConnectorSession(
                new HiveSessionProperties(
                        new HiveClientConfig().setRespectTableFormat(false).setHiveStorageFormat(ORC),
                        new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig())
                        .getSessionProperties());
        insertHiveMetadata.beginInsert(newSession, new HiveTableHandle(TEST_DB_NAME, tableName));
    }
    finally {
        dropTable(tableName);
    }
}
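For contrast, a sketch of the matching happy path under the same test helpers: when the forced partition format agrees with the encrypted table's DWRF format, beginInsert should complete without throwing. This is an inferred companion, not a test from the repo, and the HiveStorageFormat.DWRF constant is assumed to be in scope alongside ORC.

@Test
public void testInsertIntoPartitionedTableWithDwrfPartition()
{
    // Sketch: identical setup, but the forced storage format matches the table format.
    String tableName = "test_enc_with_insert_partitioned_table_dwrf_partition";
    ConnectorTableMetadata table = getConnectorTableMetadata(
            tableName,
            ImmutableMap.of(
                    ENCRYPT_COLUMNS, fromTableProperty(ImmutableList.of("key1:t_struct.str")),
                    DWRF_ENCRYPTION_ALGORITHM, "test_algo",
                    DWRF_ENCRYPTION_PROVIDER, "test_provider"),
            true);
    try {
        HiveMetadata createHiveMetadata = metadataFactory.get();
        createHiveMetadata.createTable(SESSION, table, false);
        createHiveMetadata.commit();
        ConnectorSession newSession = new TestingConnectorSession(
                new HiveSessionProperties(
                        new HiveClientConfig().setRespectTableFormat(false).setHiveStorageFormat(DWRF),
                        new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig())
                        .getSessionProperties());
        metadataFactory.get().beginInsert(newSession, new HiveTableHandle(TEST_DB_NAME, tableName));
    }
    finally {
        dropTable(tableName);
    }
}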
Use of com.facebook.presto.cache.CacheConfig in project presto by prestodb.
From the class TestOrcBatchPageSourceMemoryTracking, the method testMaxReadBytes:
@Test(dataProvider = "rowCount")
public void testMaxReadBytes(int rowCount)
        throws Exception
{
    int maxReadBytes = 1_000;
    HiveClientConfig config = new HiveClientConfig();
    config.setOrcMaxReadBlockSize(new DataSize(maxReadBytes, BYTE));
    ConnectorSession session = new TestingConnectorSession(
            new HiveSessionProperties(config, new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig())
                    .getSessionProperties());
    FileFormatDataSourceStats stats = new FileFormatDataSourceStats();

    // Build a table where every row gets larger, so we can verify that the reader
    // shrinks its batch size to keep pages under the configured read-size limit.
    int numColumns = 5;
    int step = 250;
    ImmutableList.Builder<TestColumn> columnBuilder = ImmutableList.<TestColumn>builder()
            .add(new TestColumn("p_empty_string", javaStringObjectInspector, () -> "", true));
    GrowingTestColumn[] dataColumns = new GrowingTestColumn[numColumns];
    for (int i = 0; i < numColumns; i++) {
        dataColumns[i] = new GrowingTestColumn("p_string", javaStringObjectInspector, () -> Long.toHexString(random.nextLong()), false, step * (i + 1));
        columnBuilder.add(dataColumns[i]);
    }
    List<TestColumn> testColumns = columnBuilder.build();

    // Write the ORC file into a fresh temp path (delete the placeholder file first).
    File tempFile = File.createTempFile("presto_test_orc_page_source_max_read_bytes", "orc");
    tempFile.delete();
    TestPreparer testPreparer = new TestPreparer(tempFile.getAbsolutePath(), testColumns, rowCount, rowCount);
    ConnectorPageSource pageSource = testPreparer.newPageSource(stats, session);
    try {
        int positionCount = 0;
        while (true) {
            Page page = pageSource.getNextPage();
            if (pageSource.isFinished()) {
                break;
            }
            assertNotNull(page);
            page = page.getLoadedPage();
            positionCount += page.getPositionCount();
            // Ignore the first MAX_BATCH_SIZE rows, since block sizes are only set once the blocks are loaded.
            if (positionCount > MAX_BATCH_SIZE) {
                // Either the page is bounded by maxReadBytes, or it holds a single large row;
                // an error margin of MAX_BATCH_SIZE / step is needed because the row sizes keep growing.
                assertTrue(page.getSizeInBytes() < maxReadBytes * (MAX_BATCH_SIZE / step) || 1 == page.getPositionCount());
            }
        }

        // Verify the stats are correctly recorded.
        Distribution distribution = stats.getMaxCombinedBytesPerRow().getAllTime();
        assertEquals((int) distribution.getCount(), 1);
        // Each data column is a VariableWidthBlock, which carries valueIsNull and offsets arrays as per-position overhead.
        assertEquals((int) distribution.getMax(), Arrays.stream(dataColumns).mapToInt(GrowingTestColumn::getMaxSize).sum() + (Integer.BYTES + Byte.BYTES) * numColumns);
        pageSource.close();
    }
    finally {
        tempFile.delete();
    }
}
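A minimal sketch of the page-draining pattern the test relies on, extracted as a standalone helper; it assumes nothing beyond the ConnectorPageSource SPI calls already used above (getNextPage, isFinished, getLoadedPage, getPositionCount).

// Sketch: drain a ConnectorPageSource and count positions, mirroring the loop above.
private static long countPositions(ConnectorPageSource pageSource)
{
    long positions = 0;
    while (!pageSource.isFinished()) {
        Page page = pageSource.getNextPage();
        if (page == null) {
            continue;
        }
        // Force lazy blocks to load so position counts reflect materialized data.
        positions += page.getLoadedPage().getPositionCount();
    }
    return positions;
}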
Use of com.facebook.presto.cache.CacheConfig in project presto by prestodb.
From the class TestHivePageSourceProvider, the method testNotUseRecordReaderWithInputFormatAnnotationWithoutCustomSplit:
@Test
public void testNotUseRecordReaderWithInputFormatAnnotationWithoutCustomSplit()
{
    StorageFormat storageFormat = StorageFormat.create(ParquetHiveSerDe.class.getName(), HoodieParquetInputFormat.class.getName(), "");
    Storage storage = new Storage(storageFormat, "test", Optional.empty(), true, ImmutableMap.of(), ImmutableMap.of());
    HiveRecordCursorProvider recordCursorProvider = new MockHiveRecordCursorProvider();
    HiveBatchPageSourceFactory hiveBatchPageSourceFactory = new MockHiveBatchPageSourceFactory();

    // Even with use_record_page_source_for_custom_split enabled, a plain (non-custom)
    // split should still be served by the batch page source factory.
    Optional<ConnectorPageSource> pageSource = HivePageSourceProvider.createHivePageSource(
            ImmutableSet.of(recordCursorProvider),
            ImmutableSet.of(hiveBatchPageSourceFactory),
            new Configuration(),
            new TestingConnectorSession(
                    new HiveSessionProperties(
                            new HiveClientConfig().setUseRecordPageSourceForCustomSplit(true),
                            new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig())
                            .getSessionProperties()),
            new Path("/test/"),
            OptionalInt.empty(),
            0, 100, 200,
            Instant.now().toEpochMilli(),
            storage,
            TupleDomain.none(),
            ImmutableList.of(), ImmutableMap.of(), ImmutableList.of(),
            DateTimeZone.UTC,
            new TestingTypeManager(),
            new SchemaTableName("test", "test"),
            ImmutableList.of(), ImmutableList.of(), ImmutableMap.of(),
            0,
            TableToPartitionMapping.empty(),
            Optional.empty(),
            false,
            null, null, false, null,
            Optional.empty(),
            ImmutableMap.of());
    assertTrue(pageSource.isPresent());
    assertTrue(pageSource.get() instanceof HivePageSource);
}
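The behavior hinges on the use_record_page_source_for_custom_split session property: because the split here is not a custom (Hudi) split, the batch page source wins even with the flag on. A sketch of reading the flag back through the session, assuming HiveSessionProperties exposes a static isUseRecordPageSourceForCustomSplit accessor (the accessor name is an assumption inferred from the config setter):

// Sketch: the toggle exercised above, read back through the session.
ConnectorSession session = new TestingConnectorSession(
        new HiveSessionProperties(
                new HiveClientConfig().setUseRecordPageSourceForCustomSplit(true),
                new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig())
                .getSessionProperties());
assertTrue(isUseRecordPageSourceForCustomSplit(session)); // assumed accessor name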