Use of com.facebook.presto.spi.ConnectorSession in project presto by prestodb.
In class TestHiveMetadataFileFormatEncryptionSettings, method testFailureWithInsertIntoPartitionedTableWithNonDwrfPartition:
@Test(expectedExceptions = PrestoException.class, expectedExceptionsMessageRegExp = "For encrypted tables, partition format \\(ORC\\) should match table format \\(DWRF\\).*")
public void testFailureWithInsertIntoPartitionedTableWithNonDwrfPartition()
{
    String tableName = "test_enc_with_insert_partitioned_table_non_dwrf_partition";
    ConnectorTableMetadata table = getConnectorTableMetadata(
            tableName,
            ImmutableMap.of(
                    ENCRYPT_COLUMNS, fromTableProperty(ImmutableList.of("key1:t_struct.str")),
                    DWRF_ENCRYPTION_ALGORITHM, "test_algo",
                    DWRF_ENCRYPTION_PROVIDER, "test_provider"),
            true);
    try {
        // Create an encrypted DWRF table and commit it
        HiveMetadata createHiveMetadata = metadataFactory.get();
        createHiveMetadata.createTable(SESSION, table, false);
        createHiveMetadata.commit();
        // A session that forces ORC for new partitions; beginInsert must reject the mismatch
        HiveMetadata insertHiveMetadata = metadataFactory.get();
        ConnectorSession newSession = new TestingConnectorSession(
                new HiveSessionProperties(
                        new HiveClientConfig().setRespectTableFormat(false).setHiveStorageFormat(ORC),
                        new OrcFileWriterConfig(),
                        new ParquetFileWriterConfig(),
                        new CacheConfig())
                        .getSessionProperties());
        insertHiveMetadata.beginInsert(newSession, new HiveTableHandle(TEST_DB_NAME, tableName));
    }
    finally {
        dropTable(tableName);
    }
}
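Every test on this page builds its ConnectorSession through the same four-config HiveSessionProperties constructor. A minimal sketch of that shared pattern, factored into a hypothetical newHiveTestSession helper (the helper name is an assumption; the class names and constructor shape are taken from the tests themselves, and the import locations reflect recent prestodb versions and may differ in older ones):

import com.facebook.presto.cache.CacheConfig;
import com.facebook.presto.hive.HiveClientConfig;
import com.facebook.presto.hive.HiveSessionProperties;
import com.facebook.presto.hive.OrcFileWriterConfig;
import com.facebook.presto.hive.ParquetFileWriterConfig;
import com.facebook.presto.spi.ConnectorSession;
import com.facebook.presto.testing.TestingConnectorSession;

// Hypothetical helper: a test session driven by a single HiveClientConfig,
// with the writer and cache configs left at their defaults
private static ConnectorSession newHiveTestSession(HiveClientConfig hiveClientConfig)
{
    return new TestingConnectorSession(
            new HiveSessionProperties(
                    hiveClientConfig,
                    new OrcFileWriterConfig(),
                    new ParquetFileWriterConfig(),
                    new CacheConfig())
                    .getSessionProperties());
}

// Usage, equivalent to newSession in the test above (ORC is HiveStorageFormat.ORC):
ConnectorSession newSession = newHiveTestSession(
        new HiveClientConfig().setRespectTableFormat(false).setHiveStorageFormat(ORC));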
Use of com.facebook.presto.spi.ConnectorSession in project presto by prestodb.
In class TestOrcBatchPageSourceMemoryTracking, method testMaxReadBytes:
@Test(dataProvider = "rowCount")
public void testMaxReadBytes(int rowCount)
        throws Exception
{
    int maxReadBytes = 1_000;
    HiveClientConfig config = new HiveClientConfig();
    config.setOrcMaxReadBlockSize(new DataSize(maxReadBytes, BYTE));
    ConnectorSession session = new TestingConnectorSession(
            new HiveSessionProperties(config, new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig()).getSessionProperties());
    FileFormatDataSourceStats stats = new FileFormatDataSourceStats();

    // Build a table where every row gets larger, so we can test that the batch size shrinks
    int numColumns = 5;
    int step = 250;
    ImmutableList.Builder<TestColumn> columnBuilder = ImmutableList.<TestColumn>builder()
            .add(new TestColumn("p_empty_string", javaStringObjectInspector, () -> "", true));
    GrowingTestColumn[] dataColumns = new GrowingTestColumn[numColumns];
    for (int i = 0; i < numColumns; i++) {
        dataColumns[i] = new GrowingTestColumn("p_string", javaStringObjectInspector, () -> Long.toHexString(random.nextLong()), false, step * (i + 1));
        columnBuilder.add(dataColumns[i]);
    }
    List<TestColumn> testColumns = columnBuilder.build();

    File tempFile = File.createTempFile("presto_test_orc_page_source_max_read_bytes", "orc");
    tempFile.delete();
    TestPreparer testPreparer = new TestPreparer(tempFile.getAbsolutePath(), testColumns, rowCount, rowCount);
    ConnectorPageSource pageSource = testPreparer.newPageSource(stats, session);
    try {
        int positionCount = 0;
        while (true) {
            Page page = pageSource.getNextPage();
            if (pageSource.isFinished()) {
                break;
            }
            assertNotNull(page);
            page = page.getLoadedPage();
            positionCount += page.getPositionCount();
            // ignore the first MAX_BATCH_SIZE rows, given that the sizes are only set when the blocks are loaded
            if (positionCount > MAX_BATCH_SIZE) {
                // either the page is bounded by maxReadBytes, or we just loaded one single large row;
                // an error margin of MAX_BATCH_SIZE / step is needed because the block sizes keep increasing
                assertTrue(page.getSizeInBytes() < maxReadBytes * (MAX_BATCH_SIZE / step) || 1 == page.getPositionCount());
            }
        }

        // verify the stats are correctly recorded
        Distribution distribution = stats.getMaxCombinedBytesPerRow().getAllTime();
        assertEquals((int) distribution.getCount(), 1);
        // the block is a VariableWidthBlock, which carries valueIsNull and offsets arrays as overhead
        assertEquals((int) distribution.getMax(), Arrays.stream(dataColumns).mapToInt(GrowingTestColumn::getMaxSize).sum() + (Integer.BYTES + Byte.BYTES) * numColumns);
        pageSource.close();
    }
    finally {
        tempFile.delete();
    }
}
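The drain loop above follows the general ConnectorPageSource contract: getNextPage() may return null while the source is still producing, and lazy blocks must be loaded before reported sizes are final. A minimal sketch of that contract as a stand-alone helper (the helper name is an assumption):

// Hypothetical helper: drain a page source and return the total row count
private static long countPositions(ConnectorPageSource pageSource)
{
    long positions = 0;
    while (!pageSource.isFinished()) {
        Page page = pageSource.getNextPage();
        if (page == null) {
            continue;   // null pages are allowed before the source finishes
        }
        // getLoadedPage() forces lazy blocks so getSizeInBytes() reflects the loaded data
        positions += page.getLoadedPage().getPositionCount();
    }
    return positions;
}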
Use of com.facebook.presto.spi.ConnectorSession in project presto by prestodb.
In class TestHivePartitionManager, method testIgnoresBucketingWhenTooManyBuckets:
@Test
public void testIgnoresBucketingWhenTooManyBuckets()
{
    ConnectorSession session = new TestingConnectorSession(
            new HiveSessionProperties(
                    new HiveClientConfig().setMaxBucketsForGroupedExecution(100),
                    new OrcFileWriterConfig(),
                    new ParquetFileWriterConfig(),
                    new CacheConfig())
                    .getSessionProperties());
    HivePartitionResult result = hivePartitionManager.getPartitions(metastore, new HiveTableHandle(SCHEMA_NAME, TABLE_NAME), Constraint.alwaysTrue(), session);
    assertFalse(result.getBucketHandle().isPresent(), "bucketHandle is present");
    assertFalse(result.getBucketFilter().isPresent(), "bucketFilter is present");
}
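The test name implies the fixture table's bucket count exceeds the limit of 100, so bucketing is dropped. A hedged counterpart under that reading: with the limit raised well above the table's bucket count, the bucket handle should survive. The test name, the limit value, and the expected assertion below are all assumptions, not part of the original test class:

// Hedged sketch: assumes the fixture table is bucketed and has fewer buckets than the raised limit
@Test
public void testKeepsBucketingWhenUnderLimit()
{
    ConnectorSession session = new TestingConnectorSession(
            new HiveSessionProperties(
                    new HiveClientConfig().setMaxBucketsForGroupedExecution(1_000_000),
                    new OrcFileWriterConfig(),
                    new ParquetFileWriterConfig(),
                    new CacheConfig())
                    .getSessionProperties());
    HivePartitionResult result = hivePartitionManager.getPartitions(metastore, new HiveTableHandle(SCHEMA_NAME, TABLE_NAME), Constraint.alwaysTrue(), session);
    // Expected under the stated assumption; verify against the real fixture
    assertTrue(result.getBucketHandle().isPresent(), "bucketHandle is absent");
}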
Use of com.facebook.presto.spi.ConnectorSession in project presto by prestodb.
In class SplitManager, method getSplits:
public SplitSource getSplits(Session session, TableHandle table, SplitSchedulingStrategy splitSchedulingStrategy, WarningCollector warningCollector)
{
    long startTime = System.nanoTime();
    ConnectorId connectorId = table.getConnectorId();
    ConnectorSplitManager splitManager = getConnectorSplitManager(connectorId);
    ConnectorSession connectorSession = session.toConnectorSession(connectorId);

    // Fetch the layout handle if it is not present in the TableHandle.
    // In the future, ConnectorTableHandle will be used to fetch splits, since it will contain the layout information.
    ConnectorTableLayoutHandle layout;
    if (!table.getLayout().isPresent()) {
        TableLayoutResult result = metadata.getLayout(session, table, Constraint.alwaysTrue(), Optional.empty());
        layout = result.getLayout().getLayoutHandle();
    }
    else {
        layout = table.getLayout().get();
    }

    ConnectorSplitSource source = splitManager.getSplits(table.getTransaction(), connectorSession, layout, new SplitSchedulingContext(splitSchedulingStrategy, preferSplitHostAddresses, warningCollector));

    SplitSource splitSource = new ConnectorAwareSplitSource(connectorId, table.getTransaction(), source);
    if (minScheduleSplitBatchSize > 1) {
        splitSource = new BufferingSplitSource(splitSource, minScheduleSplitBatchSize);
    }
    return splitSource;
}
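A minimal call-site sketch for this method (the splitManager, session, and tableHandle variables are assumed to be in scope; UNGROUPED_SCHEDULING comes from ConnectorSplitManager.SplitSchedulingStrategy and WarningCollector.NOOP is the SPI's no-op collector):

// Assumed call site: fetch splits without per-bucket grouping and discard warnings
SplitSource splits = splitManager.getSplits(
        session,
        tableHandle,
        SplitSchedulingStrategy.UNGROUPED_SCHEDULING,
        WarningCollector.NOOP);
// The returned source is already wrapped in ConnectorAwareSplitSource and,
// when minScheduleSplitBatchSize > 1, in BufferingSplitSource as well.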
Use of com.facebook.presto.spi.ConnectorSession in project presto by prestodb.
In class TestJdbcComputePushdown, method testJdbcComputePushdownBooleanOperations:
@Test
public void testJdbcComputePushdownBooleanOperations()
{
    String table = "test_table";
    String schema = "test_schema";
    String expression = "(((c1 + c2) - c2 <> c2) OR c2 = c1) AND c1 <> c2";
    TypeProvider typeProvider = TypeProvider.copyOf(ImmutableMap.of("c1", BIGINT, "c2", BIGINT));
    RowExpression rowExpression = sqlToRowExpressionTranslator.translateAndOptimize(expression(expression), typeProvider);
    Set<ColumnHandle> columns = Stream.of("c1", "c2").map(TestJdbcComputePushdown::integerJdbcColumnHandle).collect(Collectors.toSet());
    PlanNode original = filter(jdbcTableScan(schema, table, BIGINT, "c1", "c2"), rowExpression);

    JdbcTableHandle jdbcTableHandle = new JdbcTableHandle(CONNECTOR_ID, new SchemaTableName(schema, table), CATALOG_NAME, schema, table);
    ConnectorSession session = new TestingConnectorSession(ImmutableList.of());
    JdbcTableLayoutHandle jdbcTableLayoutHandle = new JdbcTableLayoutHandle(
            session.getSqlFunctionProperties(),
            jdbcTableHandle,
            TupleDomain.none(),
            Optional.of(new JdbcExpression("((((((('c1' + 'c2') - 'c2') <> 'c2')) OR (('c2' = 'c1')))) AND (('c1' <> 'c2')))")));

    PlanNode actual = this.jdbcComputePushdown.optimize(original, session, null, ID_ALLOCATOR);
    assertPlanMatch(actual, PlanMatchPattern.filter(expression, JdbcTableScanMatcher.jdbcTableScanPattern(jdbcTableLayoutHandle, columns)));
}
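Worth noting in the expected layout handle above: even a TestingConnectorSession built from an empty property list still exposes SqlFunctionProperties, which JdbcTableLayoutHandle captures alongside the pushed-down expression so the generated SQL is tied to the session that produced it. A minimal sketch of that constructor, assuming jdbcTableHandle is in scope (the Optional.empty() case stands in for "nothing pushed down"):

// Even an "empty" testing session carries SQL function properties
ConnectorSession session = new TestingConnectorSession(ImmutableList.of());
JdbcTableLayoutHandle layout = new JdbcTableLayoutHandle(
        session.getSqlFunctionProperties(),   // captured so generated SQL matches the session
        jdbcTableHandle,
        TupleDomain.none(),                   // no tuple-domain constraint
        Optional.empty());                    // no pushed-down filter expression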