Use of com.facebook.presto.sql.gen.JoinCompiler in project presto by prestodb.
The class TestHivePageSink, method createPageSink.
private static ConnectorPageSink createPageSink(HiveTransactionHandle transaction, HiveClientConfig config, ExtendedHiveMetastore metastore, Path outputPath)
{
    LocationHandle locationHandle = new LocationHandle(outputPath, Optional.of(outputPath), false);
    HiveOutputTableHandle handle = new HiveOutputTableHandle(
            CLIENT_ID, SCHEMA_NAME, TABLE_NAME, getColumnHandles(), "test",
            new HivePageSinkMetadata(new SchemaTableName(SCHEMA_NAME, TABLE_NAME), metastore.getTable(SCHEMA_NAME, TABLE_NAME), ImmutableMap.of()),
            locationHandle, config.getHiveStorageFormat(), config.getHiveStorageFormat(),
            ImmutableList.of(), Optional.empty(), "test", ImmutableMap.of());
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    HdfsEnvironment hdfsEnvironment = createTestHdfsEnvironment(config);
    // JoinCompiler backs the GroupByHashPageIndexerFactory that the sink uses to bucket incoming rows by partition
    HivePageSinkProvider provider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config), hdfsEnvironment, metastore,
            new GroupByHashPageIndexerFactory(new JoinCompiler()), TYPE_MANAGER, config,
            new HiveLocationService(hdfsEnvironment), partitionUpdateCodec);
    return provider.createPageSink(transaction, getSession(config), handle);
}
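A minimal sketch, not taken from the source, of driving the sink this helper returns. Here somePage stands in for a pre-built com.facebook.presto.spi.Page, and the sketch assumes the SPI at this revision, where finish() returns a future of JSON-encoded PartitionUpdate fragments:

ConnectorPageSink pageSink = createPageSink(transaction, config, metastore, outputPath);
pageSink.appendPage(somePage); // somePage is a hypothetical Page built elsewhere in the test
// Assumption: finish() returns CompletableFuture<Collection<Slice>> at this revision
Collection<Slice> fragments = getFutureValue(pageSink.finish());
// each Slice is a JSON-encoded PartitionUpdate that a real commit would hand to the metastore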
Use of com.facebook.presto.sql.gen.JoinCompiler in project presto by prestodb.
The class AbstractTestHiveClientS3, method setup.
protected void setup(String host, int port, String databaseName, String awsAccessKey, String awsSecretKey, String writableBucket)
{
    this.writableBucket = writableBucket;
    setupHive(databaseName);

    HiveS3Config s3Config = new HiveS3Config().setS3AwsAccessKey(awsAccessKey).setS3AwsSecretKey(awsSecretKey);
    HiveClientConfig hiveClientConfig = new HiveClientConfig();

    // Route Thrift metastore traffic through a SOCKS proxy when one is configured
    String proxy = System.getProperty("hive.metastore.thrift.client.socks-proxy");
    if (proxy != null) {
        hiveClientConfig.setMetastoreSocksProxy(HostAndPort.fromString(proxy));
    }

    HiveConnectorId connectorId = new HiveConnectorId("hive-test");
    HiveCluster hiveCluster = new TestingHiveCluster(hiveClientConfig, host, port);
    ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("hive-s3-%s"));
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationUpdater(hiveClientConfig, s3Config));
    HivePartitionManager hivePartitionManager = new HivePartitionManager(connectorId, TYPE_MANAGER, hiveClientConfig);
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig, new NoHdfsAuthentication());
    metastoreClient = new TestingHiveMetastore(new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster)), executor, hiveClientConfig, writableBucket, hdfsEnvironment);
    locationService = new HiveLocationService(hdfsEnvironment);
    TypeRegistry typeManager = new TypeRegistry();
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(
            connectorId, hiveClientConfig, metastoreClient, hdfsEnvironment, hivePartitionManager,
            newDirectExecutorService(), typeManager, locationService, new TableParameterCodec(),
            partitionUpdateCodec, new HiveTypeTranslator(), new NodeVersion("test_version"));
    transactionManager = new HiveTransactionManager();
    splitManager = new HiveSplitManager(
            connectorId,
            transactionHandle -> ((HiveMetadata) transactionManager.get(transactionHandle)).getMetastore(),
            new NamenodeStats(), hdfsEnvironment, new HadoopDirectoryLister(),
            new BoundedExecutor(executor, hiveClientConfig.getMaxSplitIteratorThreads()),
            new HiveCoercionPolicy(typeManager),
            hiveClientConfig.getMaxOutstandingSplits(), hiveClientConfig.getMinPartitionBatchSize(),
            hiveClientConfig.getMaxPartitionBatchSize(), hiveClientConfig.getMaxInitialSplits(),
            hiveClientConfig.getRecursiveDirWalkerEnabled());
    // JoinCompiler enters through GroupByHashPageIndexerFactory, which the page sink uses to group rows by partition
    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(hiveClientConfig), hdfsEnvironment, metastoreClient,
            new GroupByHashPageIndexerFactory(new JoinCompiler()), typeManager, new HiveClientConfig(),
            locationService, partitionUpdateCodec);
    pageSourceProvider = new HivePageSourceProvider(
            hiveClientConfig, hdfsEnvironment, getDefaultHiveRecordCursorProvider(hiveClientConfig),
            getDefaultHiveDataStreamFactories(hiveClientConfig), TYPE_MANAGER);
}
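For context, a minimal sketch, assumed rather than taken from the source, of what the JoinCompiler-backed factory hands to the page sink. GroupByHashPageIndexerFactory uses JoinCompiler to generate the row-equality and hashing code, and the PageIndexer it creates maps each row of a page to a small integer index, with equal key values sharing an index; the page variable here is a hypothetical input:

PageIndexerFactory pageIndexerFactory = new GroupByHashPageIndexerFactory(new JoinCompiler());
PageIndexer pageIndexer = pageIndexerFactory.createPageIndexer(ImmutableList.of(BIGINT)); // one key channel
int[] indexes = pageIndexer.indexPage(page); // one index per row; rows with equal keys share an index
int distinctKeys = pageIndexer.getMaxIndex() + 1; // assuming getMaxIndex() reports the highest index assigned so far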
Use of com.facebook.presto.sql.gen.JoinCompiler in project presto by prestodb.
The class TestHashSemiJoinOperator, method testProbeSideNulls.
// Disabled until #6622 is fixed
@Test(dataProvider = "hashEnabledValues", enabled = false)
public void testProbeSideNulls(boolean hashEnabled)
        throws Exception
{
    DriverContext driverContext = taskContext.addPipelineContext(0, true, true).addDriverContext();

    // build
    OperatorContext operatorContext = driverContext.addOperatorContext(0, new PlanNodeId("test"), ValuesOperator.class.getSimpleName());
    List<Type> buildTypes = ImmutableList.of(BIGINT);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), buildTypes);
    Operator buildOperator = new ValuesOperator(operatorContext, buildTypes, rowPagesBuilder
            .row(0L)
            .row(1L)
            .row(3L)
            .build());
    SetBuilderOperatorFactory setBuilderOperatorFactory = new SetBuilderOperatorFactory(1, new PlanNodeId("test"), buildOperator.getTypes().get(0), 0, rowPagesBuilder.getHashChannel(), 10, new JoinCompiler());
    Operator setBuilderOperator = setBuilderOperatorFactory.createOperator(driverContext);
    Driver driver = new Driver(driverContext, buildOperator, setBuilderOperator);
    while (!driver.isFinished()) {
        driver.process();
    }

    // probe
    List<Type> probeTypes = ImmutableList.of(BIGINT);
    RowPagesBuilder rowPagesBuilderProbe = rowPagesBuilder(hashEnabled, Ints.asList(0), probeTypes);
    List<Page> probeInput = rowPagesBuilderProbe
            .row(0L)
            .row((Object) null)
            .row(1L)
            .row(2L)
            .build();
    HashSemiJoinOperatorFactory joinOperatorFactory = new HashSemiJoinOperatorFactory(2, new PlanNodeId("test"), setBuilderOperatorFactory.getSetProvider(), rowPagesBuilderProbe.getTypes(), 0);

    // expected
    MaterializedResult expected = resultBuilder(driverContext.getSession(), concat(probeTypes, ImmutableList.of(BOOLEAN)))
            .row(0L, true)
            .row(null, null)
            .row(1L, true)
            .row(2L, false)
            .build();
    OperatorAssertion.assertOperatorEquals(joinOperatorFactory, driverContext, probeInput, expected, hashEnabled, ImmutableList.of(probeTypes.size()));
}
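The expected row (null, null) encodes SQL three-valued logic: a NULL probe value has unknown membership in the build set {0, 1, 3}, so the semi-join result is NULL rather than false, while 2 is known to be absent and yields false.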
Use of com.facebook.presto.sql.gen.JoinCompiler in project presto by prestodb.
The class TestHashSemiJoinOperator, method testMemoryLimit.
@Test(dataProvider = "hashEnabledValues", expectedExceptions = ExceededMemoryLimitException.class, expectedExceptionsMessageRegExp = "Query exceeded local memory limit of.*")
public void testMemoryLimit(boolean hashEnabled)
        throws Exception
{
    DriverContext driverContext = createTaskContext(executor, TEST_SESSION, new DataSize(100, BYTE))
            .addPipelineContext(0, true, true)
            .addDriverContext();
    OperatorContext operatorContext = driverContext.addOperatorContext(0, new PlanNodeId("test"), ValuesOperator.class.getSimpleName());
    List<Type> buildTypes = ImmutableList.of(BIGINT);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), buildTypes);
    Operator buildOperator = new ValuesOperator(operatorContext, buildTypes, rowPagesBuilder.addSequencePage(10000, 20).build());
    SetBuilderOperatorFactory setBuilderOperatorFactory = new SetBuilderOperatorFactory(1, new PlanNodeId("test"), buildOperator.getTypes().get(0), 0, rowPagesBuilder.getHashChannel(), 10, new JoinCompiler());
    Operator setBuilderOperator = setBuilderOperatorFactory.createOperator(driverContext);
    Driver driver = new Driver(driverContext, buildOperator, setBuilderOperator);
    while (!driver.isFinished()) {
        driver.process();
    }
}
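The task is capped at 100 bytes, so hashing the 10,000-row sequence page into the set is guaranteed to trip the local memory check inside driver.process(); the @Test annotation asserts the resulting ExceededMemoryLimitException and its message, which is why the body contains no explicit assertion.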
Use of com.facebook.presto.sql.gen.JoinCompiler in project presto by prestodb.
The class TestHashSemiJoinOperator, method testBuildSideNulls.
@Test(dataProvider = "hashEnabledValues")
public void testBuildSideNulls(boolean hashEnabled)
        throws Exception
{
    DriverContext driverContext = taskContext.addPipelineContext(0, true, true).addDriverContext();

    // build
    OperatorContext operatorContext = driverContext.addOperatorContext(0, new PlanNodeId("test"), ValuesOperator.class.getSimpleName());
    List<Type> buildTypes = ImmutableList.of(BIGINT);
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), buildTypes);
    Operator buildOperator = new ValuesOperator(operatorContext, buildTypes, rowPagesBuilder
            .row(0L)
            .row(1L)
            .row(2L)
            .row(2L)
            .row(3L)
            .row((Object) null)
            .build());
    SetBuilderOperatorFactory setBuilderOperatorFactory = new SetBuilderOperatorFactory(1, new PlanNodeId("test"), buildOperator.getTypes().get(0), 0, rowPagesBuilder.getHashChannel(), 10, new JoinCompiler());
    Operator setBuilderOperator = setBuilderOperatorFactory.createOperator(driverContext);
    Driver driver = new Driver(driverContext, buildOperator, setBuilderOperator);
    while (!driver.isFinished()) {
        driver.process();
    }

    // probe
    List<Type> probeTypes = ImmutableList.of(BIGINT);
    RowPagesBuilder rowPagesBuilderProbe = rowPagesBuilder(hashEnabled, Ints.asList(0), probeTypes);
    List<Page> probeInput = rowPagesBuilderProbe.addSequencePage(4, 1).build();
    HashSemiJoinOperatorFactory joinOperatorFactory = new HashSemiJoinOperatorFactory(2, new PlanNodeId("test"), setBuilderOperatorFactory.getSetProvider(), rowPagesBuilderProbe.getTypes(), 0);

    // expected
    MaterializedResult expected = resultBuilder(driverContext.getSession(), concat(probeTypes, ImmutableList.of(BOOLEAN)))
            .row(1L, true)
            .row(2L, true)
            .row(3L, true)
            .row(4L, null)
            .build();
    OperatorAssertion.assertOperatorEquals(joinOperatorFactory, driverContext, probeInput, expected, hashEnabled, ImmutableList.of(probeTypes.size()));
}
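This is the mirror image of the probe-side case: the null sits in the build set, so probe values 1, 2, and 3 match and yield true, but 4 yields NULL rather than false, since with a NULL among the build values SQL cannot rule out a match for a value that is otherwise absent.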