Use of com.facebook.presto.type.TypeRegistry in project presto by prestodb.
From the class TestFunctionRegistry, method testMagicLiteralFunction:
@Test
public void testMagicLiteralFunction()
{
    Signature signature = getMagicLiteralFunctionSignature(TIMESTAMP_WITH_TIME_ZONE);
    assertEquals(signature.getName(), "$literal$timestamp with time zone");
    assertEquals(signature.getArgumentTypes(), ImmutableList.of(parseTypeSignature(StandardTypes.BIGINT)));
    assertEquals(signature.getReturnType().getBase(), StandardTypes.TIMESTAMP_WITH_TIME_ZONE);

    // Resolve the magic literal through a fresh registry and verify that the
    // resolved signature has the same argument and return types.
    TypeRegistry typeManager = new TypeRegistry();
    FunctionRegistry registry = new FunctionRegistry(typeManager, new BlockEncodingManager(typeManager), new FeaturesConfig());
    Signature function = registry.resolveFunction(QualifiedName.of(signature.getName()), fromTypeSignatures(signature.getArgumentTypes()));
    assertEquals(function.getArgumentTypes(), ImmutableList.of(parseTypeSignature(StandardTypes.BIGINT)));
    assertEquals(function.getReturnType().getBase(), StandardTypes.TIMESTAMP_WITH_TIME_ZONE);
}
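For comparison, here is a minimal sketch of resolving an ordinary scalar function through the same TypeRegistry/FunctionRegistry wiring. It is not part of the original test; the method name and the choice of the built-in "abs" function are illustrative.

@Test
public void testResolveOrdinaryFunction()
{
    TypeRegistry typeManager = new TypeRegistry();
    FunctionRegistry registry = new FunctionRegistry(typeManager, new BlockEncodingManager(typeManager), new FeaturesConfig());
    // "abs" over bigint is assumed to be registered as a built-in returning bigint.
    Signature abs = registry.resolveFunction(QualifiedName.of("abs"), fromTypeSignatures(ImmutableList.of(parseTypeSignature(StandardTypes.BIGINT))));
    assertEquals(abs.getReturnType().getBase(), StandardTypes.BIGINT);
}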
Use of com.facebook.presto.type.TypeRegistry in project presto by prestodb.
From the class AbstractTestHiveClient, method setup:
protected final void setup(String databaseName, HiveClientConfig hiveClientConfig, ExtendedHiveMetastore hiveMetastore)
{
    HiveConnectorId connectorId = new HiveConnectorId("hive-test");
    setupHive(connectorId.toString(), databaseName, hiveClientConfig.getTimeZone());
    metastoreClient = hiveMetastore;
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationUpdater(hiveClientConfig, new HiveS3Config()));
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig, new NoHdfsAuthentication());
    locationService = new HiveLocationService(hdfsEnvironment);
    // A single TypeRegistry serves as the TypeManager for metadata, splits, and page sinks.
    TypeManager typeManager = new TypeRegistry();
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(
            connectorId, metastoreClient, hdfsEnvironment,
            new HivePartitionManager(connectorId, TYPE_MANAGER, hiveClientConfig),
            timeZone, 10, true, true, false, true, HiveStorageFormat.RCBINARY, 1000,
            typeManager, locationService, new TableParameterCodec(), partitionUpdateCodec,
            newFixedThreadPool(2), new HiveTypeTranslator(), TEST_SERVER_VERSION);
    transactionManager = new HiveTransactionManager();
    splitManager = new HiveSplitManager(
            connectorId,
            transactionHandle -> ((HiveMetadata) transactionManager.get(transactionHandle)).getMetastore(),
            new NamenodeStats(), hdfsEnvironment, new HadoopDirectoryLister(), newDirectExecutorService(),
            new HiveCoercionPolicy(typeManager), 100, hiveClientConfig.getMinPartitionBatchSize(),
            hiveClientConfig.getMaxPartitionBatchSize(), hiveClientConfig.getMaxInitialSplits(), false);
    pageSinkProvider = new HivePageSinkProvider(getDefaultHiveFileWriterFactories(hiveClientConfig), hdfsEnvironment, metastoreClient, new GroupByHashPageIndexerFactory(JOIN_COMPILER), typeManager, new HiveClientConfig(), locationService, partitionUpdateCodec);
    pageSourceProvider = new HivePageSourceProvider(hiveClientConfig, hdfsEnvironment, getDefaultHiveRecordCursorProvider(hiveClientConfig), getDefaultHiveDataStreamFactories(hiveClientConfig), TYPE_MANAGER);
}
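Because TypeRegistry implements the TypeManager interface, the same instance can answer type lookups anywhere a TypeManager is expected. A minimal standalone sketch, not taken from the test class:

TypeManager typeManager = new TypeRegistry();
// Look up a built-in type by its parsed signature.
Type bigint = typeManager.getType(parseTypeSignature(StandardTypes.BIGINT));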
Use of com.facebook.presto.type.TypeRegistry in project presto by prestodb.
From the class AbstractTestHiveClientS3, method setup:
protected void setup(String host, int port, String databaseName, String awsAccessKey, String awsSecretKey, String writableBucket)
{
    this.writableBucket = writableBucket;
    setupHive(databaseName);
    HiveS3Config s3Config = new HiveS3Config().setS3AwsAccessKey(awsAccessKey).setS3AwsSecretKey(awsSecretKey);
    HiveClientConfig hiveClientConfig = new HiveClientConfig();
    // Route metastore traffic through a SOCKS proxy when the system property is set.
    String proxy = System.getProperty("hive.metastore.thrift.client.socks-proxy");
    if (proxy != null) {
        hiveClientConfig.setMetastoreSocksProxy(HostAndPort.fromString(proxy));
    }
    HiveConnectorId connectorId = new HiveConnectorId("hive-test");
    HiveCluster hiveCluster = new TestingHiveCluster(hiveClientConfig, host, port);
    ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("hive-s3-%s"));
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationUpdater(hiveClientConfig, s3Config));
    HivePartitionManager hivePartitionManager = new HivePartitionManager(connectorId, TYPE_MANAGER, hiveClientConfig);
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig, new NoHdfsAuthentication());
    metastoreClient = new TestingHiveMetastore(new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster)), executor, hiveClientConfig, writableBucket, hdfsEnvironment);
    locationService = new HiveLocationService(hdfsEnvironment);
    TypeRegistry typeManager = new TypeRegistry();
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(
            connectorId, hiveClientConfig, metastoreClient, hdfsEnvironment, hivePartitionManager,
            newDirectExecutorService(), typeManager, locationService, new TableParameterCodec(),
            partitionUpdateCodec, new HiveTypeTranslator(), new NodeVersion("test_version"));
    transactionManager = new HiveTransactionManager();
    splitManager = new HiveSplitManager(
            connectorId,
            transactionHandle -> ((HiveMetadata) transactionManager.get(transactionHandle)).getMetastore(),
            new NamenodeStats(), hdfsEnvironment, new HadoopDirectoryLister(),
            new BoundedExecutor(executor, hiveClientConfig.getMaxSplitIteratorThreads()),
            new HiveCoercionPolicy(typeManager), hiveClientConfig.getMaxOutstandingSplits(),
            hiveClientConfig.getMinPartitionBatchSize(), hiveClientConfig.getMaxPartitionBatchSize(),
            hiveClientConfig.getMaxInitialSplits(), hiveClientConfig.getRecursiveDirWalkerEnabled());
    pageSinkProvider = new HivePageSinkProvider(getDefaultHiveFileWriterFactories(hiveClientConfig), hdfsEnvironment, metastoreClient, new GroupByHashPageIndexerFactory(new JoinCompiler()), typeManager, new HiveClientConfig(), locationService, partitionUpdateCodec);
    pageSourceProvider = new HivePageSourceProvider(hiveClientConfig, hdfsEnvironment, getDefaultHiveRecordCursorProvider(hiveClientConfig), getDefaultHiveDataStreamFactories(hiveClientConfig), TYPE_MANAGER);
}
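The proxy lookup above reads a plain JVM system property, so a test run can opt in without code changes. A sketch, with an illustrative host and port:

// Equivalent to passing -Dhive.metastore.thrift.client.socks-proxy=localhost:1080
// on the JVM command line; must run before setup() reads the property.
System.setProperty("hive.metastore.thrift.client.socks-proxy", "localhost:1080");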
Use of com.facebook.presto.type.TypeRegistry in project presto by prestodb.
From the class TestAnalyzer, method setup:
@BeforeMethod(alwaysRun = true)
public void setup()
        throws Exception
{
    TypeManager typeManager = new TypeRegistry();
    CatalogManager catalogManager = new CatalogManager();
    transactionManager = createTestTransactionManager(catalogManager);
    accessControl = new AccessControlManager(transactionManager);

    // Local variable; assigned to the field at the end of setup.
    MetadataManager metadata = new MetadataManager(new FeaturesConfig(), typeManager, new BlockEncodingManager(typeManager), new SessionPropertyManager(), new SchemaPropertyManager(), new TablePropertyManager(), transactionManager);
    metadata.getFunctionRegistry().addFunctions(ImmutableList.of(APPLY_FUNCTION));

    catalogManager.registerCatalog(createTestingCatalog(TPCH_CATALOG, TPCH_CONNECTOR_ID));
    catalogManager.registerCatalog(createTestingCatalog(SECOND_CATALOG, SECOND_CONNECTOR_ID));
    catalogManager.registerCatalog(createTestingCatalog(THIRD_CATALOG, THIRD_CONNECTOR_ID));

    SchemaTableName table1 = new SchemaTableName("s1", "t1");
    inSetupTransaction(session -> metadata.createTable(session, TPCH_CATALOG, new ConnectorTableMetadata(table1,
            ImmutableList.of(new ColumnMetadata("a", BIGINT), new ColumnMetadata("b", BIGINT), new ColumnMetadata("c", BIGINT), new ColumnMetadata("d", BIGINT)))));

    SchemaTableName table2 = new SchemaTableName("s1", "t2");
    inSetupTransaction(session -> metadata.createTable(session, TPCH_CATALOG, new ConnectorTableMetadata(table2,
            ImmutableList.of(new ColumnMetadata("a", BIGINT), new ColumnMetadata("b", BIGINT)))));

    SchemaTableName table3 = new SchemaTableName("s1", "t3");
    inSetupTransaction(session -> metadata.createTable(session, TPCH_CATALOG, new ConnectorTableMetadata(table3,
            ImmutableList.of(new ColumnMetadata("a", BIGINT), new ColumnMetadata("b", BIGINT), new ColumnMetadata("x", BIGINT, null, true)))));

    // table in a different catalog
    SchemaTableName table4 = new SchemaTableName("s2", "t4");
    inSetupTransaction(session -> metadata.createTable(session, SECOND_CATALOG, new ConnectorTableMetadata(table4,
            ImmutableList.of(new ColumnMetadata("a", BIGINT)))));

    // table with a hidden column
    SchemaTableName table5 = new SchemaTableName("s1", "t5");
    inSetupTransaction(session -> metadata.createTable(session, TPCH_CATALOG, new ConnectorTableMetadata(table5,
            ImmutableList.of(new ColumnMetadata("a", BIGINT), new ColumnMetadata("b", BIGINT, null, true)))));

    // table with a varchar column
    SchemaTableName table6 = new SchemaTableName("s1", "t6");
    inSetupTransaction(session -> metadata.createTable(session, TPCH_CATALOG, new ConnectorTableMetadata(table6,
            ImmutableList.of(new ColumnMetadata("a", BIGINT), new ColumnMetadata("b", VARCHAR), new ColumnMetadata("c", BIGINT), new ColumnMetadata("d", BIGINT)))));

    // table with bigint, double, array-of-bigint, and array-of-double columns
    SchemaTableName table7 = new SchemaTableName("s1", "t7");
    inSetupTransaction(session -> metadata.createTable(session, TPCH_CATALOG, new ConnectorTableMetadata(table7,
            ImmutableList.of(new ColumnMetadata("a", BIGINT), new ColumnMetadata("b", DOUBLE), new ColumnMetadata("c", new ArrayType(BIGINT)), new ColumnMetadata("d", new ArrayType(DOUBLE))))));

    // valid view referencing a table in the same schema
    String viewData1 = JsonCodec.jsonCodec(ViewDefinition.class).toJson(new ViewDefinition("select a from t1", Optional.of(TPCH_CATALOG), Optional.of("s1"), ImmutableList.of(new ViewColumn("a", BIGINT)), Optional.of("user")));
    inSetupTransaction(session -> metadata.createView(session, new QualifiedObjectName(TPCH_CATALOG, "s1", "v1"), viewData1, false));

    // stale view (different column type)
    String viewData2 = JsonCodec.jsonCodec(ViewDefinition.class).toJson(new ViewDefinition("select a from t1", Optional.of(TPCH_CATALOG), Optional.of("s1"), ImmutableList.of(new ViewColumn("a", VARCHAR)), Optional.of("user")));
    inSetupTransaction(session -> metadata.createView(session, new QualifiedObjectName(TPCH_CATALOG, "s1", "v2"), viewData2, false));

    // view referencing a table in a different schema from itself and the session
    String viewData3 = JsonCodec.jsonCodec(ViewDefinition.class).toJson(new ViewDefinition("select a from t4", Optional.of(SECOND_CATALOG), Optional.of("s2"), ImmutableList.of(new ViewColumn("a", BIGINT)), Optional.of("owner")));
    inSetupTransaction(session -> metadata.createView(session, new QualifiedObjectName(THIRD_CATALOG, "s3", "v3"), viewData3, false));

    // valid view with an uppercase column name
    String viewData4 = JsonCodec.jsonCodec(ViewDefinition.class).toJson(new ViewDefinition("select A from t1", Optional.of("tpch"), Optional.of("s1"), ImmutableList.of(new ViewColumn("a", BIGINT)), Optional.of("user")));
    inSetupTransaction(session -> metadata.createView(session, new QualifiedObjectName("tpch", "s1", "v4"), viewData4, false));

    // recursive view referencing itself
    String viewData5 = JsonCodec.jsonCodec(ViewDefinition.class).toJson(new ViewDefinition("select * from v5", Optional.of(TPCH_CATALOG), Optional.of("s1"), ImmutableList.of(new ViewColumn("a", BIGINT)), Optional.of("user")));
    inSetupTransaction(session -> metadata.createView(session, new QualifiedObjectName(TPCH_CATALOG, "s1", "v5"), viewData5, false));

    this.metadata = metadata;
}
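As a side note, a minimal sketch of how a serialized view definition such as viewData1 round-trips through the same JsonCodec; the assertion is illustrative and not part of the original setup:

ViewDefinition parsed = JsonCodec.jsonCodec(ViewDefinition.class).fromJson(viewData1);
// The deserialized definition carries the original SQL text.
assertEquals(parsed.getOriginalSql(), "select a from t1");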
Use of com.facebook.presto.type.TypeRegistry in project presto by prestodb.
From the class TestExpressionOptimizer, method testPossibleExponentialOptimizationTime:
@Test(timeOut = 10_000)
public void testPossibleExponentialOptimizationTime()
{
    TypeRegistry typeManager = new TypeRegistry();
    ExpressionOptimizer optimizer = new ExpressionOptimizer(new FunctionRegistry(typeManager, new BlockEncodingManager(typeManager), new FeaturesConfig()), typeManager, TEST_SESSION);
    // Build a left-deep chain of 100 nested ADD calls: (((1 + 1) + 1) + ... + 1).
    // An optimizer that re-expanded both operands of every call would need time
    // exponential in the nesting depth; the 10-second timeout guards against that.
    RowExpression expression = new ConstantExpression(1L, BIGINT);
    for (int i = 0; i < 100; i++) {
        Signature signature = internalOperator(OperatorType.ADD.name(), parseTypeSignature(StandardTypes.BIGINT), parseTypeSignature(StandardTypes.BIGINT), parseTypeSignature(StandardTypes.BIGINT));
        expression = new CallExpression(signature, BIGINT, ImmutableList.of(expression, new ConstantExpression(1L, BIGINT)));
    }
    optimizer.optimize(expression);
}
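As a complement, a hedged sketch reusing the optimizer above on a single addition; the folded-constant assertion is an assumption about the optimizer's constant folding, not taken from the original test:

RowExpression sum = new CallExpression(
        internalOperator(OperatorType.ADD.name(), parseTypeSignature(StandardTypes.BIGINT), parseTypeSignature(StandardTypes.BIGINT), parseTypeSignature(StandardTypes.BIGINT)),
        BIGINT,
        ImmutableList.of(new ConstantExpression(1L, BIGINT), new ConstantExpression(1L, BIGINT)));
// Assumption: a call over constant operands folds into a single constant.
assertEquals(optimizer.optimize(sum), new ConstantExpression(2L, BIGINT));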