Example 26 with TypeOperators

Use of io.trino.spi.type.TypeOperators in project trino by trinodb.

The class TestHivePageSink, method createPageSink:

private static ConnectorPageSink createPageSink(HiveTransactionHandle transaction, HiveConfig config, HiveMetastore metastore, Path outputPath, HiveWriterStats stats) {
    LocationHandle locationHandle = new LocationHandle(outputPath, outputPath, false, DIRECT_TO_TARGET_NEW_DIRECTORY);
    HiveOutputTableHandle handle = new HiveOutputTableHandle(
            SCHEMA_NAME,
            TABLE_NAME,
            getColumnHandles(),
            new HivePageSinkMetadata(
                    new SchemaTableName(SCHEMA_NAME, TABLE_NAME),
                    metastore.getTable(SCHEMA_NAME, TABLE_NAME),
                    ImmutableMap.of()),
            locationHandle,
            config.getHiveStorageFormat(),
            config.getHiveStorageFormat(),
            ImmutableList.of(),
            Optional.empty(),
            "test",
            ImmutableMap.of(),
            NO_ACID_TRANSACTION,
            false,
            false);
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    TypeOperators typeOperators = new TypeOperators();
    BlockTypeOperators blockTypeOperators = new BlockTypeOperators(typeOperators);
    HivePageSinkProvider provider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config, HDFS_ENVIRONMENT),
            HDFS_ENVIRONMENT,
            PAGE_SORTER,
            HiveMetastoreFactory.ofInstance(metastore),
            new GroupByHashPageIndexerFactory(new JoinCompiler(typeOperators), blockTypeOperators),
            TESTING_TYPE_MANAGER,
            config,
            new HiveLocationService(HDFS_ENVIRONMENT),
            partitionUpdateCodec,
            new TestingNodeManager("fake-environment"),
            new HiveEventClient(),
            getHiveSessionProperties(config),
            stats);
    return provider.createPageSink(transaction, getHiveSession(config), handle);
}
Also used: JoinCompiler (io.trino.sql.gen.JoinCompiler), HivePageSinkMetadata (io.trino.plugin.hive.metastore.HivePageSinkMetadata), SchemaTableName (io.trino.spi.connector.SchemaTableName), BlockTypeOperators (io.trino.type.BlockTypeOperators), TestingNodeManager (io.trino.testing.TestingNodeManager), GroupByHashPageIndexerFactory (io.trino.operator.GroupByHashPageIndexerFactory), TypeOperators (io.trino.spi.type.TypeOperators)
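
For context, a hypothetical sketch of how a page sink created this way is typically driven in a test. The page variable and the arguments to createPageSink are assumed to be prepared elsewhere, and getFutureValue is io.airlift.concurrent.MoreFutures.getFutureValue:

ConnectorPageSink pageSink = createPageSink(transaction, config, metastore, outputPath, stats);
pageSink.appendPage(page); // enqueue a Page of rows to be written
Collection<Slice> fragments = getFutureValue(pageSink.finish()); // flush writers, collect commit fragments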

Example 27 with TypeOperators

Use of io.trino.spi.type.TypeOperators in project trino by trinodb.

The class FunctionManager, method createTestingFunctionManager:

public static FunctionManager createTestingFunctionManager() {
    TypeOperators typeOperators = new TypeOperators();
    GlobalFunctionCatalog functionCatalog = new GlobalFunctionCatalog();
    functionCatalog.addFunctions(SystemFunctionBundle.create(new FeaturesConfig(), typeOperators, new BlockTypeOperators(typeOperators), UNKNOWN));
    functionCatalog.addFunctions(new InternalFunctionBundle(new LiteralFunction(new InternalBlockEncodingSerde(new BlockEncodingManager(), TESTING_TYPE_MANAGER))));
    return new FunctionManager(functionCatalog);
}
Also used: BlockTypeOperators (io.trino.type.BlockTypeOperators), FeaturesConfig (io.trino.FeaturesConfig), TypeOperators (io.trino.spi.type.TypeOperators)
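
The TypeOperators created here is more than a constructor argument: it is a factory and cache for generated per-type operator MethodHandles. A minimal sketch of direct use, assuming the usual static imports (BIGINT from io.trino.spi.type.BigintType; simpleConvention, NULLABLE_RETURN, and NEVER_NULL from io.trino.spi.function.InvocationConvention):

static boolean bigintEquals(long left, long right) throws Throwable {
    TypeOperators typeOperators = new TypeOperators();
    // Resolve an equality operator for BIGINT; the convention says both arguments
    // arrive as plain long values and the result is a nullable Boolean.
    MethodHandle equalOperator = typeOperators.getEqualOperator(
            BIGINT,
            simpleConvention(NULLABLE_RETURN, NEVER_NULL, NEVER_NULL));
    // invokeExact requires calling with the exact (long, long)Boolean signature resolved above.
    return (Boolean) equalOperator.invokeExact(left, right);
}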

Example 28 with TypeOperators

Use of io.trino.spi.type.TypeOperators in project trino by trinodb.

The class TestDeltaLakePageSink, method createPageSink:

private static ConnectorPageSink createPageSink(Path outputPath, DeltaLakeWriterStats stats) {
    HiveTransactionHandle transaction = new HiveTransactionHandle(false);
    DeltaLakeConfig deltaLakeConfig = new DeltaLakeConfig();
    DeltaLakeOutputTableHandle tableHandle = new DeltaLakeOutputTableHandle(
            SCHEMA_NAME,
            TABLE_NAME,
            getColumnHandles(),
            outputPath.toString(),
            Optional.of(deltaLakeConfig.getDefaultCheckpointWritingInterval()),
            true);
    DeltaLakePageSinkProvider provider = new DeltaLakePageSinkProvider(
            new GroupByHashPageIndexerFactory(new JoinCompiler(new TypeOperators()), new BlockTypeOperators()),
            HDFS_ENVIRONMENT,
            JsonCodec.jsonCodec(DataFileInfo.class),
            stats,
            deltaLakeConfig,
            new TestingTypeManager(),
            new NodeVersion("test-version"));
    return provider.createPageSink(transaction, SESSION, tableHandle);
}
Also used: NodeVersion (io.trino.plugin.hive.NodeVersion), JoinCompiler (io.trino.sql.gen.JoinCompiler), BlockTypeOperators (io.trino.type.BlockTypeOperators), HiveTransactionHandle (io.trino.plugin.hive.HiveTransactionHandle), GroupByHashPageIndexerFactory (io.trino.operator.GroupByHashPageIndexerFactory), TestingTypeManager (io.trino.spi.type.TestingTypeManager), TypeOperators (io.trino.spi.type.TypeOperators)
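
Unlike example 26, this example uses the no-argument BlockTypeOperators constructor instead of sharing the TypeOperators instance handed to JoinCompiler. A sketch of the two wiring styles seen so far (the assumption here is that the no-arg constructor simply creates its own TypeOperators internally):

TypeOperators typeOperators = new TypeOperators();
BlockTypeOperators shared = new BlockTypeOperators(typeOperators); // example 26: one shared operator cache
BlockTypeOperators standalone = new BlockTypeOperators();          // example 28: assumed to build a private TypeOperators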

Example 29 with TypeOperators

Use of io.trino.spi.type.TypeOperators in project trino by trinodb.

The class TestDeltaLakeFileStatistics, method testStatisticsValues:

private static void testStatisticsValues(DeltaLakeFileStatistics fileStatistics) {
    assertEquals(fileStatistics.getNumRecords(), Optional.of(1L));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("byt", TINYINT, REGULAR)), Optional.of(42L));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("dat", DATE, REGULAR)), Optional.of(LocalDate.parse("5000-01-01").toEpochDay()));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("dec_long", DecimalType.createDecimalType(25, 3), REGULAR)), Optional.of(encodeScaledValue(new BigDecimal("999999999999.123"), 3)));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("dec_short", DecimalType.createDecimalType(5, 1), REGULAR)), Optional.of(new BigDecimal("10.1").unscaledValue().longValueExact()));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("dou", DoubleType.DOUBLE, REGULAR)), Optional.of(0.321));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("fl", REAL, REGULAR)), Optional.of((long) floatToIntBits(0.123f)));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("in", INTEGER, REGULAR)), Optional.of(20000000L));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("l", BIGINT, REGULAR)), Optional.of(10000000L));
    Type rowType = RowType.rowType(RowType.field("s1", INTEGER), RowType.field("s3", VarcharType.createUnboundedVarcharType()));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("row", rowType, REGULAR)), Optional.empty());
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("arr", new ArrayType(INTEGER), REGULAR)), Optional.empty());
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("m", new MapType(INTEGER, VarcharType.createUnboundedVarcharType(), new TypeOperators()), REGULAR)), Optional.empty());
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("sh", SMALLINT, REGULAR)), Optional.of(123L));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("str", VarcharType.createUnboundedVarcharType(), REGULAR)), Optional.of(utf8Slice("a")));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("ts", TIMESTAMP_TZ_MILLIS, REGULAR)), Optional.of(packDateTimeWithZone(LocalDateTime.parse("2960-10-31T01:00:00.000").toInstant(UTC).toEpochMilli(), UTC_KEY)));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("bool", BOOLEAN, REGULAR)), Optional.empty());
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("bin", VARBINARY, REGULAR)), Optional.empty());
}
Also used: ArrayType (io.trino.spi.type.ArrayType), RowType (io.trino.spi.type.RowType), DecimalType (io.trino.spi.type.DecimalType), DoubleType (io.trino.spi.type.DoubleType), Type (io.trino.spi.type.Type), VarcharType (io.trino.spi.type.VarcharType), MapType (io.trino.spi.type.MapType), DeltaLakeColumnHandle (io.trino.plugin.deltalake.DeltaLakeColumnHandle), BigDecimal (java.math.BigDecimal), TypeOperators (io.trino.spi.type.TypeOperators)
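
As the map assertion above shows, a MapType cannot be built from its key and value types alone: the constructor also takes a TypeOperators instance, which supplies the generated equality and hash operators for the key type. A minimal sketch using only the calls that appear in the example:

TypeOperators typeOperators = new TypeOperators();
// The TypeOperators argument lets MapType resolve key equality and hashing.
MapType mapType = new MapType(INTEGER, VarcharType.createUnboundedVarcharType(), typeOperators);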

Example 30 with TypeOperators

Use of io.trino.spi.type.TypeOperators in project trino by trinodb.

The class AbstractTestHiveFileSystem, method setup:

protected void setup(String host, int port, String databaseName, boolean s3SelectPushdownEnabled, HdfsConfiguration hdfsConfiguration) {
    database = databaseName;
    table = new SchemaTableName(database, "trino_test_external_fs");
    tableWithHeader = new SchemaTableName(database, "trino_test_external_fs_with_header");
    tableWithHeaderAndFooter = new SchemaTableName(database, "trino_test_external_fs_with_header_and_footer");
    String random = randomUUID().toString().toLowerCase(ENGLISH).replace("-", "");
    temporaryCreateTable = new SchemaTableName(database, "tmp_trino_test_create_" + random);
    config = new HiveConfig().setS3SelectPushdownEnabled(s3SelectPushdownEnabled);
    Optional<HostAndPort> proxy = Optional.ofNullable(System.getProperty("hive.metastore.thrift.client.socks-proxy")).map(HostAndPort::fromString);
    MetastoreLocator metastoreLocator = new TestingMetastoreLocator(proxy, HostAndPort.fromParts(host, port));
    HivePartitionManager hivePartitionManager = new HivePartitionManager(config);
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, new HdfsConfig(), new NoHdfsAuthentication());
    MetastoreConfig metastoreConfig = new MetastoreConfig();
    metastoreClient = new TestingHiveMetastore(
            new BridgingHiveMetastore(
                    new ThriftHiveMetastore(metastoreLocator, new HiveConfig(), metastoreConfig, new ThriftMetastoreConfig(), hdfsEnvironment, false),
                    new HiveIdentity(getHiveSession(config).getIdentity())),
            getBasePath(),
            hdfsEnvironment);
    locationService = new HiveLocationService(hdfsEnvironment);
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(
            new CatalogName("hive"),
            config,
            metastoreConfig,
            HiveMetastoreFactory.ofInstance(metastoreClient),
            hdfsEnvironment,
            hivePartitionManager,
            newDirectExecutorService(),
            heartbeatService,
            TESTING_TYPE_MANAGER,
            NOOP_METADATA_PROVIDER,
            locationService,
            partitionUpdateCodec,
            new NodeVersion("test_version"),
            new NoneHiveRedirectionsProvider(),
            ImmutableSet.of(
                    new PartitionsSystemTableProvider(hivePartitionManager, TESTING_TYPE_MANAGER),
                    new PropertiesSystemTableProvider()),
            new DefaultHiveMaterializedViewMetadataFactory(),
            SqlStandardAccessControlMetadata::new,
            NO_REDIRECTIONS,
            TableInvalidationCallback.NOOP);
    transactionManager = new HiveTransactionManager(metadataFactory);
    splitManager = new HiveSplitManager(
            transactionManager,
            hivePartitionManager,
            new NamenodeStats(),
            hdfsEnvironment,
            new CachingDirectoryLister(new HiveConfig()),
            new BoundedExecutor(executor, config.getMaxSplitIteratorThreads()),
            new CounterStat(),
            config.getMaxOutstandingSplits(),
            config.getMaxOutstandingSplitsSize(),
            config.getMinPartitionBatchSize(),
            config.getMaxPartitionBatchSize(),
            config.getMaxInitialSplits(),
            config.getSplitLoaderConcurrency(),
            config.getMaxSplitsPerSecond(),
            config.getRecursiveDirWalkerEnabled(),
            TESTING_TYPE_MANAGER);
    TypeOperators typeOperators = new TypeOperators();
    BlockTypeOperators blockTypeOperators = new BlockTypeOperators(typeOperators);
    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config, hdfsEnvironment),
            hdfsEnvironment,
            PAGE_SORTER,
            HiveMetastoreFactory.ofInstance(metastoreClient),
            new GroupByHashPageIndexerFactory(new JoinCompiler(typeOperators), blockTypeOperators),
            TESTING_TYPE_MANAGER,
            config,
            locationService,
            partitionUpdateCodec,
            new TestingNodeManager("fake-environment"),
            new HiveEventClient(),
            getHiveSessionProperties(config),
            new HiveWriterStats());
    pageSourceProvider = new HivePageSourceProvider(
            TESTING_TYPE_MANAGER,
            hdfsEnvironment,
            config,
            getDefaultHivePageSourceFactories(hdfsEnvironment, config),
            getDefaultHiveRecordCursorProviders(config, hdfsEnvironment),
            new GenericHiveRecordCursorProvider(hdfsEnvironment, config),
            Optional.empty());
    onSetupComplete();
}
Also used: TestingMetastoreLocator (io.trino.plugin.hive.metastore.thrift.TestingMetastoreLocator), CounterStat (io.airlift.stats.CounterStat), MetastoreLocator (io.trino.plugin.hive.metastore.thrift.MetastoreLocator), ThriftMetastoreConfig (io.trino.plugin.hive.metastore.thrift.ThriftMetastoreConfig), NoHdfsAuthentication (io.trino.plugin.hive.authentication.NoHdfsAuthentication), HiveIdentity (io.trino.plugin.hive.authentication.HiveIdentity), HostAndPort (com.google.common.net.HostAndPort), BlockTypeOperators (io.trino.type.BlockTypeOperators), TestingNodeManager (io.trino.testing.TestingNodeManager), BridgingHiveMetastore (io.trino.plugin.hive.metastore.thrift.BridgingHiveMetastore), TypeOperators (io.trino.spi.type.TypeOperators), JoinCompiler (io.trino.sql.gen.JoinCompiler), MetastoreConfig (io.trino.plugin.hive.metastore.MetastoreConfig), ThriftHiveMetastore (io.trino.plugin.hive.metastore.thrift.ThriftHiveMetastore), SchemaTableName (io.trino.spi.connector.SchemaTableName), BoundedExecutor (io.airlift.concurrent.BoundedExecutor), CatalogName (io.trino.plugin.base.CatalogName), GroupByHashPageIndexerFactory (io.trino.operator.GroupByHashPageIndexerFactory)
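
For context, the GroupByHashPageIndexerFactory wired into the page sink above is what buckets incoming rows by their partition values. A hypothetical usage sketch, where partitionTypes and page are assumed inputs and PageIndexerFactory / PageIndexer live in io.trino.spi:

PageIndexerFactory indexerFactory = new GroupByHashPageIndexerFactory(
        new JoinCompiler(typeOperators), blockTypeOperators);
PageIndexer indexer = indexerFactory.createPageIndexer(partitionTypes); // one indexer per list of partition column types
int[] bucketIds = indexer.indexPage(page); // assigns each row of the page a dense bucket id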

Aggregations

Types most often used together with TypeOperators across the collected examples, with usage counts:

TypeOperators (io.trino.spi.type.TypeOperators): 37
BlockTypeOperators (io.trino.type.BlockTypeOperators): 23
Test (org.testng.annotations.Test): 18
MapType (io.trino.spi.type.MapType): 10
Type (io.trino.spi.type.Type): 9
BeforeMethod (org.testng.annotations.BeforeMethod): 9
Page (io.trino.spi.Page): 8
JoinCompiler (io.trino.sql.gen.JoinCompiler): 6
PlanNodeId (io.trino.sql.planner.plan.PlanNodeId): 6
ImmutableList (com.google.common.collect.ImmutableList): 4
FeaturesConfig (io.trino.FeaturesConfig): 4
Driver (io.trino.operator.Driver): 3
DriverFactory (io.trino.operator.DriverFactory): 3
GroupByHashPageIndexerFactory (io.trino.operator.GroupByHashPageIndexerFactory): 3
OperatorFactory (io.trino.operator.OperatorFactory): 3
PagesIndex (io.trino.operator.PagesIndex): 3
HashBuilderOperatorFactory (io.trino.operator.join.HashBuilderOperator.HashBuilderOperatorFactory): 3
PartitionedLookupSourceFactory (io.trino.operator.join.PartitionedLookupSourceFactory): 3
Block (io.trino.spi.block.Block): 3
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 2