use of io.trino.spi.type.TypeOperators in project trino by trinodb.
In the class TestHivePageSink, the method createPageSink:
private static ConnectorPageSink createPageSink(
        HiveTransactionHandle transaction,
        HiveConfig config,
        HiveMetastore metastore,
        Path outputPath,
        HiveWriterStats stats)
{
    LocationHandle locationHandle = new LocationHandle(outputPath, outputPath, false, DIRECT_TO_TARGET_NEW_DIRECTORY);
    HiveOutputTableHandle handle = new HiveOutputTableHandle(
            SCHEMA_NAME, TABLE_NAME, getColumnHandles(),
            new HivePageSinkMetadata(new SchemaTableName(SCHEMA_NAME, TABLE_NAME), metastore.getTable(SCHEMA_NAME, TABLE_NAME), ImmutableMap.of()),
            locationHandle,
            config.getHiveStorageFormat(), config.getHiveStorageFormat(),
            ImmutableList.of(), Optional.empty(), "test", ImmutableMap.of(),
            NO_ACID_TRANSACTION, false, false);
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    TypeOperators typeOperators = new TypeOperators();
    BlockTypeOperators blockTypeOperators = new BlockTypeOperators(typeOperators);
    HivePageSinkProvider provider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config, HDFS_ENVIRONMENT),
            HDFS_ENVIRONMENT, PAGE_SORTER,
            HiveMetastoreFactory.ofInstance(metastore),
            new GroupByHashPageIndexerFactory(new JoinCompiler(typeOperators), blockTypeOperators),
            TESTING_TYPE_MANAGER, config,
            new HiveLocationService(HDFS_ENVIRONMENT),
            partitionUpdateCodec,
            new TestingNodeManager("fake-environment"),
            new HiveEventClient(),
            getHiveSessionProperties(config),
            stats);
    return provider.createPageSink(transaction, getHiveSession(config), handle);
}
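Here a single TypeOperators instance is wrapped in BlockTypeOperators so that the GroupByHashPageIndexerFactory can compare and hash values directly at Block positions when grouping rows by partition. A minimal standalone sketch of that chain (not part of the test; it assumes the engine-internal io.trino.type.BlockTypeOperators API of this Trino version):

import io.trino.spi.block.Block;
import io.trino.spi.block.BlockBuilder;
import io.trino.spi.type.TypeOperators;
import io.trino.type.BlockTypeOperators;
import io.trino.type.BlockTypeOperators.BlockPositionEqual;

import static io.trino.spi.type.BigintType.BIGINT;

public class BlockEqualitySketch
{
    public static void main(String[] args)
    {
        TypeOperators typeOperators = new TypeOperators();
        BlockTypeOperators blockTypeOperators = new BlockTypeOperators(typeOperators);

        // Derive a BIGINT equality operator that reads values straight out of blocks
        BlockPositionEqual equal = blockTypeOperators.getEqualOperator(BIGINT);

        BlockBuilder builder = BIGINT.createBlockBuilder(null, 2);
        BIGINT.writeLong(builder, 42);
        BIGINT.writeLong(builder, 42);
        Block block = builder.build();

        // Both positions hold 42, so the operator reports them equal
        System.out.println(equal.equal(block, 0, block, 1)); // true
    }
}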
use of io.trino.spi.type.TypeOperators in project trino by trinodb.
In the class FunctionManager, the method createTestingFunctionManager:
public static FunctionManager createTestingFunctionManager()
{
    TypeOperators typeOperators = new TypeOperators();
    GlobalFunctionCatalog functionCatalog = new GlobalFunctionCatalog();
    functionCatalog.addFunctions(SystemFunctionBundle.create(
            new FeaturesConfig(), typeOperators, new BlockTypeOperators(typeOperators), UNKNOWN));
    functionCatalog.addFunctions(new InternalFunctionBundle(
            new LiteralFunction(new InternalBlockEncodingSerde(new BlockEncodingManager(), TESTING_TYPE_MANAGER))));
    return new FunctionManager(functionCatalog);
}
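The TypeOperators instance passed to SystemFunctionBundle.create is the factory from which built-in operators (equal, hash code, comparison) are resolved as java.lang.invoke.MethodHandles. A hedged sketch of requesting one directly; the convention constants are assumed to come from io.trino.spi.function.InvocationConvention:

import io.trino.spi.type.TypeOperators;

import java.lang.invoke.MethodHandle;

import static io.trino.spi.function.InvocationConvention.InvocationArgumentConvention.NEVER_NULL;
import static io.trino.spi.function.InvocationConvention.InvocationReturnConvention.NULLABLE_RETURN;
import static io.trino.spi.function.InvocationConvention.simpleConvention;
import static io.trino.spi.type.BigintType.BIGINT;

public class EqualOperatorSketch
{
    public static void main(String[] args) throws Throwable
    {
        TypeOperators typeOperators = new TypeOperators();

        // Ask for BIGINT's equality operator as a MethodHandle taking two
        // non-null long arguments and returning a nullable Boolean
        MethodHandle equal = typeOperators.getEqualOperator(
                BIGINT, simpleConvention(NULLABLE_RETURN, NEVER_NULL, NEVER_NULL));

        Boolean result = (Boolean) equal.invoke(42L, 42L);
        System.out.println(result); // true
    }
}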
use of io.trino.spi.type.TypeOperators in project trino by trinodb.
In the class TestDeltaLakePageSink, the method createPageSink:
private static ConnectorPageSink createPageSink(Path outputPath, DeltaLakeWriterStats stats)
{
    HiveTransactionHandle transaction = new HiveTransactionHandle(false);
    DeltaLakeConfig deltaLakeConfig = new DeltaLakeConfig();
    DeltaLakeOutputTableHandle tableHandle = new DeltaLakeOutputTableHandle(
            SCHEMA_NAME, TABLE_NAME, getColumnHandles(), outputPath.toString(),
            Optional.of(deltaLakeConfig.getDefaultCheckpointWritingInterval()), true);
    DeltaLakePageSinkProvider provider = new DeltaLakePageSinkProvider(
            new GroupByHashPageIndexerFactory(new JoinCompiler(new TypeOperators()), new BlockTypeOperators()),
            HDFS_ENVIRONMENT,
            JsonCodec.jsonCodec(DataFileInfo.class),
            stats,
            deltaLakeConfig,
            new TestingTypeManager(),
            new NodeVersion("test-version"));
    return provider.createPageSink(transaction, SESSION, tableHandle);
}
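The GroupByHashPageIndexerFactory constructed here is what lets the Delta Lake page sink fan rows out to per-partition writers: the indexer assigns matching rows the same index using operator-based equality and hashing. A rough sketch of that behavior, assuming the PageIndexer contract (indexPage returning one index per input row) and the engine-internal factory classes:

import io.trino.operator.GroupByHashPageIndexerFactory;
import io.trino.spi.Page;
import io.trino.spi.PageIndexer;
import io.trino.spi.block.BlockBuilder;
import io.trino.spi.type.TypeOperators;
import io.trino.sql.gen.JoinCompiler;
import io.trino.type.BlockTypeOperators;

import java.util.List;

import static io.trino.spi.type.BigintType.BIGINT;

public class PageIndexerSketch
{
    public static void main(String[] args)
    {
        PageIndexer indexer = new GroupByHashPageIndexerFactory(
                new JoinCompiler(new TypeOperators()), new BlockTypeOperators())
                .createPageIndexer(List.of(BIGINT));

        BlockBuilder builder = BIGINT.createBlockBuilder(null, 3);
        BIGINT.writeLong(builder, 1);
        BIGINT.writeLong(builder, 2);
        BIGINT.writeLong(builder, 1);

        // Rows 0 and 2 hold equal values, so they receive the same index
        int[] indexes = indexer.indexPage(new Page(builder.build()));
        System.out.println(indexes.length); // 3
    }
}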
use of io.trino.spi.type.TypeOperators in project trino by trinodb.
In the class TestDeltaLakeFileStatistics, the method testStatisticsValues:
private static void testStatisticsValues(DeltaLakeFileStatistics fileStatistics)
{
    assertEquals(fileStatistics.getNumRecords(), Optional.of(1L));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("byt", TINYINT, REGULAR)), Optional.of(42L));
    assertEquals(
            fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("dat", DATE, REGULAR)),
            Optional.of(LocalDate.parse("5000-01-01").toEpochDay()));
    assertEquals(
            fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("dec_long", DecimalType.createDecimalType(25, 3), REGULAR)),
            Optional.of(encodeScaledValue(new BigDecimal("999999999999.123"), 3)));
    assertEquals(
            fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("dec_short", DecimalType.createDecimalType(5, 1), REGULAR)),
            Optional.of(new BigDecimal("10.1").unscaledValue().longValueExact()));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("dou", DoubleType.DOUBLE, REGULAR)), Optional.of(0.321));
    assertEquals(
            fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("fl", REAL, REGULAR)),
            Optional.of((long) floatToIntBits(0.123f)));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("in", INTEGER, REGULAR)), Optional.of(20000000L));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("l", BIGINT, REGULAR)), Optional.of(10000000L));
    Type rowType = RowType.rowType(RowType.field("s1", INTEGER), RowType.field("s3", VarcharType.createUnboundedVarcharType()));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("row", rowType, REGULAR)), Optional.empty());
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("arr", new ArrayType(INTEGER), REGULAR)), Optional.empty());
    assertEquals(
            fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("m", new MapType(INTEGER, VarcharType.createUnboundedVarcharType(), new TypeOperators()), REGULAR)),
            Optional.empty());
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("sh", SMALLINT, REGULAR)), Optional.of(123L));
    assertEquals(
            fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("str", VarcharType.createUnboundedVarcharType(), REGULAR)),
            Optional.of(utf8Slice("a")));
    assertEquals(
            fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("ts", TIMESTAMP_TZ_MILLIS, REGULAR)),
            Optional.of(packDateTimeWithZone(LocalDateTime.parse("2960-10-31T01:00:00.000").toInstant(UTC).toEpochMilli(), UTC_KEY)));
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("bool", BOOLEAN, REGULAR)), Optional.empty());
    assertEquals(fileStatistics.getMinColumnValue(new DeltaLakeColumnHandle("bin", VARBINARY, REGULAR)), Optional.empty());
}
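MapType is the one type in this test whose constructor takes TypeOperators directly, because a map type needs equality and hashing for its key type and resolves them through the supplied instance. A minimal sketch of constructing the same type standalone:

import io.trino.spi.type.MapType;
import io.trino.spi.type.TypeOperators;
import io.trino.spi.type.VarcharType;

import static io.trino.spi.type.IntegerType.INTEGER;

public class MapTypeSketch
{
    public static void main(String[] args)
    {
        // The TypeOperators instance supplies key equality/hashing for the map type
        MapType mapType = new MapType(INTEGER, VarcharType.createUnboundedVarcharType(), new TypeOperators());
        System.out.println(mapType.getDisplayName()); // map(integer, varchar)
    }
}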
use of io.trino.spi.type.TypeOperators in project trino by trinodb.
In the class AbstractTestHiveFileSystem, the method setup:
protected void setup(String host, int port, String databaseName, boolean s3SelectPushdownEnabled, HdfsConfiguration hdfsConfiguration)
{
    database = databaseName;
    table = new SchemaTableName(database, "trino_test_external_fs");
    tableWithHeader = new SchemaTableName(database, "trino_test_external_fs_with_header");
    tableWithHeaderAndFooter = new SchemaTableName(database, "trino_test_external_fs_with_header_and_footer");
    String random = randomUUID().toString().toLowerCase(ENGLISH).replace("-", "");
    temporaryCreateTable = new SchemaTableName(database, "tmp_trino_test_create_" + random);
    config = new HiveConfig().setS3SelectPushdownEnabled(s3SelectPushdownEnabled);
    Optional<HostAndPort> proxy = Optional.ofNullable(System.getProperty("hive.metastore.thrift.client.socks-proxy"))
            .map(HostAndPort::fromString);
    MetastoreLocator metastoreLocator = new TestingMetastoreLocator(proxy, HostAndPort.fromParts(host, port));
    HivePartitionManager hivePartitionManager = new HivePartitionManager(config);
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, new HdfsConfig(), new NoHdfsAuthentication());
    MetastoreConfig metastoreConfig = new MetastoreConfig();
    metastoreClient = new TestingHiveMetastore(
            new BridgingHiveMetastore(
                    new ThriftHiveMetastore(metastoreLocator, new HiveConfig(), metastoreConfig, new ThriftMetastoreConfig(), hdfsEnvironment, false),
                    new HiveIdentity(getHiveSession(config).getIdentity())),
            getBasePath(),
            hdfsEnvironment);
    locationService = new HiveLocationService(hdfsEnvironment);
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(
            new CatalogName("hive"), config, metastoreConfig,
            HiveMetastoreFactory.ofInstance(metastoreClient), hdfsEnvironment, hivePartitionManager,
            newDirectExecutorService(), heartbeatService, TESTING_TYPE_MANAGER, NOOP_METADATA_PROVIDER,
            locationService, partitionUpdateCodec, new NodeVersion("test_version"),
            new NoneHiveRedirectionsProvider(),
            ImmutableSet.of(new PartitionsSystemTableProvider(hivePartitionManager, TESTING_TYPE_MANAGER), new PropertiesSystemTableProvider()),
            new DefaultHiveMaterializedViewMetadataFactory(),
            SqlStandardAccessControlMetadata::new,
            NO_REDIRECTIONS,
            TableInvalidationCallback.NOOP);
    transactionManager = new HiveTransactionManager(metadataFactory);
    splitManager = new HiveSplitManager(
            transactionManager, hivePartitionManager, new NamenodeStats(), hdfsEnvironment,
            new CachingDirectoryLister(new HiveConfig()),
            new BoundedExecutor(executor, config.getMaxSplitIteratorThreads()),
            new CounterStat(),
            config.getMaxOutstandingSplits(), config.getMaxOutstandingSplitsSize(),
            config.getMinPartitionBatchSize(), config.getMaxPartitionBatchSize(),
            config.getMaxInitialSplits(), config.getSplitLoaderConcurrency(), config.getMaxSplitsPerSecond(),
            config.getRecursiveDirWalkerEnabled(), TESTING_TYPE_MANAGER);
    TypeOperators typeOperators = new TypeOperators();
    BlockTypeOperators blockTypeOperators = new BlockTypeOperators(typeOperators);
    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config, hdfsEnvironment), hdfsEnvironment, PAGE_SORTER,
            HiveMetastoreFactory.ofInstance(metastoreClient),
            new GroupByHashPageIndexerFactory(new JoinCompiler(typeOperators), blockTypeOperators),
            TESTING_TYPE_MANAGER, config, locationService, partitionUpdateCodec,
            new TestingNodeManager("fake-environment"), new HiveEventClient(),
            getHiveSessionProperties(config), new HiveWriterStats());
    pageSourceProvider = new HivePageSourceProvider(
            TESTING_TYPE_MANAGER, hdfsEnvironment, config,
            getDefaultHivePageSourceFactories(hdfsEnvironment, config),
            getDefaultHiveRecordCursorProviders(config, hdfsEnvironment),
            new GenericHiveRecordCursorProvider(hdfsEnvironment, config),
            Optional.empty());
    onSetupComplete();
}
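As in the page sink test above, one shared TypeOperators instance feeds both the JoinCompiler and the BlockTypeOperators; TypeOperators caches the operators it generates, so sharing an instance avoids repeated bytecode generation and adaptation. A hedged sketch of the hash-code operator lookup that the page indexer ultimately relies on (convention constants again assumed from io.trino.spi.function.InvocationConvention):

import io.trino.spi.type.TypeOperators;

import java.lang.invoke.MethodHandle;

import static io.trino.spi.function.InvocationConvention.InvocationArgumentConvention.NEVER_NULL;
import static io.trino.spi.function.InvocationConvention.InvocationReturnConvention.FAIL_ON_NULL;
import static io.trino.spi.function.InvocationConvention.simpleConvention;
import static io.trino.spi.type.BigintType.BIGINT;

public class HashCodeOperatorSketch
{
    public static void main(String[] args) throws Throwable
    {
        TypeOperators typeOperators = new TypeOperators();

        // BIGINT hash code: one non-null long in, a long hash out
        MethodHandle hashCode = typeOperators.getHashCodeOperator(
                BIGINT, simpleConvention(FAIL_ON_NULL, NEVER_NULL));

        // Repeated lookups for the same type and convention hit the internal cache
        long hash = (long) hashCode.invoke(42L);
        System.out.println(hash);
    }
}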