
Example 16 with INTEGER

Use of io.prestosql.spi.type.IntegerType.INTEGER in project boostkit-bigdata by kunpengcompute.

From the class TestMergingPageIterator, method testMerging:

@Test
public void testMerging() {
    List<Type> types = ImmutableList.of(INTEGER, INTEGER);
    List<Integer> sortIndexes = ImmutableList.of(1);
    List<SortOrder> sortOrders = ImmutableList.of(SortOrder.ASC_NULLS_FIRST);
    List<List<Page>> pageLists = new ArrayList<>();
    PageBuilder pageBuilder = new PageBuilder(types);
    for (int i = 0; i < 10; i++) {
        Iterator<Integer> values = IntStream.range(0, 1000)
                .map(ignored -> ThreadLocalRandom.current().nextInt(100_000))
                .mapToObj(n -> ((n % 100) == 0) ? null : n)
                .sorted(nullsFirst(naturalOrder()))
                .iterator();
        List<Page> pages = new ArrayList<>();
        for (int j = 0; j < 10; j++) {
            for (int k = 0; k < 100; k++) {
                Integer n = values.next();
                pageBuilder.declarePosition();
                if (n == null) {
                    pageBuilder.getBlockBuilder(0).appendNull();
                    pageBuilder.getBlockBuilder(1).appendNull();
                } else {
                    INTEGER.writeLong(pageBuilder.getBlockBuilder(0), n);
                    INTEGER.writeLong(pageBuilder.getBlockBuilder(1), n * 22L);
                }
            }
            pages.add(pageBuilder.build());
            pageBuilder.reset();
        }
        pageLists.add(pages);
        assertFalse(values.hasNext());
    }
    List<Iterator<Page>> pages = pageLists.stream().map(List::iterator).collect(toList());
    Iterator<Page> iterator = new MergingPageIterator(pages, types, sortIndexes, sortOrders);
    List<Long> values = new ArrayList<>();
    while (iterator.hasNext()) {
        Page page = iterator.next();
        for (int i = 0; i < page.getPositionCount(); i++) {
            if (page.getBlock(0).isNull(i)) {
                assertTrue(page.getBlock(1).isNull(i));
                values.add(null);
            } else {
                long x = INTEGER.getLong(page.getBlock(0), i);
                long y = INTEGER.getLong(page.getBlock(1), i);
                assertEquals(y, x * 22);
                values.add(x);
            }
        }
    }
    assertThat(values).isSortedAccordingTo(nullsFirst(naturalOrder()));
}
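
For readers unfamiliar with the Presto type SPI: although INTEGER models a 32-bit value, its Java stack type is long, which is why the test above goes through writeLong and getLong. Below is a minimal, hedged sketch of that round trip on a single block; the class name IntegerRoundTrip and the values are illustrative, not from the project.

import io.prestosql.spi.block.Block;
import io.prestosql.spi.block.BlockBuilder;
import static io.prestosql.spi.type.IntegerType.INTEGER;

public class IntegerRoundTrip {
    public static void main(String[] args) {
        // INTEGER values are written and read as longs through the Type API.
        BlockBuilder builder = INTEGER.createBlockBuilder(null, 3);
        INTEGER.writeLong(builder, 42);
        builder.appendNull();
        INTEGER.writeLong(builder, -7);
        Block block = builder.build();
        for (int i = 0; i < block.getPositionCount(); i++) {
            // Nulls are tracked per position, exactly as the merging test checks.
            System.out.println(block.isNull(i) ? "null" : INTEGER.getLong(block, i));
        }
    }
}

This prints 42, null, -7 in order, mirroring the per-position null handling that testMerging asserts.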

Example 17 with INTEGER

Use of io.prestosql.spi.type.IntegerType.INTEGER in project hetu-core by openlookeng.

From the class HiveFileFormatBenchmark, method createTpchDataSet:

private static <E extends TpchEntity> TestData createTpchDataSet(FileFormat format, TpchTable<E> tpchTable, List<TpchColumn<E>> columns) {
    List<String> columnNames = columns.stream().map(TpchColumn::getColumnName).collect(toList());
    List<Type> columnTypes = columns.stream()
            .map(HiveFileFormatBenchmark::getColumnType)
            .map(type -> format.supportsDate() || !DATE.equals(type) ? type : createUnboundedVarcharType())
            .collect(toList());
    PageBuilder pageBuilder = new PageBuilder(columnTypes);
    ImmutableList.Builder<Page> pages = ImmutableList.builder();
    long dataSize = 0;
    for (E row : tpchTable.createGenerator(10, 1, 1)) {
        pageBuilder.declarePosition();
        for (int i = 0; i < columns.size(); i++) {
            TpchColumn<E> column = columns.get(i);
            BlockBuilder blockBuilder = pageBuilder.getBlockBuilder(i);
            switch(column.getType().getBase()) {
                case IDENTIFIER:
                    BIGINT.writeLong(blockBuilder, column.getIdentifier(row));
                    break;
                case INTEGER:
                    INTEGER.writeLong(blockBuilder, column.getInteger(row));
                    break;
                case DATE:
                    if (format.supportsDate()) {
                        DATE.writeLong(blockBuilder, column.getDate(row));
                    } else {
                        createUnboundedVarcharType().writeString(blockBuilder, column.getString(row));
                    }
                    break;
                case DOUBLE:
                    DOUBLE.writeDouble(blockBuilder, column.getDouble(row));
                    break;
                case VARCHAR:
                    createUnboundedVarcharType().writeSlice(blockBuilder, Slices.utf8Slice(column.getString(row)));
                    break;
                default:
                    throw new IllegalArgumentException("Unsupported type " + column.getType());
            }
        }
        if (pageBuilder.isFull()) {
            Page page = pageBuilder.build();
            pages.add(page);
            pageBuilder.reset();
            dataSize += page.getSizeInBytes();
            if (dataSize >= MIN_DATA_SIZE) {
                break;
            }
        }
    }
    return new TestData(columnNames, columnTypes, pages.build());
}
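
The benchmark relies on the standard PageBuilder flush pattern: declare a position per row, write each column, and emit a page whenever the builder reports it is full. A minimal sketch of that pattern with a single INTEGER column follows; buildPages and rowCount are illustrative names, and unlike the benchmark (which stops once a size threshold is reached), the sketch flushes a trailing partial page explicitly.

import com.google.common.collect.ImmutableList;
import io.prestosql.spi.Page;
import io.prestosql.spi.PageBuilder;
import java.util.ArrayList;
import java.util.List;
import static io.prestosql.spi.type.IntegerType.INTEGER;

public class PageFlushSketch {
    public static List<Page> buildPages(int rowCount) {
        PageBuilder pageBuilder = new PageBuilder(ImmutableList.of(INTEGER));
        List<Page> pages = new ArrayList<>();
        for (int row = 0; row < rowCount; row++) {
            pageBuilder.declarePosition();                   // one position per row
            INTEGER.writeLong(pageBuilder.getBlockBuilder(0), row);
            if (pageBuilder.isFull()) {                      // size-based flush, as in the benchmark
                pages.add(pageBuilder.build());
                pageBuilder.reset();                         // reuse the builder for the next page
            }
        }
        if (!pageBuilder.isEmpty()) {                        // emit the trailing partial page
            pages.add(pageBuilder.build());
        }
        return pages;
    }
}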

Example 18 with INTEGER

Use of io.prestosql.spi.type.IntegerType.INTEGER in project hetu-core by openlookeng.

From the class ExtractSpatialJoins, method tryCreateSpatialJoin:

private static Result tryCreateSpatialJoin(
        Context context,
        JoinNode joinNode,
        RowExpression filter,
        PlanNodeId nodeId,
        List<Symbol> outputSymbols,
        CallExpression spatialFunction,
        Optional<RowExpression> radius,
        Metadata metadata,
        SplitManager splitManager,
        PageSourceManager pageSourceManager,
        TypeAnalyzer typeAnalyzer) {
    // TODO Add support for distributed left spatial joins
    Optional<String> spatialPartitioningTableName = joinNode.getType() == INNER ? getSpatialPartitioningTableName(context.getSession()) : Optional.empty();
    Optional<KdbTree> kdbTree = spatialPartitioningTableName.map(tableName -> loadKdbTree(tableName, context.getSession(), metadata, splitManager, pageSourceManager, nodeId));
    List<RowExpression> arguments = spatialFunction.getArguments();
    verify(arguments.size() == 2);
    RowExpression firstArgument = arguments.get(0);
    RowExpression secondArgument = arguments.get(1);
    Type sphericalGeographyType = metadata.getType(SPHERICAL_GEOGRAPHY_TYPE_SIGNATURE);
    if (firstArgument.getType().equals(sphericalGeographyType) || secondArgument.getType().equals(sphericalGeographyType)) {
        if (joinNode.getType() != INNER) {
            return Result.empty();
        }
    }
    Set<Symbol> firstSymbols = extractUnique(firstArgument);
    Set<Symbol> secondSymbols = extractUnique(secondArgument);
    if (firstSymbols.isEmpty() || secondSymbols.isEmpty()) {
        return Result.empty();
    }
    Optional<Symbol> newFirstSymbol = newGeometrySymbol(context, firstArgument);
    Optional<Symbol> newSecondSymbol = newGeometrySymbol(context, secondArgument);
    PlanNode leftNode = joinNode.getLeft();
    PlanNode rightNode = joinNode.getRight();
    PlanNode newLeftNode;
    PlanNode newRightNode;
    // Check if the order of arguments of the spatial function matches the order of join sides
    int alignment = checkAlignment(joinNode, firstSymbols, secondSymbols);
    if (alignment > 0) {
        newLeftNode = newFirstSymbol.map(symbol -> addProjection(context, leftNode, symbol, firstArgument)).orElse(leftNode);
        newRightNode = newSecondSymbol.map(symbol -> addProjection(context, rightNode, symbol, secondArgument)).orElse(rightNode);
    } else if (alignment < 0) {
        newLeftNode = newSecondSymbol.map(symbol -> addProjection(context, leftNode, symbol, secondArgument)).orElse(leftNode);
        newRightNode = newFirstSymbol.map(symbol -> addProjection(context, rightNode, symbol, firstArgument)).orElse(rightNode);
    } else {
        return Result.empty();
    }
    RowExpression newFirstArgument = mapToExpression(newFirstSymbol, firstArgument, context);
    RowExpression newSecondArgument = mapToExpression(newSecondSymbol, secondArgument, context);
    Optional<Symbol> leftPartitionSymbol = Optional.empty();
    Optional<Symbol> rightPartitionSymbol = Optional.empty();
    if (kdbTree.isPresent()) {
        leftPartitionSymbol = Optional.of(context.getSymbolAllocator().newSymbol("pid", INTEGER));
        rightPartitionSymbol = Optional.of(context.getSymbolAllocator().newSymbol("pid", INTEGER));
        if (alignment > 0) {
            newLeftNode = addPartitioningNodes(metadata, context, newLeftNode, leftPartitionSymbol.get(), kdbTree.get(), newFirstArgument, Optional.empty());
            newRightNode = addPartitioningNodes(metadata, context, newRightNode, rightPartitionSymbol.get(), kdbTree.get(), newSecondArgument, radius);
        } else {
            newLeftNode = addPartitioningNodes(metadata, context, newLeftNode, leftPartitionSymbol.get(), kdbTree.get(), newSecondArgument, Optional.empty());
            newRightNode = addPartitioningNodes(metadata, context, newRightNode, rightPartitionSymbol.get(), kdbTree.get(), newFirstArgument, radius);
        }
    }
    CallExpression newSpatialFunction = new CallExpression(
            spatialFunction.getDisplayName(),
            spatialFunction.getFunctionHandle(),
            spatialFunction.getType(),
            ImmutableList.of(newFirstArgument, newSecondArgument),
            Optional.empty());
    RowExpression newFilter = replaceExpression(filter, ImmutableMap.of(spatialFunction, newSpatialFunction));
    return Result.ofPlanNode(new SpatialJoinNode(
            nodeId,
            SpatialJoinNode.Type.fromJoinNodeType(joinNode.getType()),
            newLeftNode,
            newRightNode,
            outputSymbols,
            newFilter,
            leftPartitionSymbol,
            rightPartitionSymbol,
            kdbTree.map(KdbTreeUtils::toJson)));
}
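
The branch on alignment above hinges on whether the spatial function's arguments line up with the join's left and right sides. The project's checkAlignment is not shown here, but a hedged sketch of the check it plausibly performs is below; treat the method body as an assumption about its contract, not the actual implementation.

import io.prestosql.spi.plan.Symbol;
import java.util.Set;

final class AlignmentSketch {
    // +1: first argument uses only left-side symbols, second only right-side ones.
    // -1: the mirrored case, so the rule swaps the arguments when projecting.
    //  0: the arguments straddle both sides; the rule returns Result.empty().
    static int alignment(Set<Symbol> leftOutputs, Set<Symbol> rightOutputs,
            Set<Symbol> firstSymbols, Set<Symbol> secondSymbols) {
        if (leftOutputs.containsAll(firstSymbols) && rightOutputs.containsAll(secondSymbols)) {
            return 1;
        }
        if (leftOutputs.containsAll(secondSymbols) && rightOutputs.containsAll(firstSymbols)) {
            return -1;
        }
        return 0;
    }
}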

Example 19 with INTEGER

Use of io.prestosql.spi.type.IntegerType.INTEGER in project hetu-core by openlookeng.

From the class TestSignatureBinder, method testFunction:

@Test
public void testFunction() {
    Signature simple = functionSignature()
            .returnType(parseTypeSignature("boolean"))
            .argumentTypes(parseTypeSignature("function(integer,integer)"))
            .build();
    assertThat(simple).boundTo("integer").fails();
    assertThat(simple).boundTo("function(integer,integer)").succeeds();
    // TODO: Support coercion of return type of lambda
    assertThat(simple).boundTo("function(integer,smallint)").withCoercion().fails();
    assertThat(simple).boundTo("function(integer,bigint)").withCoercion().fails();
    Signature applyTwice = functionSignature()
            .returnType(parseTypeSignature("V"))
            .argumentTypes(parseTypeSignature("T"), parseTypeSignature("function(T,U)"), parseTypeSignature("function(U,V)"))
            .typeVariableConstraints(typeVariable("T"), typeVariable("U"), typeVariable("V"))
            .build();
    assertThat(applyTwice).boundTo("integer", "integer", "integer").fails();
    assertThat(applyTwice)
            .boundTo("integer", "function(integer,varchar)", "function(varchar,double)")
            .produces(BoundVariables.builder()
                    .setTypeVariable("T", INTEGER)
                    .setTypeVariable("U", VARCHAR)
                    .setTypeVariable("V", DOUBLE)
                    .build());
    assertThat(applyTwice)
            .boundTo("integer",
                    new TypeSignatureProvider(functionArgumentTypes -> TypeSignature.parseTypeSignature("function(integer,varchar)")),
                    new TypeSignatureProvider(functionArgumentTypes -> TypeSignature.parseTypeSignature("function(varchar,double)")))
            .produces(BoundVariables.builder()
                    .setTypeVariable("T", INTEGER)
                    .setTypeVariable("U", VARCHAR)
                    .setTypeVariable("V", DOUBLE)
                    .build());
    // pass function argument to non-function position of a function
    assertThat(applyTwice)
            .boundTo(new TypeSignatureProvider(functionArgumentTypes -> TypeSignature.parseTypeSignature("function(integer,varchar)")),
                    new TypeSignatureProvider(functionArgumentTypes -> TypeSignature.parseTypeSignature("function(integer,varchar)")),
                    new TypeSignatureProvider(functionArgumentTypes -> TypeSignature.parseTypeSignature("function(varchar,double)")))
            .fails();
    // pass non-function argument to function position of a function
    assertThat(applyTwice)
            .boundTo(new TypeSignatureProvider(functionArgumentTypes -> TypeSignature.parseTypeSignature("function(integer,varchar)")),
                    "integer",
                    new TypeSignatureProvider(functionArgumentTypes -> TypeSignature.parseTypeSignature("function(varchar,double)")))
            .fails();
    Signature flatMap = functionSignature()
            .returnType(parseTypeSignature("array(T)"))
            .argumentTypes(parseTypeSignature("array(T)"), parseTypeSignature("function(T, array(T))"))
            .typeVariableConstraints(typeVariable("T"))
            .build();
    assertThat(flatMap)
            .boundTo("array(integer)", "function(integer, array(integer))")
            .produces(BoundVariables.builder().setTypeVariable("T", INTEGER).build());
    Signature varargApply = functionSignature()
            .returnType(parseTypeSignature("T"))
            .argumentTypes(parseTypeSignature("T"), parseTypeSignature("function(T, T)"))
            .typeVariableConstraints(typeVariable("T"))
            .setVariableArity(true)
            .build();
    assertThat(varargApply)
            .boundTo("integer", "function(integer, integer)", "function(integer, integer)", "function(integer, integer)")
            .produces(BoundVariables.builder().setTypeVariable("T", INTEGER).build());
    assertThat(varargApply)
            .boundTo("integer", "function(integer, integer)", "function(integer, double)", "function(double, double)")
            .fails();
    Signature loop = functionSignature()
            .returnType(parseTypeSignature("T"))
            .argumentTypes(parseTypeSignature("T"), parseTypeSignature("function(T, T)"))
            .typeVariableConstraints(typeVariable("T"))
            .build();
    assertThat(loop)
            .boundTo("integer", new TypeSignatureProvider(paramTypes -> new FunctionType(paramTypes, BIGINT).getTypeSignature()))
            .fails();
    assertThat(loop)
            .boundTo("integer", new TypeSignatureProvider(paramTypes -> new FunctionType(paramTypes, BIGINT).getTypeSignature()))
            .withCoercion()
            .produces(BoundVariables.builder().setTypeVariable("T", BIGINT).build());
    // TODO: Support coercion of return type of lambda
    assertThat(loop)
            .withCoercion()
            .boundTo("integer", new TypeSignatureProvider(paramTypes -> new FunctionType(paramTypes, SMALLINT).getTypeSignature()))
            .fails();
    // TODO: Support coercion of return type of lambda
    // Without coercion support for the return type of a lambda, the lambda's return type must be varchar(x) to avoid the need for coercions.
    Signature varcharApply = functionSignature()
            .returnType(parseTypeSignature("varchar"))
            .argumentTypes(parseTypeSignature("varchar"), parseTypeSignature("function(varchar, varchar(x))", ImmutableSet.of("x")))
            .build();
    assertThat(varcharApply)
            .withCoercion()
            .boundTo("varchar(10)", new TypeSignatureProvider(paramTypes -> new FunctionType(paramTypes, createVarcharType(1)).getTypeSignature()))
            .succeeds();
    Signature sortByKey = functionSignature()
            .returnType(parseTypeSignature("array(T)"))
            .argumentTypes(parseTypeSignature("array(T)"), parseTypeSignature("function(T,E)"))
            .typeVariableConstraints(typeVariable("T"), orderableTypeParameter("E"))
            .build();
    assertThat(sortByKey)
            .boundTo("array(integer)", new TypeSignatureProvider(paramTypes -> new FunctionType(paramTypes, VARCHAR).getTypeSignature()))
            .produces(BoundVariables.builder().setTypeVariable("T", INTEGER).setTypeVariable("E", VARCHAR).build());
}
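
To make the binding semantics concrete: in the applyTwice assertions, binding function(integer,varchar) against function(T,U) fixes T = INTEGER and U = VARCHAR, and the second lambda then fixes V = DOUBLE. Here is one more minimal case in the same test DSL, using only helpers that appear above; the identityApply signature itself is illustrative, not from the test.

    Signature identityApply = functionSignature()
            .returnType(parseTypeSignature("T"))
            .argumentTypes(parseTypeSignature("T"), parseTypeSignature("function(T,T)"))
            .typeVariableConstraints(typeVariable("T"))
            .build();
    // The concrete first argument pins T, and the lambda must agree with it.
    assertThat(identityApply)
            .boundTo("integer", "function(integer,integer)")
            .produces(BoundVariables.builder().setTypeVariable("T", INTEGER).build());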

Example 20 with INTEGER

Use of io.prestosql.spi.type.IntegerType.INTEGER in project hetu-core by openlookeng.

From the class OrcPageSourceFactory, method createOrcPageSource:

public static OrcPageSource createOrcPageSource(
        HdfsEnvironment hdfsEnvironment,
        String sessionUser,
        Configuration configuration,
        Path path,
        long start,
        long length,
        long fileSize,
        List<HiveColumnHandle> columns,
        boolean useOrcColumnNames,
        boolean isFullAcid,
        TupleDomain<HiveColumnHandle> effectivePredicate,
        DateTimeZone legacyFileTimeZone,
        TypeManager typeManager,
        DataSize maxMergeDistance,
        DataSize maxBufferSize,
        DataSize streamBufferSize,
        DataSize tinyStripeThreshold,
        DataSize maxReadBlockSize,
        boolean lazyReadSmallRanges,
        boolean orcBloomFiltersEnabled,
        FileFormatDataSourceStats stats,
        Optional<DynamicFilterSupplier> dynamicFilters,
        Optional<DeleteDeltaLocations> deleteDeltaLocations,
        Optional<Long> startRowOffsetOfFile,
        Optional<List<IndexMetadata>> indexes,
        SplitMetadata splitMetadata,
        OrcCacheStore orcCacheStore,
        OrcCacheProperties orcCacheProperties,
        int domainCompactionThreshold,
        boolean pageMetadataEnabled,
        long dataSourceLastModifiedTime) {
    for (HiveColumnHandle column : columns) {
        checkArgument(column.getColumnType() == HiveColumnHandle.ColumnType.REGULAR || column.getHiveColumnIndex() == HiveColumnHandle.ROW_ID__COLUMN_INDEX, "column type must be regular: %s", column);
    }
    checkArgument(!effectivePredicate.isNone());
    OrcDataSource orcDataSource;
    try {
        // Always create a lazy Stream. HDFS stream opened only when required.
        FSDataInputStream inputStream = new FSDataInputStream(new LazyFSInputStream(() -> {
            FileSystem fileSystem = hdfsEnvironment.getFileSystem(sessionUser, path, configuration);
            return hdfsEnvironment.doAs(sessionUser, () -> fileSystem.open(path));
        }));
        orcDataSource = new HdfsOrcDataSource(new OrcDataSourceId(path.toString()), fileSize, maxMergeDistance, maxBufferSize, streamBufferSize, lazyReadSmallRanges, inputStream, stats, dataSourceLastModifiedTime);
    } catch (Exception e) {
        if (nullToEmpty(e.getMessage()).trim().equals("Filesystem closed") || e instanceof FileNotFoundException) {
            throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, splitError(e, path, start, length), e);
    }
    AggregatedMemoryContext systemMemoryUsage = newSimpleAggregatedMemoryContext();
    try {
        OrcDataSource readerLocalDataSource = OrcReader.wrapWithCacheIfTiny(orcDataSource, tinyStripeThreshold);
        OrcFileTail fileTail;
        if (orcCacheProperties.isFileTailCacheEnabled()) {
            try {
                OrcDataSourceIdWithTimeStamp orcDataSourceIdWithTimeStamp = new OrcDataSourceIdWithTimeStamp(readerLocalDataSource.getId(), readerLocalDataSource.getLastModifiedTime());
                fileTail = orcCacheStore.getFileTailCache().get(new OrcFileTailCacheKey(orcDataSourceIdWithTimeStamp), () -> OrcPageSourceFactory.createFileTail(orcDataSource));
            } catch (UncheckedExecutionException | ExecutionException executionException) {
                handleCacheLoadException(executionException);
                log.debug(executionException.getCause(), "Error while caching the Orc file tail. Falling back to default flow");
                fileTail = OrcPageSourceFactory.createFileTail(orcDataSource);
            }
        } else {
            fileTail = OrcPageSourceFactory.createFileTail(orcDataSource);
        }
        OrcReader reader = new OrcReader(readerLocalDataSource, fileTail, maxMergeDistance, tinyStripeThreshold, maxReadBlockSize);
        List<OrcColumn> fileColumns = reader.getRootColumn().getNestedColumns();
        List<OrcColumn> fileReadColumns = isFullAcid ? new ArrayList<>(columns.size() + 5) : new ArrayList<>(columns.size());
        List<Type> fileReadTypes = isFullAcid ? new ArrayList<>(columns.size() + 5) : new ArrayList<>(columns.size());
        ImmutableList<String> acidColumnNames = null;
        List<ColumnAdaptation> columnAdaptations = new ArrayList<>(columns.size());
        // Only Hive ACID files will begin with bucket_
        boolean fileNameContainsBucket = path.getName().contains("bucket");
        if (isFullAcid && fileNameContainsBucket) {
            // Skip the acid schema check in case of non-ACID files
            acidColumnNames = ImmutableList.<String>builder().add(ACID_COLUMN_ORIGINAL_TRANSACTION, ACID_COLUMN_BUCKET, ACID_COLUMN_ROW_ID, ACID_COLUMN_CURRENT_TRANSACTION, ACID_COLUMN_OPERATION).build();
            verifyAcidSchema(reader, path);
            Map<String, OrcColumn> acidColumnsByName = uniqueIndex(fileColumns, orcColumn -> orcColumn.getColumnName().toLowerCase(ENGLISH));
            if (AcidUtils.isDeleteDelta(path.getParent())) {
                // Avoid reading column data from delete_delta files.
                // Call will come here in case of Minor VACUUM where all delete_delta files are merge together.
                fileColumns = ImmutableList.of();
            } else {
                fileColumns = ensureColumnNameConsistency(acidColumnsByName.get(ACID_COLUMN_ROW_STRUCT).getNestedColumns(), columns);
            }
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_ORIGINAL_TRANSACTION.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_BUCKET.toLowerCase(ENGLISH)));
            fileReadTypes.add(INTEGER);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_ROW_ID.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_CURRENT_TRANSACTION.toLowerCase(ENGLISH)));
            fileReadTypes.add(BIGINT);
            fileReadColumns.add(acidColumnsByName.get(ACID_COLUMN_OPERATION.toLowerCase(ENGLISH)));
            fileReadTypes.add(INTEGER);
        }
        Map<String, OrcColumn> fileColumnsByName = ImmutableMap.of();
        if (useOrcColumnNames || isFullAcid) {
            verifyFileHasColumnNames(fileColumns, path);
            // Convert column names read from ORC files to lower case to be consistent with those stored in Hive Metastore
            fileColumnsByName = uniqueIndex(fileColumns, orcColumn -> orcColumn.getColumnName().toLowerCase(ENGLISH));
        }
        TupleDomainOrcPredicateBuilder predicateBuilder = TupleDomainOrcPredicate.builder().setBloomFiltersEnabled(orcBloomFiltersEnabled);
        Map<HiveColumnHandle, Domain> effectivePredicateDomains = effectivePredicate.getDomains().orElseThrow(() -> new IllegalArgumentException("Effective predicate is none"));
        for (HiveColumnHandle column : columns) {
            OrcColumn orcColumn = null;
            if (useOrcColumnNames || isFullAcid) {
                orcColumn = fileColumnsByName.get(column.getName());
            } else if (column.getHiveColumnIndex() >= 0 && column.getHiveColumnIndex() < fileColumns.size()) {
                orcColumn = fileColumns.get(column.getHiveColumnIndex());
            }
            Type readType = typeManager.getType(column.getTypeSignature());
            if (orcColumn != null) {
                int sourceIndex = fileReadColumns.size();
                columnAdaptations.add(ColumnAdaptation.sourceColumn(sourceIndex));
                fileReadColumns.add(orcColumn);
                fileReadTypes.add(readType);
                Domain domain = effectivePredicateDomains.get(column);
                if (domain != null) {
                    predicateBuilder.addColumn(orcColumn.getColumnId(), domain);
                }
            } else if (isFullAcid && readType instanceof RowType && column.getName().equalsIgnoreCase(HiveColumnHandle.UPDATE_ROW_ID_COLUMN_NAME)) {
                HiveType hiveType = column.getHiveType();
                StructTypeInfo structTypeInfo = (StructTypeInfo) hiveType.getTypeInfo();
                List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
                // Map each ACID struct field to the position of the matching metadata column read above
                List<ColumnAdaptation> adaptations = fieldNames.stream()
                        .map(acidColumnNames::indexOf)
                        .map(c -> ColumnAdaptation.sourceColumn(c, false))
                        .collect(Collectors.toList());
                columnAdaptations.add(ColumnAdaptation.structColumn(structTypeInfo, adaptations));
            } else {
                columnAdaptations.add(ColumnAdaptation.nullColumn(readType));
            }
        }
        Map<String, Domain> domains = effectivePredicate.getDomains().get().entrySet().stream()
                .collect(toMap(e -> e.getKey().getName(), Map.Entry::getValue));
        OrcRecordReader recordReader = reader.createRecordReader(
                fileReadColumns,
                fileReadTypes,
                predicateBuilder.build(),
                start,
                length,
                legacyFileTimeZone,
                systemMemoryUsage,
                INITIAL_BATCH_SIZE,
                exception -> handleException(orcDataSource.getId(), exception),
                indexes,
                splitMetadata,
                domains,
                orcCacheStore,
                orcCacheProperties,
                pageMetadataEnabled);
        OrcDeletedRows deletedRows = new OrcDeletedRows(
                path.getName(),
                deleteDeltaLocations,
                new OrcDeleteDeltaPageSourceFactory(sessionUser, configuration, hdfsEnvironment, maxMergeDistance, maxBufferSize, streamBufferSize, maxReadBlockSize, tinyStripeThreshold, lazyReadSmallRanges, orcBloomFiltersEnabled, stats),
                sessionUser,
                configuration,
                hdfsEnvironment,
                startRowOffsetOfFile);
        boolean eagerload = false;
        if (indexes.isPresent()) {
            eagerload = indexes.get().stream().anyMatch(indexMetadata -> EAGER_LOAD_INDEX_ID.contains(indexMetadata.getIndex().getId()));
        }
        return new OrcPageSource(recordReader, columnAdaptations, orcDataSource, deletedRows, eagerload, systemMemoryUsage, stats);
    } catch (Exception e) {
        try {
            orcDataSource.close();
        } catch (IOException ignored) {
        }
        if (e instanceof PrestoException) {
            throw (PrestoException) e;
        }
        String message = splitError(e, path, start, length);
        if (e instanceof BlockMissingException) {
            throw new PrestoException(HIVE_MISSING_DATA, message, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
    }
}
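
The comment about always creating a lazy stream points at a small but useful technique: defer the HDFS open() until the first read, so that constructing the page source stays cheap when the stream may never be consumed. A generic sketch of the idea follows; LazyOpenInputStream and StreamSupplier are illustrative names, not the project's LazyFSInputStream.

import java.io.IOException;
import java.io.InputStream;

final class LazyOpenInputStream extends InputStream {
    interface StreamSupplier {
        InputStream open() throws IOException;
    }

    private final StreamSupplier supplier;
    private InputStream delegate;

    LazyOpenInputStream(StreamSupplier supplier) {
        this.supplier = supplier;
    }

    private InputStream delegate() throws IOException {
        if (delegate == null) {
            delegate = supplier.open();  // the expensive open happens on first use only
        }
        return delegate;
    }

    @Override
    public int read() throws IOException {
        return delegate().read();
    }

    @Override
    public void close() throws IOException {
        if (delegate != null) {
            delegate.close();
        }
    }
}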

Aggregations

INTEGER (io.prestosql.spi.type.IntegerType.INTEGER): 24 usages
Type (io.prestosql.spi.type.Type): 23 usages
List (java.util.List): 23 usages
ImmutableList (com.google.common.collect.ImmutableList): 22 usages
Optional (java.util.Optional): 19 usages
BIGINT (io.prestosql.spi.type.BigintType.BIGINT): 17 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 15 usages
Page (io.prestosql.spi.Page): 15 usages
IOException (java.io.IOException): 15 usages
String.format (java.lang.String.format): 13 usages
ArrayList (java.util.ArrayList): 13 usages
Collectors.toList (java.util.stream.Collectors.toList): 13 usages
ConnectorSession (io.prestosql.spi.connector.ConnectorSession): 12 usages
Map (java.util.Map): 12 usages
Objects.requireNonNull (java.util.Objects.requireNonNull): 12 usages
Properties (java.util.Properties): 12 usages
Path (org.apache.hadoop.fs.Path): 12 usages
ConnectorPageSource (io.prestosql.spi.connector.ConnectorPageSource): 11 usages
DOUBLE (io.prestosql.spi.type.DoubleType.DOUBLE): 11 usages
TypeManager (io.prestosql.spi.type.TypeManager): 11 usages