Example 16 with SortOrder

Use of io.trino.spi.connector.SortOrder in project trino by trinodb.

From the class TestWindowOperator, method testDistinctPartitionAndPeers:

@Test(dataProvider = "spillEnabled")
public void testDistinctPartitionAndPeers(boolean spillEnabled, boolean revokeMemoryWhenAddingPages, long memoryLimit) {
    List<Page> input = rowPagesBuilder(DOUBLE, DOUBLE)
            .row(1.0, 1.0)
            .row(1.0, 0.0)
            .row(1.0, Double.NaN)
            .row(1.0, null)
            .row(2.0, 2.0)
            .row(2.0, Double.NaN)
            .row(Double.NaN, Double.NaN)
            .row(Double.NaN, Double.NaN)
            .row(null, null)
            .row(null, 1.0)
            .row(null, null)
            .pageBreak()
            .row(1.0, Double.NaN)
            .row(1.0, null)
            .row(2.0, 2.0)
            .row(2.0, null)
            .row(Double.NaN, 3.0)
            .row(Double.NaN, null)
            .row(null, 2.0)
            .row(null, null)
            .build();
    WindowOperatorFactory operatorFactory = createFactoryUnbounded(
            ImmutableList.of(DOUBLE, DOUBLE),
            Ints.asList(0, 1),
            RANK,
            Ints.asList(0),
            Ints.asList(1),
            ImmutableList.copyOf(new SortOrder[] { SortOrder.ASC_NULLS_LAST }),
            spillEnabled);
    DriverContext driverContext = createDriverContext(memoryLimit);
    MaterializedResult expected = resultBuilder(driverContext.getSession(), DOUBLE, DOUBLE, BIGINT)
            .row(1.0, 0.0, 1L)
            .row(1.0, 1.0, 2L)
            .row(1.0, Double.NaN, 3L)
            .row(1.0, Double.NaN, 3L)
            .row(1.0, null, 5L)
            .row(1.0, null, 5L)
            .row(2.0, 2.0, 1L)
            .row(2.0, 2.0, 1L)
            .row(2.0, Double.NaN, 3L)
            .row(2.0, null, 4L)
            .row(Double.NaN, 3.0, 1L)
            .row(Double.NaN, Double.NaN, 2L)
            .row(Double.NaN, Double.NaN, 2L)
            .row(Double.NaN, null, 4L)
            .row(null, 1.0, 1L)
            .row(null, 2.0, 2L)
            .row(null, null, 3L)
            .row(null, null, 3L)
            .row(null, null, 3L)
            .build();
    assertOperatorEquals(operatorFactory, driverContext, input, expected, revokeMemoryWhenAddingPages);
}
Also used : WindowOperatorFactory(io.trino.operator.WindowOperator.WindowOperatorFactory) SortOrder(io.trino.spi.connector.SortOrder) Page(io.trino.spi.Page) MaterializedResult(io.trino.testing.MaterializedResult) OperatorAssertion.toMaterializedResult(io.trino.operator.OperatorAssertion.toMaterializedResult) Test(org.testng.annotations.Test)
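
A note on the expected ranks: under ASC_NULLS_LAST, regular doubles come first in ascending order, then NaN, then nulls, and RANK gives equal values the same rank. The same ordering can be reproduced with a plain JDK comparator; this is a minimal standalone sketch, not Trino code, relying on the fact that Double.compare already places NaN above every other double.

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class AscNullsLastSketch {
    public static void main(String[] args) {
        // A few peer values from partition 1.0 of the test above
        List<Double> values = Arrays.asList(1.0, 0.0, Double.NaN, null);
        // nullsLast pushes nulls after NaN; Double.compare orders NaN above all other doubles
        values.sort(Comparator.nullsLast(Double::compare));
        System.out.println(values); // [0.0, 1.0, NaN, null]
    }
}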

Example 17 with SortOrder

Use of io.trino.spi.connector.SortOrder in project trino by trinodb.

From the class OrderingCompiler, method generateMergeSortCompareTo:

private void generateMergeSortCompareTo(ClassDefinition classDefinition, CallSiteBinder callSiteBinder, List<Type> types, List<Integer> sortChannels, List<SortOrder> sortOrders) {
    Parameter leftPage = arg("leftPage", Page.class);
    Parameter leftPosition = arg("leftPosition", int.class);
    Parameter rightPage = arg("rightPage", Page.class);
    Parameter rightPosition = arg("rightPosition", int.class);
    MethodDefinition compareToMethod = classDefinition.declareMethod(a(PUBLIC), "compareTo", type(int.class), leftPage, leftPosition, rightPage, rightPosition);
    for (int i = 0; i < sortChannels.size(); i++) {
        int sortChannel = sortChannels.get(i);
        SortOrder sortOrder = sortOrders.get(i);
        Type sortType = types.get(sortChannel);
        MethodHandle compareBlockValue = getBlockPositionOrderingOperator(sortOrder, sortType);
        BytecodeBlock block = new BytecodeBlock().setDescription("compare channel " + sortChannel + " " + sortOrder);
        BytecodeExpression leftBlock = leftPage.invoke("getBlock", Block.class, constantInt(sortChannel));
        BytecodeExpression rightBlock = rightPage.invoke("getBlock", Block.class, constantInt(sortChannel));
        block.append(invokeDynamic(BOOTSTRAP_METHOD, ImmutableList.of(callSiteBinder.bind(compareBlockValue).getBindingId()), "compareBlockValue", compareBlockValue.type(), leftBlock, leftPosition, rightBlock, rightPosition));
        LabelNode equal = new LabelNode("equal");
        block.comment("if (compare != 0) return compare").dup().ifZeroGoto(equal).retInt().visitLabel(equal).pop(int.class);
        compareToMethod.getBody().append(block);
    }
    // values are equal
    compareToMethod.getBody().push(0).retInt();
}
Also used : LabelNode(io.airlift.bytecode.instruction.LabelNode) Type(io.trino.spi.type.Type) MethodDefinition(io.airlift.bytecode.MethodDefinition) BytecodeBlock(io.airlift.bytecode.BytecodeBlock) Parameter(io.airlift.bytecode.Parameter) SortOrder(io.trino.spi.connector.SortOrder) BytecodeExpression(io.airlift.bytecode.expression.BytecodeExpression) MethodHandle(java.lang.invoke.MethodHandle)
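
For readers who do not want to trace the bytecode DSL, the generated compareTo is equivalent to a short loop: compare the two rows channel by channel using the ordering operator bound for each SortOrder and return the first non-zero result. The following is a plain-Java sketch of that control flow (rows modeled as Object[] and per-channel Comparators standing in for the bound ordering operators; it is not the generated class itself).

import java.util.Comparator;
import java.util.List;

final class MergeSortCompareToSketch {
    private final List<Integer> sortChannels;
    private final List<Comparator<Object>> channelComparators;

    MergeSortCompareToSketch(List<Integer> sortChannels, List<Comparator<Object>> channelComparators) {
        this.sortChannels = sortChannels;
        this.channelComparators = channelComparators;
    }

    int compareTo(Object[] leftRow, Object[] rightRow) {
        for (int i = 0; i < sortChannels.size(); i++) {
            int channel = sortChannels.get(i);
            int compare = channelComparators.get(i).compare(leftRow[channel], rightRow[channel]);
            // "if (compare != 0) return compare", as in the generated block
            if (compare != 0) {
                return compare;
            }
        }
        // values are equal on all sort channels
        return 0;
    }
}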

Example 18 with SortOrder

Use of io.trino.spi.connector.SortOrder in project trino by trinodb.

From the class QueryPlanner, method orderingScheme:

private Optional<OrderingScheme> orderingScheme(PlanBuilder subPlan, Optional<OrderBy> orderBy, List<Expression> orderByExpressions) {
    if (orderBy.isEmpty() || (isSkipRedundantSort(session) && analysis.isOrderByRedundant(orderBy.get()))) {
        return Optional.empty();
    }
    Iterator<SortItem> sortItems = orderBy.get().getSortItems().iterator();
    ImmutableList.Builder<Symbol> orderBySymbols = ImmutableList.builder();
    Map<Symbol, SortOrder> orderings = new HashMap<>();
    for (Expression fieldOrExpression : orderByExpressions) {
        Symbol symbol = subPlan.translate(fieldOrExpression);
        SortItem sortItem = sortItems.next();
        if (!orderings.containsKey(symbol)) {
            orderBySymbols.add(symbol);
            orderings.put(symbol, sortItemToSortOrder(sortItem));
        }
    }
    return Optional.of(new OrderingScheme(orderBySymbols.build(), orderings));
}
Also used : SortItem(io.trino.sql.tree.SortItem) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) SelectExpression(io.trino.sql.analyzer.Analysis.SelectExpression) ComparisonExpression(io.trino.sql.tree.ComparisonExpression) IfExpression(io.trino.sql.tree.IfExpression) Expression(io.trino.sql.tree.Expression) LambdaExpression(io.trino.sql.tree.LambdaExpression) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) ImmutableList(com.google.common.collect.ImmutableList) SortOrder(io.trino.spi.connector.SortOrder) OrderingScheme.sortItemToSortOrder(io.trino.sql.planner.OrderingScheme.sortItemToSortOrder)
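
The containsKey guard is what keeps a duplicate ORDER BY expression from overriding the ordering that was seen first; for example, "ORDER BY a ASC, a DESC" ends up ordering by a ASC only. A minimal JDK-only sketch of that behavior (symbols and orders represented as plain strings):

import java.util.LinkedHashMap;
import java.util.Map;

public class OrderByDeduplicationSketch {
    public static void main(String[] args) {
        String[][] sortItems = {{"a", "ASC_NULLS_LAST"}, {"a", "DESC_NULLS_LAST"}, {"b", "ASC_NULLS_LAST"}};
        Map<String, String> orderings = new LinkedHashMap<>();
        for (String[] item : sortItems) {
            // keep only the first ordering seen for each symbol
            if (!orderings.containsKey(item[0])) {
                orderings.put(item[0], item[1]);
            }
        }
        System.out.println(orderings); // {a=ASC_NULLS_LAST, b=ASC_NULLS_LAST}
    }
}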

Example 19 with SortOrder

Use of io.trino.spi.connector.SortOrder in project trino by trinodb.

From the class QueryPlanner, method planWindowSpecification:

public static WindowNode.Specification planWindowSpecification(List<Expression> partitionBy, Optional<OrderBy> orderBy, Function<Expression, Symbol> expressionRewrite) {
    // Rewrite PARTITION BY
    ImmutableList.Builder<Symbol> partitionBySymbols = ImmutableList.builder();
    for (Expression expression : partitionBy) {
        partitionBySymbols.add(expressionRewrite.apply(expression));
    }
    // Rewrite ORDER BY
    LinkedHashMap<Symbol, SortOrder> orderings = new LinkedHashMap<>();
    for (SortItem item : getSortItemsFromOrderBy(orderBy)) {
        Symbol symbol = expressionRewrite.apply(item.getSortKey());
        // don't override existing keys, i.e. when "ORDER BY a ASC, a DESC" is specified
        orderings.putIfAbsent(symbol, sortItemToSortOrder(item));
    }
    Optional<OrderingScheme> orderingScheme = Optional.empty();
    if (!orderings.isEmpty()) {
        orderingScheme = Optional.of(new OrderingScheme(ImmutableList.copyOf(orderings.keySet()), orderings));
    }
    return new WindowNode.Specification(partitionBySymbols.build(), orderingScheme);
}
Also used : SortItem(io.trino.sql.tree.SortItem) SelectExpression(io.trino.sql.analyzer.Analysis.SelectExpression) ComparisonExpression(io.trino.sql.tree.ComparisonExpression) IfExpression(io.trino.sql.tree.IfExpression) Expression(io.trino.sql.tree.Expression) LambdaExpression(io.trino.sql.tree.LambdaExpression) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) ImmutableList(com.google.common.collect.ImmutableList) SortOrder(io.trino.spi.connector.SortOrder) OrderingScheme.sortItemToSortOrder(io.trino.sql.planner.OrderingScheme.sortItemToSortOrder) QuerySpecification(io.trino.sql.tree.QuerySpecification) LinkedHashMap(java.util.LinkedHashMap)
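
Here putIfAbsent plays the same role as the containsKey check in the previous example, and the LinkedHashMap keeps the symbols in the order they first appeared, which is the order copied into the OrderingScheme. A short JDK-only illustration of putIfAbsent preserving the first ordering:

import java.util.LinkedHashMap;
import java.util.Map;

public class PutIfAbsentOrderingSketch {
    public static void main(String[] args) {
        Map<String, String> orderings = new LinkedHashMap<>();
        orderings.putIfAbsent("a", "ASC_NULLS_LAST");
        orderings.putIfAbsent("a", "DESC_NULLS_LAST"); // ignored, "a" is already present
        orderings.putIfAbsent("b", "DESC_NULLS_FIRST");
        System.out.println(orderings.keySet()); // [a, b], insertion order is preserved
    }
}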

Example 20 with SortOrder

Use of io.trino.spi.connector.SortOrder in project trino by trinodb.

From the class HiveWriterFactory, method createWriter:

public HiveWriter createWriter(Page partitionColumns, int position, OptionalInt bucketNumber) {
    if (bucketCount.isPresent()) {
        checkArgument(bucketNumber.isPresent(), "Bucket not provided for bucketed table");
        checkArgument(bucketNumber.getAsInt() < bucketCount.getAsInt(), "Bucket number %s must be less than bucket count %s", bucketNumber, bucketCount);
    } else {
        checkArgument(bucketNumber.isEmpty(), "Bucket number provided for table that is not bucketed");
    }
    List<String> partitionValues = createPartitionValues(partitionColumnTypes, partitionColumns, position);
    Optional<String> partitionName;
    if (!partitionColumnNames.isEmpty()) {
        partitionName = Optional.of(FileUtils.makePartName(partitionColumnNames, partitionValues));
    } else {
        partitionName = Optional.empty();
    }
    // attempt to get the existing partition (if this is an existing partitioned table)
    Optional<Partition> partition = Optional.empty();
    if (!partitionValues.isEmpty() && table != null) {
        partition = pageSinkMetadataProvider.getPartition(partitionValues);
    }
    UpdateMode updateMode;
    Properties schema;
    WriteInfo writeInfo;
    StorageFormat outputStorageFormat;
    if (partition.isEmpty()) {
        if (table == null) {
            // Write to: a new partition in a new partitioned table,
            // or a new unpartitioned table.
            updateMode = UpdateMode.NEW;
            schema = new Properties();
            schema.setProperty(IOConstants.COLUMNS, dataColumns.stream().map(DataColumn::getName).collect(joining(",")));
            schema.setProperty(IOConstants.COLUMNS_TYPES, dataColumns.stream().map(DataColumn::getHiveType).map(HiveType::getHiveTypeName).map(HiveTypeName::toString).collect(joining(":")));
            if (partitionName.isEmpty()) {
                // new unpartitioned table
                writeInfo = locationService.getTableWriteInfo(locationHandle, false);
            } else {
                // a new partition in a new partitioned table
                writeInfo = locationService.getPartitionWriteInfo(locationHandle, partition, partitionName.get());
                if (!writeInfo.getWriteMode().isWritePathSameAsTargetPath()) {
                    // verify that the target directory for the partition does not already exist
                    if (HiveWriteUtils.pathExists(new HdfsContext(session), hdfsEnvironment, writeInfo.getTargetPath())) {
                        throw new TrinoException(HIVE_PATH_ALREADY_EXISTS, format("Target directory for new partition '%s' of table '%s.%s' already exists: %s", partitionName, schemaName, tableName, writeInfo.getTargetPath()));
                    }
                }
            }
        } else {
            // Write to: a new partition in an existing partitioned table,
            // or an existing unpartitioned table
            if (partitionName.isPresent()) {
                // a new partition in an existing partitioned table
                updateMode = UpdateMode.NEW;
                writeInfo = locationService.getPartitionWriteInfo(locationHandle, partition, partitionName.get());
            } else {
                switch(insertExistingPartitionsBehavior) {
                    case APPEND:
                        updateMode = UpdateMode.APPEND;
                        writeInfo = locationService.getTableWriteInfo(locationHandle, false);
                        break;
                    case OVERWRITE:
                        updateMode = UpdateMode.OVERWRITE;
                        writeInfo = locationService.getTableWriteInfo(locationHandle, true);
                        break;
                    case ERROR:
                        throw new TrinoException(HIVE_TABLE_READ_ONLY, "Unpartitioned Hive tables are immutable");
                    default:
                        throw new IllegalArgumentException("Unsupported insert existing table behavior: " + insertExistingPartitionsBehavior);
                }
            }
            schema = getHiveSchema(table);
        }
        if (partitionName.isPresent()) {
            // Write to a new partition
            outputStorageFormat = fromHiveStorageFormat(partitionStorageFormat);
        } else {
            // Write to a new/existing unpartitioned table
            outputStorageFormat = fromHiveStorageFormat(tableStorageFormat);
        }
    } else {
        // Write to: an existing partition in an existing partitioned table
        switch(insertExistingPartitionsBehavior) {
            case APPEND:
                // Append to an existing partition
                updateMode = UpdateMode.APPEND;
                // Check the column types in partition schema match the column types in table schema
                List<Column> tableColumns = table.getDataColumns();
                List<Column> existingPartitionColumns = partition.get().getColumns();
                for (int i = 0; i < min(existingPartitionColumns.size(), tableColumns.size()); i++) {
                    HiveType tableType = tableColumns.get(i).getType();
                    HiveType partitionType = existingPartitionColumns.get(i).getType();
                    if (!tableType.equals(partitionType)) {
                        throw new TrinoException(HIVE_PARTITION_SCHEMA_MISMATCH, format("" + "You are trying to write into an existing partition in a table. " + "The table schema has changed since the creation of the partition. " + "Inserting rows into such partition is not supported. " + "The column '%s' in table '%s' is declared as type '%s', " + "but partition '%s' declared column '%s' as type '%s'.", tableColumns.get(i).getName(), tableName, tableType, partitionName, existingPartitionColumns.get(i).getName(), partitionType));
                    }
                }
                HiveWriteUtils.checkPartitionIsWritable(partitionName.get(), partition.get());
                outputStorageFormat = partition.get().getStorage().getStorageFormat();
                schema = getHiveSchema(partition.get(), table);
                writeInfo = locationService.getPartitionWriteInfo(locationHandle, partition, partitionName.get());
                break;
            case OVERWRITE:
                // Overwrite an existing partition
                // 
                // Overwrite behaves as if the partition were first dropped and a new partition inserted, thus:
                // * No partition writable check is required.
                // * Table schema and storage format is used for the new partition (instead of existing partition schema and storage format).
                updateMode = UpdateMode.OVERWRITE;
                outputStorageFormat = fromHiveStorageFormat(partitionStorageFormat);
                schema = getHiveSchema(table);
                writeInfo = locationService.getPartitionWriteInfo(locationHandle, Optional.empty(), partitionName.get());
                break;
            case ERROR:
                throw new TrinoException(HIVE_PARTITION_READ_ONLY, "Cannot insert into an existing partition of Hive table: " + partitionName.get());
            default:
                throw new IllegalArgumentException(format("Unsupported insert existing partitions behavior: %s", insertExistingPartitionsBehavior));
        }
    }
    additionalTableParameters.forEach(schema::setProperty);
    validateSchema(partitionName, schema);
    int bucketToUse = bucketNumber.isEmpty() ? 0 : bucketNumber.getAsInt();
    Path path;
    String fileNameWithExtension;
    if (transaction.isAcidTransactionRunning()) {
        String subdir = computeAcidSubdir(transaction);
        Path subdirPath = new Path(writeInfo.getWritePath(), subdir);
        path = createHiveBucketPath(subdirPath, bucketToUse, table.getParameters());
        fileNameWithExtension = path.getName();
    } else {
        String fileName = computeFileName(bucketNumber);
        fileNameWithExtension = fileName + getFileExtension(conf, outputStorageFormat);
        path = new Path(writeInfo.getWritePath(), fileNameWithExtension);
    }
    boolean useAcidSchema = isCreateTransactionalTable || (table != null && isFullAcidTable(table.getParameters()));
    FileWriter hiveFileWriter = null;
    for (HiveFileWriterFactory fileWriterFactory : fileWriterFactories) {
        Optional<FileWriter> fileWriter = fileWriterFactory.createFileWriter(path, dataColumns.stream().map(DataColumn::getName).collect(toList()), outputStorageFormat, schema, conf, session, bucketNumber, transaction, useAcidSchema, WriterKind.INSERT);
        if (fileWriter.isPresent()) {
            hiveFileWriter = fileWriter.get();
            break;
        }
    }
    if (hiveFileWriter == null) {
        hiveFileWriter = new RecordFileWriter(path, dataColumns.stream().map(DataColumn::getName).collect(toList()), outputStorageFormat, schema, partitionStorageFormat.getEstimatedWriterMemoryUsage(), conf, typeManager, parquetTimeZone, session);
    }
    String writerImplementation = hiveFileWriter.getClass().getName();
    Consumer<HiveWriter> onCommit = hiveWriter -> {
        Optional<Long> size;
        try {
            size = Optional.of(hiveWriter.getWrittenBytes());
        } catch (RuntimeException e) {
            // Do not fail the query if file system is not available
            size = Optional.empty();
        }
        eventClient.post(new WriteCompletedEvent(
                session.getQueryId(),
                path.toString(),
                schemaName,
                tableName,
                partitionName.orElse(null),
                outputStorageFormat.getOutputFormat(),
                writerImplementation,
                nodeManager.getCurrentNode().getVersion(),
                nodeManager.getCurrentNode().getHost(),
                session.getIdentity().getPrincipal().map(Principal::getName).orElse(null),
                nodeManager.getEnvironment(),
                sessionProperties,
                size.orElse(null),
                hiveWriter.getRowCount()));
    };
    if (!sortedBy.isEmpty()) {
        FileSystem fileSystem;
        Path tempFilePath;
        if (sortedWritingTempStagingPathEnabled) {
            String tempPrefix = sortedWritingTempStagingPath.replace("${USER}", new HdfsContext(session).getIdentity().getUser());
            tempFilePath = new Path(tempPrefix, ".tmp-sort." + path.getParent().getName() + "." + path.getName());
        } else {
            tempFilePath = new Path(path.getParent(), ".tmp-sort." + path.getName());
        }
        try {
            Configuration configuration = new Configuration(conf);
            // Explicitly set the default FS to local file system to avoid getting HDFS when sortedWritingTempStagingPath specifies no scheme
            configuration.set(FS_DEFAULT_NAME_KEY, "file:///");
            fileSystem = hdfsEnvironment.getFileSystem(session.getIdentity(), tempFilePath, configuration);
        } catch (IOException e) {
            throw new TrinoException(HIVE_WRITER_OPEN_ERROR, e);
        }
        List<Type> types = dataColumns.stream().map(column -> column.getHiveType().getType(typeManager, getTimestampPrecision(session))).collect(toImmutableList());
        Map<String, Integer> columnIndexes = new HashMap<>();
        for (int i = 0; i < dataColumns.size(); i++) {
            columnIndexes.put(dataColumns.get(i).getName(), i);
        }
        List<Integer> sortFields = new ArrayList<>();
        List<SortOrder> sortOrders = new ArrayList<>();
        for (SortingColumn column : sortedBy) {
            Integer index = columnIndexes.get(column.getColumnName());
            if (index == null) {
                throw new TrinoException(HIVE_INVALID_METADATA, format("Sorting column '%s' does not exist in table '%s.%s'", column.getColumnName(), schemaName, tableName));
            }
            sortFields.add(index);
            sortOrders.add(column.getOrder().getSortOrder());
        }
        hiveFileWriter = new SortingFileWriter(fileSystem, tempFilePath, hiveFileWriter, sortBufferSize, maxOpenSortFiles, types, sortFields, sortOrders, pageSorter, typeManager.getTypeOperators(), OrcFileWriterFactory::createOrcDataSink);
    }
    return new HiveWriter(hiveFileWriter, partitionName, updateMode, fileNameWithExtension, writeInfo.getWritePath().toString(), writeInfo.getTargetPath().toString(), onCommit, hiveWriterStats);
}
Also used : DateTimeZone(org.joda.time.DateTimeZone) InsertExistingPartitionsBehavior(io.trino.plugin.hive.HiveSessionProperties.InsertExistingPartitionsBehavior) OrcFileWriterFactory(io.trino.plugin.hive.orc.OrcFileWriterFactory) FileSystem(org.apache.hadoop.fs.FileSystem) NodeManager(io.trino.spi.NodeManager) CompressionConfigUtil.configureCompression(io.trino.plugin.hive.util.CompressionConfigUtil.configureCompression) DIRECT_TO_TARGET_EXISTING_DIRECTORY(io.trino.plugin.hive.LocationHandle.WriteMode.DIRECT_TO_TARGET_EXISTING_DIRECTORY) AcidUtils.isInsertOnlyTable(org.apache.hadoop.hive.ql.io.AcidUtils.isInsertOnlyTable) HiveUtil.getColumnNames(io.trino.plugin.hive.util.HiveUtil.getColumnNames) CompressionCodec(org.apache.hadoop.io.compress.CompressionCodec) Preconditions.checkArgument(com.google.common.base.Preconditions.checkArgument) Matcher(java.util.regex.Matcher) Column(io.trino.plugin.hive.metastore.Column) HiveIgnoreKeyTextOutputFormat(org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat) Collectors.toMap(java.util.stream.Collectors.toMap) HIVE_PARTITION_SCHEMA_MISMATCH(io.trino.plugin.hive.HiveErrorCode.HIVE_PARTITION_SCHEMA_MISMATCH) Maps.immutableEntry(com.google.common.collect.Maps.immutableEntry) PageSorter(io.trino.spi.PageSorter) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) HIVE_FILESYSTEM_ERROR(io.trino.plugin.hive.HiveErrorCode.HIVE_FILESYSTEM_ERROR) StorageFormat(io.trino.plugin.hive.metastore.StorageFormat) AcidTransaction(io.trino.plugin.hive.acid.AcidTransaction) ImmutableSet(com.google.common.collect.ImmutableSet) Table(io.trino.plugin.hive.metastore.Table) ImmutableMap(com.google.common.collect.ImmutableMap) AcidUtils.isFullAcidTable(org.apache.hadoop.hive.ql.io.AcidUtils.isFullAcidTable) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) HiveSessionProperties.getTemporaryStagingDirectoryPath(io.trino.plugin.hive.HiveSessionProperties.getTemporaryStagingDirectoryPath) EventClient(io.airlift.event.client.EventClient) DefaultCodec(org.apache.hadoop.io.compress.DefaultCodec) Set(java.util.Set) TrinoException(io.trino.spi.TrinoException) UUID(java.util.UUID) Math.min(java.lang.Math.min) HIVE_PATH_ALREADY_EXISTS(io.trino.plugin.hive.HiveErrorCode.HIVE_PATH_ALREADY_EXISTS) MetastoreUtil.getHiveSchema(io.trino.plugin.hive.metastore.MetastoreUtil.getHiveSchema) AcidUtils.deleteDeltaSubdir(org.apache.hadoop.hive.ql.io.AcidUtils.deleteDeltaSubdir) Sets(com.google.common.collect.Sets) String.format(java.lang.String.format) Collectors.joining(java.util.stream.Collectors.joining) ReflectionUtil(org.apache.hive.common.util.ReflectionUtil) DataSize(io.airlift.units.DataSize) HdfsContext(io.trino.plugin.hive.HdfsEnvironment.HdfsContext) List(java.util.List) ImmutableMap.toImmutableMap(com.google.common.collect.ImmutableMap.toImmutableMap) Principal(java.security.Principal) AcidUtils.deltaSubdir(org.apache.hadoop.hive.ql.io.AcidUtils.deltaSubdir) HivePageSinkMetadataProvider(io.trino.plugin.hive.metastore.HivePageSinkMetadataProvider) Entry(java.util.Map.Entry) Function.identity(java.util.function.Function.identity) FileUtils(org.apache.hadoop.hive.common.FileUtils) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) Partition(io.trino.plugin.hive.metastore.Partition) Type(io.trino.spi.type.Type) ConfigurationUtils.toJobConf(io.trino.plugin.hive.util.ConfigurationUtils.toJobConf) Page(io.trino.spi.Page) HashMap(java.util.HashMap) 
HiveWriteUtils.createPartitionValues(io.trino.plugin.hive.util.HiveWriteUtils.createPartitionValues) IOConstants(org.apache.hadoop.hive.ql.io.IOConstants) StorageFormat.fromHiveStorageFormat(io.trino.plugin.hive.metastore.StorageFormat.fromHiveStorageFormat) OptionalInt(java.util.OptionalInt) HIVE_PARTITION_READ_ONLY(io.trino.plugin.hive.HiveErrorCode.HIVE_PARTITION_READ_ONLY) ArrayList(java.util.ArrayList) HiveSessionProperties.getCompressionCodec(io.trino.plugin.hive.HiveSessionProperties.getCompressionCodec) HashSet(java.util.HashSet) Strings(com.google.common.base.Strings) HIVE_INVALID_METADATA(io.trino.plugin.hive.HiveErrorCode.HIVE_INVALID_METADATA) ImmutableList(com.google.common.collect.ImmutableList) COMPRESSRESULT(org.apache.hadoop.hive.conf.HiveConf.ConfVars.COMPRESSRESULT) Objects.requireNonNull(java.util.Objects.requireNonNull) HiveWriteUtils(io.trino.plugin.hive.util.HiveWriteUtils) HiveSessionProperties.isTemporaryStagingDirectoryEnabled(io.trino.plugin.hive.HiveSessionProperties.isTemporaryStagingDirectoryEnabled) HIVE_WRITER_OPEN_ERROR(io.trino.plugin.hive.HiveErrorCode.HIVE_WRITER_OPEN_ERROR) HiveSessionProperties.getInsertExistingPartitionsBehavior(io.trino.plugin.hive.HiveSessionProperties.getInsertExistingPartitionsBehavior) HiveUtil.getColumnTypes(io.trino.plugin.hive.util.HiveUtil.getColumnTypes) Properties(java.util.Properties) HiveConf(org.apache.hadoop.hive.conf.HiveConf) HIVE_UNSUPPORTED_FORMAT(io.trino.plugin.hive.HiveErrorCode.HIVE_UNSUPPORTED_FORMAT) HiveSessionProperties.getTimestampPrecision(io.trino.plugin.hive.HiveSessionProperties.getTimestampPrecision) IOException(java.io.IOException) ConnectorSession(io.trino.spi.connector.ConnectorSession) SortOrder(io.trino.spi.connector.SortOrder) JobConf(org.apache.hadoop.mapred.JobConf) Consumer(java.util.function.Consumer) UUID.randomUUID(java.util.UUID.randomUUID) Collectors.toList(java.util.stream.Collectors.toList) FS_DEFAULT_NAME_KEY(org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) SortingColumn(io.trino.plugin.hive.metastore.SortingColumn) HIVE_TABLE_READ_ONLY(io.trino.plugin.hive.HiveErrorCode.HIVE_TABLE_READ_ONLY) UpdateMode(io.trino.plugin.hive.PartitionUpdate.UpdateMode) WriteInfo(io.trino.plugin.hive.LocationService.WriteInfo) TypeManager(io.trino.spi.type.TypeManager)
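
The part of createWriter that involves SortOrder is the translation of the table's sorted-by columns into the parallel sortFields/sortOrders lists handed to the SortingFileWriter. This is a JDK-only sketch of that resolution step with hypothetical column names; unknown sorting columns are rejected, mirroring the HIVE_INVALID_METADATA check above.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SortColumnResolutionSketch {
    record SortSpec(String columnName, String order) {}

    public static void main(String[] args) {
        // Hypothetical data columns and sorted-by specification
        List<String> dataColumns = List.of("ds", "user_id", "event_time");
        List<SortSpec> sortedBy = List.of(new SortSpec("user_id", "ASC_NULLS_FIRST"), new SortSpec("event_time", "DESC_NULLS_LAST"));

        // Map column names to their positions, as columnIndexes does above
        Map<String, Integer> columnIndexes = new HashMap<>();
        for (int i = 0; i < dataColumns.size(); i++) {
            columnIndexes.put(dataColumns.get(i), i);
        }

        List<Integer> sortFields = new ArrayList<>();
        List<String> sortOrders = new ArrayList<>();
        for (SortSpec column : sortedBy) {
            Integer index = columnIndexes.get(column.columnName());
            if (index == null) {
                throw new IllegalArgumentException("Sorting column '" + column.columnName() + "' does not exist in the table");
            }
            sortFields.add(index);
            sortOrders.add(column.order());
        }
        System.out.println(sortFields + " " + sortOrders); // [1, 2] [ASC_NULLS_FIRST, DESC_NULLS_LAST]
    }
}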

Aggregations

SortOrder (io.trino.spi.connector.SortOrder) 20
Page (io.trino.spi.Page) 14
Test (org.testng.annotations.Test) 13
WindowOperatorFactory (io.trino.operator.WindowOperator.WindowOperatorFactory) 12
OperatorAssertion.toMaterializedResult (io.trino.operator.OperatorAssertion.toMaterializedResult) 11
MaterializedResult (io.trino.testing.MaterializedResult) 11
ImmutableList (com.google.common.collect.ImmutableList) 5
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList) 4
Type (io.trino.spi.type.Type) 4
ImmutableMap (com.google.common.collect.ImmutableMap) 2
ImmutableMap.toImmutableMap (com.google.common.collect.ImmutableMap.toImmutableMap) 2
BytecodeBlock (io.airlift.bytecode.BytecodeBlock) 2
MethodDefinition (io.airlift.bytecode.MethodDefinition) 2
Parameter (io.airlift.bytecode.Parameter) 2
BytecodeExpression (io.airlift.bytecode.expression.BytecodeExpression) 2
LabelNode (io.airlift.bytecode.instruction.LabelNode) 2
SelectExpression (io.trino.sql.analyzer.Analysis.SelectExpression) 2
OrderingScheme.sortItemToSortOrder (io.trino.sql.planner.OrderingScheme.sortItemToSortOrder) 2
Symbol (io.trino.sql.planner.Symbol) 2
ComparisonExpression (io.trino.sql.tree.ComparisonExpression) 2