
Example 11 with OperatorContext

use of org.apache.drill.exec.ops.OperatorContext in project drill by apache.

the class TestAllocators method testAllocators.

@Test
public void testAllocators() throws Exception {
    // Setup a drillbit (initializes a root allocator)
    final DrillConfig config = DrillConfig.create(TEST_CONFIGURATIONS);
    try (final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
        final Drillbit bit = new Drillbit(config, serviceSet)) {
        bit.run();
        final DrillbitContext bitContext = bit.getContext();
        FunctionImplementationRegistry functionRegistry = bitContext.getFunctionImplementationRegistry();
        StoragePluginRegistry storageRegistry = new StoragePluginRegistryImpl(bitContext);
        // Create a few Fragment Contexts
        BitControl.PlanFragment.Builder pfBuilder1 = BitControl.PlanFragment.newBuilder();
        pfBuilder1.setMemInitial(1500000);
        BitControl.PlanFragment pf1 = pfBuilder1.build();
        BitControl.PlanFragment.Builder pfBuilder2 = BitControl.PlanFragment.newBuilder();
        pfBuilder2.setMemInitial(500000);
        BitControl.PlanFragment pf2 = pfBuilder2.build();
        FragmentContextImpl fragmentContext1 = new FragmentContextImpl(bitContext, pf1, null, functionRegistry);
        FragmentContextImpl fragmentContext2 = new FragmentContextImpl(bitContext, pf2, null, functionRegistry);
        // Get a few physical operators. Easiest way is to read a physical plan.
        PhysicalPlanReader planReader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(bitContext, storageRegistry);
        // planFile is a field of the test class: the classpath location of the JSON physical plan used by this test.
        PhysicalPlan plan = planReader.readPhysicalPlan(Files.asCharSource(DrillFileUtils.getResourceAsFile(planFile), Charsets.UTF_8).read());
        List<PhysicalOperator> physicalOperators = plan.getSortedOperators();
        Iterator<PhysicalOperator> physicalOperatorIterator = physicalOperators.iterator();
        PhysicalOperator physicalOperator1 = physicalOperatorIterator.next();
        PhysicalOperator physicalOperator2 = physicalOperatorIterator.next();
        PhysicalOperator physicalOperator3 = physicalOperatorIterator.next();
        PhysicalOperator physicalOperator4 = physicalOperatorIterator.next();
        PhysicalOperator physicalOperator5 = physicalOperatorIterator.next();
        PhysicalOperator physicalOperator6 = physicalOperatorIterator.next();
        // Create some bogus Operator profile defs and stats to create operator contexts
        OpProfileDef def;
        OperatorStats stats;
        // Use some bogus operator type to create a new operator context.
        def = new OpProfileDef(physicalOperator1.getOperatorId(), MockSubScanPOP.OPERATOR_TYPE, OperatorUtilities.getChildCount(physicalOperator1));
        stats = fragmentContext1.getStats().newOperatorStats(def, fragmentContext1.getAllocator());
        // Add a couple of Operator Contexts
        // Initial allocation = 1000000 bytes for all operators
        OperatorContext oContext11 = fragmentContext1.newOperatorContext(physicalOperator1);
        DrillBuf b11 = oContext11.getAllocator().buffer(1000000);
        OperatorContext oContext12 = fragmentContext1.newOperatorContext(physicalOperator2, stats);
        DrillBuf b12 = oContext12.getAllocator().buffer(500000);
        OperatorContext oContext21 = fragmentContext1.newOperatorContext(physicalOperator3);
        def = new OpProfileDef(physicalOperator4.getOperatorId(), TextFormatPlugin.WRITER_OPERATOR_TYPE, OperatorUtilities.getChildCount(physicalOperator4));
        stats = fragmentContext2.getStats().newOperatorStats(def, fragmentContext2.getAllocator());
        OperatorContext oContext22 = fragmentContext2.newOperatorContext(physicalOperator4, stats);
        DrillBuf b22 = oContext22.getAllocator().buffer(2000000);
        // New Fragment begins
        BitControl.PlanFragment.Builder pfBuilder3 = BitControl.PlanFragment.newBuilder();
        pfBuilder3.setMemInitial(1000000);
        BitControl.PlanFragment pf3 = pfBuilder3.build();
        FragmentContextImpl fragmentContext3 = new FragmentContextImpl(bitContext, pf3, null, functionRegistry);
        // New fragment starts an operator that allocates an amount within the limit
        def = new OpProfileDef(physicalOperator5.getOperatorId(), UnionAll.OPERATOR_TYPE, OperatorUtilities.getChildCount(physicalOperator5));
        stats = fragmentContext3.getStats().newOperatorStats(def, fragmentContext3.getAllocator());
        OperatorContext oContext31 = fragmentContext3.newOperatorContext(physicalOperator5, stats);
        DrillBuf b31a = oContext31.getAllocator().buffer(200000);
        // Previously running operator completes
        b22.release();
        ((AutoCloseable) oContext22).close();
        // Fragment 3 asks for more and fails
        boolean outOfMem = false;
        try {
            oContext31.getAllocator().buffer(44000000);
            fail("Fragment 3 should fail to allocate buffer");
        } catch (OutOfMemoryException e) {
            // Expected.
            outOfMem = true;
        }
        assertTrue(outOfMem);
        // This operator is exempt from fragment limits. Fragment 3 asks for more and succeeds.
        OperatorContext oContext32 = fragmentContext3.newOperatorContext(physicalOperator6);
        try {
            DrillBuf b32 = oContext32.getAllocator().buffer(4400000);
            b32.release();
        } catch (OutOfMemoryException e) {
            fail("Fragment 3 failed to allocate buffer");
        } finally {
            closeOp(oContext32);
        }
        b11.release();
        closeOp(oContext11);
        b12.release();
        closeOp(oContext12);
        closeOp(oContext21);
        b31a.release();
        closeOp(oContext31);
        fragmentContext1.close();
        fragmentContext2.close();
        fragmentContext3.close();
    }
}
Also used : DrillbitContext(org.apache.drill.exec.server.DrillbitContext) StoragePluginRegistry(org.apache.drill.exec.store.StoragePluginRegistry) PhysicalPlan(org.apache.drill.exec.physical.PhysicalPlan) OpProfileDef(org.apache.drill.exec.ops.OpProfileDef) PhysicalPlanReader(org.apache.drill.exec.planner.PhysicalPlanReader) BitControl(org.apache.drill.exec.proto.BitControl) FragmentContextImpl(org.apache.drill.exec.ops.FragmentContextImpl) OperatorStats(org.apache.drill.exec.ops.OperatorStats) DrillConfig(org.apache.drill.common.config.DrillConfig) Drillbit(org.apache.drill.exec.server.Drillbit) RemoteServiceSet(org.apache.drill.exec.server.RemoteServiceSet) PhysicalOperator(org.apache.drill.exec.physical.base.PhysicalOperator) StoragePluginRegistryImpl(org.apache.drill.exec.store.StoragePluginRegistryImpl) OperatorContext(org.apache.drill.exec.ops.OperatorContext) FunctionImplementationRegistry(org.apache.drill.exec.expr.fn.FunctionImplementationRegistry) OutOfMemoryException(org.apache.drill.exec.exception.OutOfMemoryException) DrillBuf(io.netty.buffer.DrillBuf) MemoryTest(org.apache.drill.categories.MemoryTest) DrillTest(org.apache.drill.test.DrillTest) Test(org.junit.Test)
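
For orientation, the allocate/use/release cycle the test exercises reduces to the sketch below. It is not taken from the Drill sources: the class and method names are made up, and the fragmentContext and op parameters stand in for the objects the test builds from the plan; only calls that appear in the test above are used.

import io.netty.buffer.DrillBuf;
import org.apache.drill.exec.ops.FragmentContextImpl;
import org.apache.drill.exec.ops.OperatorContext;
import org.apache.drill.exec.physical.base.PhysicalOperator;

public class OperatorAllocationSketch {
    // Minimal allocate/release/close cycle for one operator, mirroring the test above.
    static void allocateAndRelease(FragmentContextImpl fragmentContext, PhysicalOperator op) throws Exception {
        OperatorContext oContext = fragmentContext.newOperatorContext(op);
        // buffer() draws from the operator's child allocator and throws OutOfMemoryException when the fragment limit is hit.
        DrillBuf buf = oContext.getAllocator().buffer(1_000_000);
        try {
            // ... fill and read the buffer ...
        } finally {
            buf.release();                      // return the memory before closing the context
            ((AutoCloseable) oContext).close(); // same explicit cast the test uses to close an operator context
        }
    }
}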

Example 12 with OperatorContext

use of org.apache.drill.exec.ops.OperatorContext in project drill by apache.

the class DrillParquetReader method setup.

@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
    try {
        this.operatorContext = context;
        schema = footer.getFileMetaData().getSchema();
        MessageType projection;
        final List<SchemaPath> columnsNotFound = new ArrayList<>(getColumns().size());
        if (isStarQuery()) {
            projection = schema;
        } else {
            projection = getProjection(schema, getColumns(), columnsNotFound);
            if (projection == null) {
                projection = schema;
            }
            if (!columnsNotFound.isEmpty()) {
                nullFilledVectors = new ArrayList<>(columnsNotFound.size());
                for (SchemaPath col : columnsNotFound) {
                    // col.toExpr() is used here as the field name since we don't want these fields to appear in the existing maps
                    nullFilledVectors.add(output.addField(MaterializedField.create(col.toExpr(), OPTIONAL_INT), NullableIntVector.class));
                }
                noColumnsFound = columnsNotFound.size() == getColumns().size();
            }
        }
        logger.debug("Requesting schema {}", projection);
        if (!noColumnsFound) {
            // Discard the columns not found in the schema when creating the DrillParquetRecordMaterializer, since they have already been added to the output.
            @SuppressWarnings("unchecked") Collection<SchemaPath> columns = columnsNotFound.isEmpty() ? getColumns() : CollectionUtils.subtract(getColumns(), columnsNotFound);
            recordMaterializer = new DrillParquetRecordMaterializer(output, projection, columns, fragmentContext.getOptions(), containsCorruptedDates);
        }
        if (numRecordsToRead == 0 || noColumnsFound) {
            // no need to init readers
            return;
        }
        ColumnIOFactory factory = new ColumnIOFactory(false);
        MessageColumnIO columnIO = factory.getColumnIO(projection, schema);
        BlockMetaData blockMetaData = footer.getBlocks().get(entry.getRowGroupIndex());
        Map<ColumnPath, ColumnChunkMetaData> paths = blockMetaData.getColumns().stream().collect(Collectors.toMap(ColumnChunkMetaData::getPath, Function.identity(), (o, n) -> n));
        BufferAllocator allocator = operatorContext.getAllocator();
        CompressionCodecFactory ccf = DrillCompressionCodecFactory.createDirectCodecFactory(drillFileSystem.getConf(), new ParquetDirectByteBufferAllocator(allocator), 0);
        pageReadStore = new ColumnChunkIncReadStore(numRecordsToRead, ccf, allocator, drillFileSystem, entry.getPath());
        for (String[] path : schema.getPaths()) {
            Type type = schema.getType(path);
            if (type.isPrimitive()) {
                ColumnChunkMetaData md = paths.get(ColumnPath.get(path));
                pageReadStore.addColumn(schema.getColumnDescription(path), md);
            }
        }
        recordReader = columnIO.getRecordReader(pageReadStore, recordMaterializer);
    } catch (Exception e) {
        throw handleAndRaise("Failure in setting up reader", e);
    }
}
Also used : Arrays(java.util.Arrays) BufferAllocator(org.apache.drill.exec.memory.BufferAllocator) ParquetDirectByteBufferAllocator(org.apache.drill.exec.store.parquet.ParquetDirectByteBufferAllocator) ParquetReaderUtility(org.apache.drill.exec.store.parquet.ParquetReaderUtility) ColumnIOFactory(org.apache.parquet.io.ColumnIOFactory) LoggerFactory(org.slf4j.LoggerFactory) OutputMutator(org.apache.drill.exec.physical.impl.OutputMutator) OperatorContext(org.apache.drill.exec.ops.OperatorContext) DrillFileSystem(org.apache.drill.exec.store.dfs.DrillFileSystem) PathSegment(org.apache.drill.common.expression.PathSegment) Map(java.util.Map) RowGroupReadEntry(org.apache.drill.exec.store.parquet.RowGroupReadEntry) Types(org.apache.parquet.schema.Types) ValueVector(org.apache.drill.exec.vector.ValueVector) GroupType(org.apache.parquet.schema.GroupType) Collection(java.util.Collection) SchemaPath(org.apache.drill.common.expression.SchemaPath) Set(java.util.Set) Collectors(java.util.stream.Collectors) ColumnChunkMetaData(org.apache.parquet.hadoop.metadata.ColumnChunkMetaData) MessageType(org.apache.parquet.schema.MessageType) List(java.util.List) ColumnDescriptor(org.apache.parquet.column.ColumnDescriptor) BlockMetaData(org.apache.parquet.hadoop.metadata.BlockMetaData) Preconditions(org.apache.drill.shaded.guava.com.google.common.base.Preconditions) Type(org.apache.parquet.schema.Type) ExecConstants(org.apache.drill.exec.ExecConstants) MessageColumnIO(org.apache.parquet.io.MessageColumnIO) ColumnPath(org.apache.parquet.hadoop.metadata.ColumnPath) NullableIntVector(org.apache.drill.exec.vector.NullableIntVector) MaterializedField(org.apache.drill.exec.record.MaterializedField) Function(java.util.function.Function) CommonParquetRecordReader(org.apache.drill.exec.store.CommonParquetRecordReader) ArrayList(java.util.ArrayList) OutOfMemoryException(org.apache.drill.exec.exception.OutOfMemoryException) AllocationHelper(org.apache.drill.exec.vector.AllocationHelper) CollectionUtils(org.apache.commons.collections.CollectionUtils) ExecutionSetupException(org.apache.drill.common.exceptions.ExecutionSetupException) CompressionCodecFactory(org.apache.parquet.compression.CompressionCodecFactory) DrillCompressionCodecFactory(org.apache.drill.exec.store.parquet.compression.DrillCompressionCodecFactory) LinkedList(java.util.LinkedList) LinkedHashSet(java.util.LinkedHashSet) FragmentContext(org.apache.drill.exec.ops.FragmentContext) Logger(org.slf4j.Logger) IOException(java.io.IOException) ColumnChunkIncReadStore(org.apache.parquet.hadoop.ColumnChunkIncReadStore) StringJoiner(java.util.StringJoiner) ParquetMetadata(org.apache.parquet.hadoop.metadata.ParquetMetadata) OPTIONAL_INT(org.apache.drill.common.types.Types.OPTIONAL_INT) RecordReader(org.apache.parquet.io.RecordReader)

Example 13 with OperatorContext

use of org.apache.drill.exec.ops.OperatorContext in project drill by apache.

the class EasyFormatPlugin method getReaderBatch.

@SuppressWarnings("resource")
CloseableRecordBatch getReaderBatch(FragmentContext context, EasySubScan scan) throws ExecutionSetupException {
    final ImplicitColumnExplorer columnExplorer = new ImplicitColumnExplorer(context, scan.getColumns());
    if (!columnExplorer.isStarQuery()) {
        final int operatorId = scan.getOperatorId();
        scan = new EasySubScan(scan.getUserName(), scan.getWorkUnits(), scan.getFormatPlugin(), columnExplorer.getTableColumns(), scan.getSelectionRoot());
        scan.setOperatorId(operatorId);
    }
    OperatorContext oContext = context.newOperatorContext(scan);
    final DrillFileSystem dfs;
    try {
        dfs = oContext.newFileSystem(fsConf);
    } catch (IOException e) {
        throw new ExecutionSetupException(String.format("Failed to create FileSystem: %s", e.getMessage()), e);
    }
    List<RecordReader> readers = Lists.newArrayList();
    List<Map<String, String>> implicitColumns = Lists.newArrayList();
    Map<String, String> mapWithMaxColumns = Maps.newLinkedHashMap();
    for (FileWork work : scan.getWorkUnits()) {
        RecordReader recordReader = getRecordReader(context, dfs, work, scan.getColumns(), scan.getUserName());
        readers.add(recordReader);
        Map<String, String> implicitValues = columnExplorer.populateImplicitColumns(work, scan.getSelectionRoot());
        implicitColumns.add(implicitValues);
        if (implicitValues.size() > mapWithMaxColumns.size()) {
            mapWithMaxColumns = implicitValues;
        }
    }
    // all readers should have the same number of implicit columns, add missing ones with value null
    Map<String, String> diff = Maps.transformValues(mapWithMaxColumns, Functions.constant((String) null));
    for (Map<String, String> map : implicitColumns) {
        map.putAll(Maps.difference(map, diff).entriesOnlyOnRight());
    }
    return new ScanBatch(scan, context, oContext, readers.iterator(), implicitColumns);
}
Also used : ImplicitColumnExplorer(org.apache.drill.exec.store.ImplicitColumnExplorer) ExecutionSetupException(org.apache.drill.common.exceptions.ExecutionSetupException) RecordReader(org.apache.drill.exec.store.RecordReader) CompleteFileWork(org.apache.drill.exec.store.schedule.CompleteFileWork) IOException(java.io.IOException) DrillFileSystem(org.apache.drill.exec.store.dfs.DrillFileSystem) OperatorContext(org.apache.drill.exec.ops.OperatorContext) ScanBatch(org.apache.drill.exec.physical.impl.ScanBatch) Map(java.util.Map)
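
As a side note, the implicit-column padding loop above can be illustrated in isolation. The sketch below is not part of the plugin; the file and column values are made up, and the plain com.google.common imports for Maps and Functions are an assumption about which Guava packaging is on the classpath. It shows how a reader's smaller map picks up the missing keys with null values.

import java.util.Map;
import com.google.common.base.Functions;
import com.google.common.collect.Maps;

public class ImplicitColumnPaddingSketch {
    public static void main(String[] args) {
        // Widest map seen across all readers.
        Map<String, String> mapWithMaxColumns = Maps.newLinkedHashMap();
        mapWithMaxColumns.put("fqn", "/data/a.csv");
        mapWithMaxColumns.put("filename", "a.csv");
        // A reader that resolved fewer implicit columns.
        Map<String, String> smaller = Maps.newLinkedHashMap();
        smaller.put("fqn", "/data/b.csv");
        // Same normalization as in getReaderBatch: every key of the widest map mapped to null ...
        Map<String, String> diff = Maps.transformValues(mapWithMaxColumns, Functions.constant((String) null));
        // ... and only the keys missing from 'smaller' are copied over.
        smaller.putAll(Maps.difference(smaller, diff).entriesOnlyOnRight());
        System.out.println(smaller); // prints {fqn=/data/b.csv, filename=null}
    }
}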

Example 14 with OperatorContext

use of org.apache.drill.exec.ops.OperatorContext in project drill by apache.

the class ParquetScanBatchCreator method getBatch.

@Override
public ScanBatch getBatch(FragmentContext context, ParquetRowGroupScan rowGroupScan, List<RecordBatch> children) throws ExecutionSetupException {
    Preconditions.checkArgument(children.isEmpty());
    OperatorContext oContext = context.newOperatorContext(rowGroupScan);
    final ImplicitColumnExplorer columnExplorer = new ImplicitColumnExplorer(context, rowGroupScan.getColumns());
    if (!columnExplorer.isStarQuery()) {
        final int operatorId = rowGroupScan.getOperatorId();
        rowGroupScan = new ParquetRowGroupScan(rowGroupScan.getUserName(), rowGroupScan.getStorageEngine(), rowGroupScan.getRowGroupReadEntries(), columnExplorer.getTableColumns(), rowGroupScan.getSelectionRoot(), rowGroupScan.getFilter());
        rowGroupScan.setOperatorId(operatorId);
    }
    DrillFileSystem fs;
    try {
        boolean useAsyncPageReader = context.getOptions().getOption(ExecConstants.PARQUET_PAGEREADER_ASYNC).bool_val;
        if (useAsyncPageReader) {
            fs = oContext.newNonTrackingFileSystem(rowGroupScan.getStorageEngine().getFsConf());
        } else {
            fs = oContext.newFileSystem(rowGroupScan.getStorageEngine().getFsConf());
        }
    } catch (IOException e) {
        throw new ExecutionSetupException(String.format("Failed to create DrillFileSystem: %s", e.getMessage()), e);
    }
    Configuration conf = new Configuration(fs.getConf());
    conf.setBoolean(ENABLE_BYTES_READ_COUNTER, false);
    conf.setBoolean(ENABLE_BYTES_TOTAL_COUNTER, false);
    conf.setBoolean(ENABLE_TIME_READ_COUNTER, false);
    // keep footers in a map to avoid re-reading them
    Map<String, ParquetMetadata> footers = Maps.newHashMap();
    List<RecordReader> readers = Lists.newArrayList();
    List<Map<String, String>> implicitColumns = Lists.newArrayList();
    Map<String, String> mapWithMaxColumns = Maps.newLinkedHashMap();
    for (RowGroupReadEntry e : rowGroupScan.getRowGroupReadEntries()) {
        /*
      Here we could store a map from file names to footers, to prevent re-reading the footer for each row group in a file.
      TODO - to avoid reading the footer again in the Parquet record reader (it is read earlier in the ParquetStorageEngine),
      we should add more information to the RowGroupInfo, populated on the first read, so the reader has all of the
      file metadata it needs. These fields will be added to the constructor below.
      */
        try {
            Stopwatch timer = Stopwatch.createUnstarted();
            if (!footers.containsKey(e.getPath())) {
                timer.start();
                ParquetMetadata footer = ParquetFileReader.readFooter(conf, new Path(e.getPath()));
                long timeToRead = timer.elapsed(TimeUnit.MICROSECONDS);
                logger.trace("ParquetTrace,Read Footer,{},{},{},{},{},{},{}", "", e.getPath(), "", 0, 0, 0, timeToRead);
                footers.put(e.getPath(), footer);
            }
            boolean autoCorrectCorruptDates = rowGroupScan.formatConfig.autoCorrectCorruptDates;
            ParquetReaderUtility.DateCorruptionStatus containsCorruptDates = ParquetReaderUtility.detectCorruptDates(footers.get(e.getPath()), rowGroupScan.getColumns(), autoCorrectCorruptDates);
            if (logger.isDebugEnabled()) {
                logger.debug(containsCorruptDates.toString());
            }
            if (!context.getOptions().getOption(ExecConstants.PARQUET_NEW_RECORD_READER).bool_val && !isComplex(footers.get(e.getPath()))) {
                readers.add(new ParquetRecordReader(context, e.getPath(), e.getRowGroupIndex(), e.getNumRecordsToRead(), fs, CodecFactory.createDirectCodecFactory(fs.getConf(), new ParquetDirectByteBufferAllocator(oContext.getAllocator()), 0), footers.get(e.getPath()), rowGroupScan.getColumns(), containsCorruptDates));
            } else {
                ParquetMetadata footer = footers.get(e.getPath());
                readers.add(new DrillParquetReader(context, footer, e, columnExplorer.getTableColumns(), fs, containsCorruptDates));
            }
            Map<String, String> implicitValues = columnExplorer.populateImplicitColumns(e, rowGroupScan.getSelectionRoot());
            implicitColumns.add(implicitValues);
            if (implicitValues.size() > mapWithMaxColumns.size()) {
                mapWithMaxColumns = implicitValues;
            }
        } catch (IOException e1) {
            throw new ExecutionSetupException(e1);
        }
    }
    // all readers should have the same number of implicit columns, add missing ones with value null
    Map<String, String> diff = Maps.transformValues(mapWithMaxColumns, Functions.constant((String) null));
    for (Map<String, String> map : implicitColumns) {
        map.putAll(Maps.difference(map, diff).entriesOnlyOnRight());
    }
    return new ScanBatch(rowGroupScan, context, oContext, readers.iterator(), implicitColumns);
}
Also used : ImplicitColumnExplorer(org.apache.drill.exec.store.ImplicitColumnExplorer) ExecutionSetupException(org.apache.drill.common.exceptions.ExecutionSetupException) Configuration(org.apache.hadoop.conf.Configuration) ParquetMetadata(org.apache.parquet.hadoop.metadata.ParquetMetadata) ParquetRecordReader(org.apache.drill.exec.store.parquet.columnreaders.ParquetRecordReader) RecordReader(org.apache.drill.exec.store.RecordReader) Stopwatch(com.google.common.base.Stopwatch) DrillFileSystem(org.apache.drill.exec.store.dfs.DrillFileSystem) OperatorContext(org.apache.drill.exec.ops.OperatorContext) ScanBatch(org.apache.drill.exec.physical.impl.ScanBatch) Path(org.apache.hadoop.fs.Path) DrillParquetReader(org.apache.drill.exec.store.parquet2.DrillParquetReader) IOException(java.io.IOException) Map(java.util.Map)
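
The footer map above is simple per-path memoization; pulled out on its own it might look like the sketch below. The class and method names are invented for illustration; the readFooter call is the same one the batch creator uses (deprecated in newer Parquet releases).

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;

public class FooterCacheSketch {
    private final Map<String, ParquetMetadata> footers = new HashMap<>();
    private final Configuration conf;

    public FooterCacheSketch(Configuration conf) {
        this.conf = conf;
    }

    // Read each file's footer at most once, keyed by its path string.
    public ParquetMetadata footerFor(String path) throws IOException {
        ParquetMetadata footer = footers.get(path);
        if (footer == null) {
            footer = ParquetFileReader.readFooter(conf, new Path(path));
            footers.put(path, footer);
        }
        return footer;
    }
}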

Example 15 with OperatorContext

use of org.apache.drill.exec.ops.OperatorContext in project drill by axbaretto.

the class TestSortImpl method makeSortImpl.

/**
 * Create the sort implementation to be used by test.
 *
 * @param fixture operator fixture
 * @param sortOrder sort order as specified by {@link Ordering}
 * @param nullOrder null order as specified by {@link Ordering}
 * @param outputBatch where the sort should write its output
 * @return the initialized sort implementation, ready to do work
 */
public static SortImpl makeSortImpl(OperatorFixture fixture, String sortOrder, String nullOrder, VectorContainer outputBatch) {
    FieldReference expr = FieldReference.getWithQuotedRef("key");
    Ordering ordering = new Ordering(sortOrder, expr, nullOrder);
    Sort popConfig = new Sort(null, Lists.newArrayList(ordering), false);
    OperatorContext opContext = fixture.newOperatorContext(popConfig);
    QueryId queryId = QueryId.newBuilder().setPart1(1234).setPart2(5678).build();
    FragmentHandle handle = FragmentHandle.newBuilder().setMajorFragmentId(2).setMinorFragmentId(3).setQueryId(queryId).build();
    SortConfig sortConfig = new SortConfig(opContext.getFragmentContext().getConfig(), opContext.getFragmentContext().getOptions());
    SpillSet spillSet = new SpillSet(opContext.getFragmentContext().getConfig(), handle, popConfig);
    PriorityQueueCopierWrapper copierHolder = new PriorityQueueCopierWrapper(opContext);
    SpilledRuns spilledRuns = new SpilledRuns(opContext, spillSet, copierHolder);
    return new SortImpl(opContext, sortConfig, spilledRuns, outputBatch);
}
Also used : FieldReference(org.apache.drill.common.expression.FieldReference) OperatorContext(org.apache.drill.exec.ops.OperatorContext) QueryId(org.apache.drill.exec.proto.UserBitShared.QueryId) Ordering(org.apache.drill.common.logical.data.Order.Ordering) Sort(org.apache.drill.exec.physical.config.Sort) FragmentHandle(org.apache.drill.exec.proto.ExecProtos.FragmentHandle) SpillSet(org.apache.drill.exec.physical.impl.spill.SpillSet)

Aggregations

OperatorContext (org.apache.drill.exec.ops.OperatorContext): 23
IOException (java.io.IOException): 9
Map (java.util.Map): 8
ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException): 8
Test (org.junit.Test): 8
ScanBatch (org.apache.drill.exec.physical.impl.ScanBatch): 7
RecordReader (org.apache.drill.exec.store.RecordReader): 7
DrillFileSystem (org.apache.drill.exec.store.dfs.DrillFileSystem): 6
LinkedList (java.util.LinkedList): 5
OperatorTest (org.apache.drill.categories.OperatorTest): 4
PhysicalOperator (org.apache.drill.exec.physical.base.PhysicalOperator): 4
RowSet (org.apache.drill.exec.physical.rowSet.RowSet): 4
VectorContainer (org.apache.drill.exec.record.VectorContainer): 4
ParquetMetadata (org.apache.parquet.hadoop.metadata.ParquetMetadata): 4
DrillBuf (io.netty.buffer.DrillBuf): 3
ArrayList (java.util.ArrayList): 3
SchemaPath (org.apache.drill.common.expression.SchemaPath): 3
OutOfMemoryException (org.apache.drill.exec.exception.OutOfMemoryException): 3
Sort (org.apache.drill.exec.physical.config.Sort): 3
MockRecordBatch (org.apache.drill.exec.physical.impl.MockRecordBatch): 3