
Example 1 with ColumnPath

Use of org.apache.parquet.hadoop.metadata.ColumnPath in project drill by apache.

From the class DrillParquetReader, method setup:

@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
    try {
        this.operatorContext = context;
        schema = footer.getFileMetaData().getSchema();
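        // Decide what to read: a star query takes the whole file schema,
        // otherwise a pruned MessageType is built from the selected columns.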
        MessageType projection = null;
        if (isStarQuery()) {
            projection = schema;
        } else {
            columnsNotFound = new ArrayList<SchemaPath>();
            projection = getProjection(schema, getColumns(), columnsNotFound);
            if (projection == null) {
                projection = schema;
            }
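            // Requested columns missing from the file are materialized as
            // OPTIONAL INT vectors that stay null-filled for every row.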
            if (columnsNotFound != null && columnsNotFound.size() > 0) {
                nullFilledVectors = new ArrayList<>();
                for (SchemaPath col : columnsNotFound) {
                    nullFilledVectors.add((NullableIntVector) output.addField(
                        MaterializedField.create(
                            col.getAsUnescapedPath(),
                            org.apache.drill.common.types.Types.optional(TypeProtos.MinorType.INT)),
                        (Class<? extends ValueVector>) TypeHelper.getValueVectorClass(
                            TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)));
                }
                if (columnsNotFound.size() == getColumns().size()) {
                    noColumnsFound = true;
                }
            }
        }
        logger.debug("Requesting schema {}", projection);
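        // ColumnIOFactory(false) creates the column I/O without validation;
        // the resulting MessageColumnIO binds the projection to the file schema.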
        ColumnIOFactory factory = new ColumnIOFactory(false);
        MessageColumnIO columnIO = factory.getColumnIO(projection, schema);
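        // Index this row group's column chunk metadata by ColumnPath so each
        // primitive column can be looked up directly below.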
        Map<ColumnPath, ColumnChunkMetaData> paths = new HashMap<>();
        for (ColumnChunkMetaData md : footer.getBlocks().get(entry.getRowGroupIndex()).getColumns()) {
            paths.put(md.getPath(), md);
        }
        Path filePath = new Path(entry.getPath());
        BlockMetaData blockMetaData = footer.getBlocks().get(entry.getRowGroupIndex());
        recordCount = (int) blockMetaData.getRowCount();
        pageReadStore = new ColumnChunkIncReadStore(
            recordCount,
            CodecFactory.createDirectCodecFactory(
                fileSystem.getConf(),
                new ParquetDirectByteBufferAllocator(operatorContext.getAllocator()),
                0),
            operatorContext.getAllocator(),
            fileSystem,
            filePath);
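        // Register every primitive leaf of the schema with the read store;
        // nested (group) types are covered through their primitive children.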
        for (String[] path : schema.getPaths()) {
            Type type = schema.getType(path);
            if (type.isPrimitive()) {
                ColumnChunkMetaData md = paths.get(ColumnPath.get(path));
                pageReadStore.addColumn(schema.getColumnDescription(path), md);
            }
        }
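        // Only wire up the writer and record reader if at least one of the
        // requested columns actually exists in the file.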
        if (!noColumnsFound) {
            writer = new VectorContainerWriter(output);
            // Discard the columns not found in the schema when creating DrillParquetRecordMaterializer,
            // since they have already been added to the output.
            final Collection<SchemaPath> columns = columnsNotFound == null || columnsNotFound.isEmpty()
                ? getColumns()
                : CollectionUtils.subtract(getColumns(), columnsNotFound);
            recordMaterializer = new DrillParquetRecordMaterializer(output, writer, projection, columns, fragmentContext.getOptions(), containsCorruptedDates);
            primitiveVectors = writer.getMapVector().getPrimitiveVectors();
            recordReader = columnIO.getRecordReader(pageReadStore, recordMaterializer);
        }
    } catch (Exception e) {
        handleAndRaise("Failure in setting up reader", e);
    }
}
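
The ColumnPath-keyed lookup in setup can be reproduced outside of Drill with plain parquet-hadoop APIs. The following is a minimal, standalone sketch, not Drill code: it opens a footer via ParquetFileReader.open (deprecated in recent Parquet releases in favor of the InputFile-based overloads), indexes a row group's column chunks by ColumnPath, and resolves each primitive column's metadata the same way the loop above does. The class and the dumpColumnChunks helper name are illustrative.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
import org.apache.parquet.hadoop.metadata.ColumnPath;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.Type;

public class ColumnPathLookup {

    // Illustrative helper: print chunk metadata for every primitive column
    // in the first row group, keyed by ColumnPath as in DrillParquetReader.setup.
    public static void dumpColumnChunks(Configuration conf, Path file) throws IOException {
        try (ParquetFileReader reader = ParquetFileReader.open(conf, file)) {
            ParquetMetadata footer = reader.getFooter();
            MessageType schema = footer.getFileMetaData().getSchema();
            BlockMetaData rowGroup = footer.getBlocks().get(0);

            // Index column chunk metadata by ColumnPath, mirroring the paths map above.
            Map<ColumnPath, ColumnChunkMetaData> paths = new HashMap<>();
            for (ColumnChunkMetaData md : rowGroup.getColumns()) {
                paths.put(md.getPath(), md);
            }

            // Resolve each primitive leaf through ColumnPath.get, as the setup loop does.
            for (String[] path : schema.getPaths()) {
                Type type = schema.getType(path);
                if (type.isPrimitive()) {
                    ColumnChunkMetaData md = paths.get(ColumnPath.get(path));
                    System.out.println(ColumnPath.get(path).toDotString()
                        + " rows=" + rowGroup.getRowCount()
                        + " values=" + md.getValueCount());
                }
            }
        }
    }
}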
Also used:
Path (org.apache.hadoop.fs.Path)
SchemaPath (org.apache.drill.common.expression.SchemaPath)
ColumnPath (org.apache.parquet.hadoop.metadata.ColumnPath)
BlockMetaData (org.apache.parquet.hadoop.metadata.BlockMetaData)
ColumnChunkMetaData (org.apache.parquet.hadoop.metadata.ColumnChunkMetaData)
ColumnChunkIncReadStore (org.apache.parquet.hadoop.ColumnChunkIncReadStore)
ParquetDirectByteBufferAllocator (org.apache.drill.exec.store.parquet.ParquetDirectByteBufferAllocator)
VectorContainerWriter (org.apache.drill.exec.vector.complex.impl.VectorContainerWriter)
HashMap (java.util.HashMap)
MessageColumnIO (org.apache.parquet.io.MessageColumnIO)
ColumnIOFactory (org.apache.parquet.io.ColumnIOFactory)
DrillRuntimeException (org.apache.drill.common.exceptions.DrillRuntimeException)
OutOfMemoryException (org.apache.drill.exec.exception.OutOfMemoryException)
ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException)
IOException (java.io.IOException)
GroupType (org.apache.parquet.schema.GroupType)
MessageType (org.apache.parquet.schema.MessageType)
Type (org.apache.parquet.schema.Type)
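
DrillParquetRecordMaterializer is Drill's own RecordMaterializer implementation, which is not shown here. The stock GroupRecordConverter from parquet-column can stand in for it in a self-contained sketch of the same columnIO.getRecordReader wiring. This is an assumption-laden sketch, not the Drill method: the class name, readAll helper, and file path are illustrative, and parquet-hadoop plus parquet-column are assumed on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.column.page.PageReadStore;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.convert.GroupRecordConverter;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.io.ColumnIOFactory;
import org.apache.parquet.io.MessageColumnIO;
import org.apache.parquet.io.RecordReader;
import org.apache.parquet.schema.MessageType;

public class RecordReaderSketch {

    public static void readAll(Configuration conf, Path file) throws Exception {
        try (ParquetFileReader reader = ParquetFileReader.open(conf, file)) {
            MessageType schema = reader.getFooter().getFileMetaData().getSchema();
            // Same wiring as setup: ColumnIOFactory builds a MessageColumnIO
            // that binds the (here: full) projection to the file schema.
            MessageColumnIO columnIO = new ColumnIOFactory(false).getColumnIO(schema);

            PageReadStore pages;
            while ((pages = reader.readNextRowGroup()) != null) {
                // GroupRecordConverter is the stock RecordMaterializer;
                // Drill plugs in DrillParquetRecordMaterializer at this point.
                RecordReader<Group> recordReader =
                    columnIO.getRecordReader(pages, new GroupRecordConverter(schema));
                for (long i = 0, n = pages.getRowCount(); i < n; i++) {
                    Group record = recordReader.read();
                    System.out.println(record);
                }
            }
        }
    }
}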
