Use of org.apache.drill.exec.physical.impl.OutputMutator in project drill by apache.
From the class TestScanBatchWriters, the method sanityTest:
@Test
public void sanityTest() throws Exception {
  Scan scanConfig = new AbstractSubScan("bob") {
    @Override
    public String getOperatorType() {
      return "";
    }
  };
  OperatorContext opContext = fixture.newOperatorContext(scanConfig);

  // Setup: normally done by ScanBatch
  VectorContainer container = new VectorContainer(fixture.allocator());
  OutputMutator output = new ScanBatch.Mutator(opContext, fixture.allocator(), container);
  DrillBuf buffer = opContext.getManagedBuffer();
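  // ScanBatch.Mutator is the production OutputMutator implementation; the
  // managed buffer obtained above backs the VarChar writes below and is
  // released automatically when the operator context closes.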
  try (VectorContainerWriter writer = new VectorContainerWriter(output)) {
    // Per-batch
    writer.allocate();
    writer.reset();
    BaseWriter.MapWriter map = writer.rootAsMap();

    // Write one record: (10, "Fred", [100, 110, 120])
    map.integer("a").writeInt(10);
    byte[] bytes = "Fred".getBytes("UTF-8");
    buffer.setBytes(0, bytes, 0, bytes.length);
    map.varChar("b").writeVarChar(0, bytes.length, buffer);
    try (ListWriter list = map.list("c")) {
      list.startList();
      list.integer().writeInt(100);
      list.integer().writeInt(110);
      list.integer().writeInt(120);
      list.endList();

      // Write another record: (20, "Wilma", [])
      writer.setPosition(1);
      map.integer("a").writeInt(20);
      bytes = "Wilma".getBytes("UTF-8");
      buffer.setBytes(0, bytes, 0, bytes.length);
      map.varChar("b").writeVarChar(0, bytes.length, buffer);
      writer.setValueCount(2);

      // Wrap-up done by ScanBatch
      container.setRecordCount(2);
      container.buildSchema(SelectionVectorMode.NONE);
      RowSet rowSet = fixture.wrap(container);

      // Expected
      TupleMetadata schema = new SchemaBuilder()
          .addNullable("a", MinorType.INT)
          .addNullable("b", MinorType.VARCHAR)
          .addArray("c", MinorType.INT)
          .buildSchema();
      RowSet expected = fixture.rowSetBuilder(schema)
          .addRow(10, "Fred", new int[] { 100, 110, 120 })
          .addRow(20, "Wilma", null)
          .build();
      new RowSetComparison(expected).verifyAndClearAll(rowSet);
    }
  } finally {
    opContext.close();
  }
}
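The test above drives the Mutator indirectly through a VectorContainerWriter. A record reader can also use the OutputMutator interface directly by asking it for typed value vectors, as the Parquet reader below does. The following is a minimal sketch of that direct pattern, not code from the Drill sources; it assumes an OutputMutator named output such as the one created in the test above, and the column name "myCol" is purely illustrative.

  // Sketch: direct use of OutputMutator.addField() to create and populate a
  // vector, the way a record reader's setup()/next() typically would.
  // "myCol" is a made-up column name for illustration.
  MaterializedField field = MaterializedField.create("myCol",
      Types.optional(MinorType.INT));
  NullableIntVector vector = output.addField(field, NullableIntVector.class);
  vector.allocateNew();
  vector.getMutator().setSafe(0, 42);     // row 0 <- 42
  vector.getMutator().setValueCount(1);   // finalize the row count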
Use of org.apache.drill.exec.physical.impl.OutputMutator in project drill by apache.
From the class DrillParquetReader, the method setup:
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  try {
    this.operatorContext = context;
    schema = footer.getFileMetaData().getSchema();
    MessageType projection;
    final List<SchemaPath> columnsNotFound = new ArrayList<>(getColumns().size());
    if (isStarQuery()) {
      projection = schema;
    } else {
      projection = getProjection(schema, getColumns(), columnsNotFound);
      if (projection == null) {
        projection = schema;
      }
      if (!columnsNotFound.isEmpty()) {
        nullFilledVectors = new ArrayList<>(columnsNotFound.size());
        for (SchemaPath col : columnsNotFound) {
          // col.toExpr() is used as the field name here because we don't want
          // these fields to appear inside any existing maps.
          nullFilledVectors.add(output.addField(
              MaterializedField.create(col.toExpr(), OPTIONAL_INT),
              NullableIntVector.class));
        }
        noColumnsFound = columnsNotFound.size() == getColumns().size();
      }
    }
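    // At this point "projection" covers only the columns actually present in
    // the file; requested columns missing from the file schema have been
    // registered above as nullable INT vectors that stay null-filled.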
    logger.debug("Requesting schema {}", projection);
    if (!noColumnsFound) {
      // Discard the columns not found in the schema when creating the
      // DrillParquetRecordMaterializer, since they have already been added
      // to the output.
      @SuppressWarnings("unchecked")
      Collection<SchemaPath> columns = columnsNotFound.isEmpty()
          ? getColumns()
          : CollectionUtils.subtract(getColumns(), columnsNotFound);
      recordMaterializer = new DrillParquetRecordMaterializer(output, projection,
          columns, fragmentContext.getOptions(), containsCorruptedDates);
    }
    if (numRecordsToRead == 0 || noColumnsFound) {
      // no need to init readers
      return;
    }
    ColumnIOFactory factory = new ColumnIOFactory(false);
    MessageColumnIO columnIO = factory.getColumnIO(projection, schema);
    BlockMetaData blockMetaData = footer.getBlocks().get(entry.getRowGroupIndex());
    Map<ColumnPath, ColumnChunkMetaData> paths = blockMetaData.getColumns().stream()
        .collect(Collectors.toMap(ColumnChunkMetaData::getPath, Function.identity(), (o, n) -> n));
    BufferAllocator allocator = operatorContext.getAllocator();
    CompressionCodecFactory ccf = DrillCompressionCodecFactory.createDirectCodecFactory(
        drillFileSystem.getConf(), new ParquetDirectByteBufferAllocator(allocator), 0);
    pageReadStore = new ColumnChunkIncReadStore(numRecordsToRead, ccf, allocator,
        drillFileSystem, entry.getPath());
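    // Register the chunk metadata for every primitive (leaf) column in the
    // file schema with the incremental page read store.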
    for (String[] path : schema.getPaths()) {
      Type type = schema.getType(path);
      if (type.isPrimitive()) {
        ColumnChunkMetaData md = paths.get(ColumnPath.get(path));
        pageReadStore.addColumn(schema.getColumnDescription(path), md);
      }
    }
    recordReader = columnIO.getRecordReader(pageReadStore, recordMaterializer);
  } catch (Exception e) {
    throw handleAndRaise("Failure in setting up reader", e);
  }
}
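Once setup() completes, the scan operator drives the reader batch by batch. The loop below is a simplified sketch of that consumption pattern, not the actual DrillParquetReader.next() implementation; totalRead, rowCount, and writer stand in for the reader's real bookkeeping fields and root writer, which are assumed here.

  // Sketch of the per-batch read loop (assumed, simplified): the parquet
  // RecordReader pushes one record at a time through the materializer, which
  // writes into the vectors owned by the OutputMutator.
  int count = 0;
  while (count < numRecordsToRead && totalRead < rowCount) {
    recordMaterializer.setPosition(count);  // point the writer at the next row
    recordReader.read();                    // materialize one parquet record
    count++;
    totalRead++;
  }
  writer.setValueCount(count);              // finalize the batch's row count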