use of org.apache.drill.exec.vector.complex.impl.VectorContainerWriter in project drill by axbaretto.
the class KafkaRecordReader method setup.
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  // Wrap the scan batch's output mutator in a complex writer; union support is optional.
  this.writer = new VectorContainerWriter(output, unionEnabled);
  // Build the configured message reader and hand it the writer and read options.
  messageReader = MessageReaderFactory.getMessageReader(kafkaMsgReader);
  messageReader.init(context.getManagedBuffer(), Lists.newArrayList(getColumns()), this.writer,
      this.enableAllTextMode, this.readNumbersAsDouble);
  // Iterator over the messages polled from Kafka for this sub-scan.
  msgItr = new MessageIterator(messageReader.getConsumer(plugin), subScanSpec, kafkaPollTimeOut);
}
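The setup above only wires the writer to the scan batch; the per-batch lifecycle (allocate, position, write, set the value count) is what actually fills it. Below is a minimal sketch of that lifecycle using the same allocator and TestOutputMutator as the promotable-writer test further down; the field name and values are illustrative only, not part of the Kafka reader.

// Sketch of the VectorContainerWriter lifecycle; the field name "id" and the row values are made up.
BufferAllocator allocator = RootAllocatorFactory.newRoot(DrillConfig.create());
TestOutputMutator output = new TestOutputMutator(allocator);
VectorContainerWriter writer = new VectorContainerWriter(output, false);
writer.allocate();                                   // allocate vectors for the batch
writer.reset();                                      // start writing at position 0
for (int row = 0; row < 2; row++) {
  writer.setPosition(row);                           // move the writer to the next row
  writer.rootAsMap().bigInt("id").writeBigInt(row);  // write one field for this row
}
writer.setValueCount(2);                             // seal the batch with the row count
BatchPrinter.printBatch(output.getContainer());      // dump the resulting batch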
use of org.apache.drill.exec.vector.complex.impl.VectorContainerWriter in project drill by apache.
the class TestPromotableWriter method list.
@Test
public void list() throws Exception {
  BufferAllocator allocator = RootAllocatorFactory.newRoot(DrillConfig.create());
  TestOutputMutator output = new TestOutputMutator(allocator);
  // Union support must be enabled so the same field can hold more than one type.
  ComplexWriter rootWriter = new VectorContainerWriter(output, true);
  MapWriter writer = rootWriter.rootAsMap();
  // Row 0: map.a starts out as BIGINT.
  rootWriter.setPosition(0);
  {
    writer.map("map").bigInt("a").writeBigInt(1);
  }
  // Row 1: writing a FLOAT4 to the same field forces promotion to a union.
  rootWriter.setPosition(1);
  {
    writer.map("map").float4("a").writeFloat4(2.0f);
  }
  // Row 2: an empty list for map.a.
  rootWriter.setPosition(2);
  {
    writer.map("map").list("a").startList();
    writer.map("map").list("a").endList();
  }
  // Row 3: a list mixing BIGINT and FLOAT4 elements.
  rootWriter.setPosition(3);
  {
    writer.map("map").list("a").startList();
    writer.map("map").list("a").bigInt().writeBigInt(3);
    writer.map("map").list("a").float4().writeFloat4(4);
    writer.map("map").list("a").endList();
  }
  rootWriter.setValueCount(4);
  BatchPrinter.printBatch(output.getContainer());
}
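As a follow-up, the container built by the test can be inspected to confirm the promotion. The sketch below assumes VectorContainer is iterable over its VectorWrapper entries and that MaterializedField exposes getName()/getType(), as elsewhere in Drill; treat the exact accessors as illustrative.

// Walk the container produced above and print each top-level field and its type. After mixing
// BIGINT, FLOAT4 and LIST values under map.a, the nested "a" field is expected to be backed by a
// union vector; the top level still shows the enclosing "map" field.
for (VectorWrapper<?> wrapper : output.getContainer()) {
  MaterializedField field = wrapper.getField();
  System.out.println(field.getName() + " -> " + field.getType().getMinorType());
}
allocator.close();  // release the root allocator created at the start of the test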
use of org.apache.drill.exec.vector.complex.impl.VectorContainerWriter in project drill by apache.
the class MaprDBJsonRecordReader method next.
@Override
public int next() {
  Stopwatch watch = Stopwatch.createUnstarted();
  watch.start();
  vectorWriter.allocate();
  vectorWriter.reset();
  int recordCount = 0;
  reader = null;
  document = null;
  int maxRecordsForThisBatch = this.maxRecordsToRead >= 0
      ? Math.min(BaseValueVector.INITIAL_VALUE_ALLOCATION, this.maxRecordsToRead)
      : BaseValueVector.INITIAL_VALUE_ALLOCATION;
  try {
    // If the last document caused a SchemaChange, create a new output schema for this scan batch
    if (schemaState == SchemaState.SCHEMA_CHANGE && !ignoreSchemaChange) {
      // Clear the ScanBatch vector container writer/mutator in order to be able to generate the new schema
      vectorWriterMutator.clear();
      vectorWriter = new VectorContainerWriter(vectorWriterMutator, unionEnabled);
      logger.debug("Encountered schema change earlier use new writer {}", vectorWriter.toString());
      document = lastDocument;
      setupWriter();
      if (recordCount < maxRecordsForThisBatch) {
        vectorWriter.setPosition(recordCount);
        if (document != null) {
          reader = (DBDocumentReaderBase) document.asReader();
          documentWriter.writeDBDocument(vectorWriter, reader);
          recordCount++;
        }
      }
    }
  } catch (SchemaChangeException e) {
    String err_row = reader.getId().asJsonString();
    if (ignoreSchemaChange) {
      logger.warn("{}. Dropping row '{}' from result.", e.getMessage(), err_row);
      logger.debug("Stack trace:", e);
    } else {
      /* We should not encounter a SchemaChangeException here since this is the first document for this
       * new schema. Something is very wrong - cannot handle any further!
       */
      throw dataReadError(logger, e, "SchemaChangeException for row '%s'.", err_row);
    }
  }
  schemaState = SchemaState.SCHEMA_INIT;
  while (recordCount < maxRecordsForThisBatch) {
    vectorWriter.setPosition(recordCount);
    try {
      document = nextDocument();
      if (document == null) {
        // no more documents for this reader
        break;
      } else {
        documentWriter.writeDBDocument(vectorWriter, (DBDocumentReaderBase) document.asReader());
      }
      recordCount++;
    } catch (UserException e) {
      throw UserException.unsupportedError(e)
          .addContext(String.format("Table: %s, document id: '%s'", table.getPath(),
              document.asReader() == null ? null : IdCodec.asString(((DBDocumentReaderBase) document.asReader()).getId())))
          .build(logger);
    } catch (SchemaChangeException e) {
      String err_row = ((DBDocumentReaderBase) document.asReader()).getId().asJsonString();
      if (ignoreSchemaChange) {
        logger.warn("{}. Dropping row '{}' from result.", e.getMessage(), err_row);
        logger.debug("Stack trace:", e);
      } else {
        /* Save the current document reader for the next iteration. The recordCount is not updated, so we
         * would start from this reader on the next next() call.
         */
        lastDocument = document;
        schemaState = SchemaState.SCHEMA_CHANGE;
        break;
      }
    }
  }
  if (nonExistentColumnsProjection && recordCount > 0) {
    if (schema == null || schema.isEmpty()) {
      JsonReaderUtils.ensureAtLeastOneField(vectorWriter, getColumns(), allTextMode, Collections.emptyList());
    } else {
      JsonReaderUtils.writeColumnsUsingSchema(vectorWriter, getColumns(), schema, allTextMode);
    }
  }
  vectorWriter.setValueCount(recordCount);
  if (maxRecordsToRead > 0) {
    maxRecordsToRead -= recordCount;
  }
  logger.debug("Took {} ms to get {} records", watch.elapsed(TimeUnit.MILLISECONDS), recordCount);
  return recordCount;
}
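One detail worth calling out in next() is how the per-batch ceiling interacts with a pushed-down limit: maxRecordsForThisBatch is capped by both INITIAL_VALUE_ALLOCATION and the remaining maxRecordsToRead, and the remaining limit is decremented after the batch. A small standalone sketch of that arithmetic, with made-up values:

// Illustrative only: a pushed-down limit of 100 rows against the default per-batch allocation.
int perBatchCeiling = BaseValueVector.INITIAL_VALUE_ALLOCATION;   // default vector allocation size
int maxRecordsToRead = 100;                                       // e.g. remaining rows from a LIMIT
int maxRecordsForThisBatch = maxRecordsToRead >= 0
    ? Math.min(perBatchCeiling, maxRecordsToRead)                 // cap at whichever is smaller
    : perBatchCeiling;                                            // negative limit means "no limit"
int recordCount = maxRecordsForThisBatch;                         // assume the batch filled completely
if (maxRecordsToRead > 0) {
  maxRecordsToRead -= recordCount;                                // rows still allowed on the next call
}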
use of org.apache.drill.exec.vector.complex.impl.VectorContainerWriter in project drill by apache.
the class MaprDBJsonRecordReader method setup.
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  this.vectorWriter = new VectorContainerWriter(output, unionEnabled);
  // Keep the mutator around so next() can clear it and rebuild the writer on a schema change.
  this.vectorWriterMutator = output;
  this.operatorContext = context;
  try {
    // Only fetch the document id when it is actually projected.
    table.setOption(TableOption.EXCLUDEID, !includeId);
    documentStream = table.find(condition, scannedFields);
    documentIterator = documentStream.iterator();
    setupWriter();
  } catch (DBException ex) {
    throw new ExecutionSetupException(ex);
  }
}
use of org.apache.drill.exec.vector.complex.impl.VectorContainerWriter in project drill by apache.
the class MongoRecordReader method setup.
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  this.writer = new VectorContainerWriter(output, unionEnabled);
  if (isBsonRecordReader) {
    // Read documents directly from their BSON representation.
    this.bsonReader = new BsonRecordReader(fragmentContext.getManagedBuffer(),
        Lists.newArrayList(getColumns()), readNumbersAsDouble);
    logger.debug("Initialized BsonRecordReader.");
  } else {
    // Fall back to the JSON reader, configured with the projected columns and read options.
    this.jsonReader = new JsonReader.Builder(fragmentContext.getManagedBuffer())
        .schemaPathColumns(Lists.newArrayList(getColumns()))
        .allTextMode(enableAllTextMode)
        .readNumbersAsDouble(readNumbersAsDouble)
        .enableNanInf(enableNanInf)
        .build();
    logger.debug("Initialized JsonRecordReader.");
  }
}