Use of org.apache.drill.exec.vector.accessor.ValueWriter in the Apache Drill project.
From class CompliantTextBatchReader, method buildWithSchema:
/**
 * File has column headers and the user also supplied a provided schema.
 * Converts from the VARCHAR input type to the provided output type, but
 * only for columns that are actually projected; an unprojected column
 * keeps its plain (dummy) writer, so no conversion shim is created for it.
 *
 * @param schemaNegotiator negotiator carrying the provided schema and used
 *        to build the result-set loader
 * @param fieldNames column names parsed from the file's header line
 * @return output wrapper binding one {@link ValueWriter} per header field
 */
private FieldVarCharOutput buildWithSchema(ColumnsSchemaNegotiator schemaNegotiator, String[] fieldNames) {
  // Reader schema: header names merged with types/properties from the
  // provided schema where names match.
  TupleMetadata readerSchema = mergeSchemas(schemaNegotiator.providedSchema(), fieldNames);
  schemaNegotiator.tableSchema(readerSchema, true);
  writer = schemaNegotiator.build().writer();
  StandardConversions conversions = conversions(schemaNegotiator.providedSchema());
  ValueWriter[] colWriters = new ValueWriter[fieldNames.length];
  for (int i = 0; i < fieldNames.length; i++) {
    ScalarWriter colWriter = writer.scalar(fieldNames[i]);
    // Fix: test projection per column, not on the row writer as a whole.
    // The original checked writer.isProjected(), which is loop-invariant
    // and ignores each individual column's projection state, contradicting
    // the documented "only if the column is projected" behavior.
    if (colWriter.isProjected()) {
      colWriters[i] = conversions.converterFor(colWriter, MinorType.VARCHAR);
    } else {
      // Unprojected: values are discarded, so no conversion is needed.
      colWriters[i] = colWriter;
    }
  }
  return new FieldVarCharOutput(writer, colWriters);
}
Use of org.apache.drill.exec.vector.accessor.ValueWriter in the Apache Drill project.
From class CompliantTextBatchReader, method buildFromColumnHeaders:
/**
 * File has column headers but no provided schema: synthesize the reader
 * schema directly from the header line, one text column per header name.
 *
 * @param schemaNegotiator negotiator used to register the schema and build
 *        the result-set loader
 * @param fieldNames column names parsed from the file's header line
 * @return output wrapper binding one {@link ValueWriter} per header field
 */
private FieldVarCharOutput buildFromColumnHeaders(ColumnsSchemaNegotiator schemaNegotiator, String[] fieldNames) {
  final TupleMetadata headerSchema = new TupleSchema();
  for (int i = 0; i < fieldNames.length; i++) {
    headerSchema.addColumn(textColumn(fieldNames[i]));
  }
  schemaNegotiator.tableSchema(headerSchema, true);
  writer = schemaNegotiator.build().writer();

  // One scalar writer per column, in header (positional) order.
  final ValueWriter[] fieldWriters = new ValueWriter[fieldNames.length];
  for (int i = 0; i < fieldWriters.length; i++) {
    fieldWriters[i] = writer.column(i).scalar();
  }
  return new FieldVarCharOutput(writer, fieldWriters);
}
Use of org.apache.drill.exec.vector.accessor.ValueWriter in the Apache Drill project.
From class CompliantTextBatchReader, the no-headers overload of method buildWithSchema:
/**
 * Builds the output using only the provided schema (no header-derived field
 * names — presumably the headerless-file case; confirm against callers).
 * Every column gets a conversion from the VARCHAR input type to the type
 * declared in the provided schema.
 *
 * @param schemaNegotiator negotiator carrying the provided schema and used
 *        to build the result-set loader
 * @return constrained output bound to one converting writer per schema column
 */
private FieldVarCharOutput buildWithSchema(ColumnsSchemaNegotiator schemaNegotiator) {
  final TupleMetadata providedSchema = schemaNegotiator.providedSchema();
  schemaNegotiator.tableSchema(providedSchema, true);
  writer = schemaNegotiator.build().writer();
  final StandardConversions conversions = conversions(providedSchema);

  final int colCount = providedSchema.size();
  final ValueWriter[] converters = new ValueWriter[colCount];
  for (int i = 0; i < colCount; i++) {
    final String colName = providedSchema.metadata(i).name();
    converters[i] = conversions.converterFor(writer.scalar(colName), MinorType.VARCHAR);
  }
  return new ConstrainedFieldOutput(writer, converters);
}
Use of org.apache.drill.exec.vector.accessor.ValueWriter in the Apache Drill project.
From class BaseFieldOutput, method writeToVector:
/**
 * Flushes the buffered field bytes to the underlying vector via the column
 * writer. The buffer holds a complete or partial chunk of the field's data:
 * the first flush for a field sets the bytes; any later flush for the same
 * field appends. Append works when the underlying vector is VarChar; it
 * fails when a type-conversion shim sits in between. (Generally OK, since
 * the earlier setBytes would already have failed for a large int or date.)
 * Unprojected fields are silently skipped.
 */
protected void writeToVector() {
  if (fieldProjected) {
    final ValueWriter dest = columnWriter();
    if (fieldWriteCount == 0) {
      // First chunk for this field: overwrite.
      dest.setBytes(fieldBytes, currentDataPointer);
    } else {
      // Continuation of the same field: append to what was set before.
      dest.appendBytes(fieldBytes, currentDataPointer);
    }
    fieldWriteCount += currentDataPointer;
    currentDataPointer = 0;
  }
}
Aggregations