Usage example of org.apache.drill.exec.physical.impl.scan.convert.StandardConversions in the Apache Drill project.
From the class CompliantTextBatchReader, method buildWithSchema (file-headers plus provided-schema variant).
/**
 * File has headers and a schema is provided. Converts from the VARCHAR
 * input type to the provided output type, but only for columns that are
 * actually projected; unprojected columns keep the plain VARCHAR writer
 * so no conversion work is done for them.
 *
 * @param schemaNegotiator negotiator used to merge the provided schema with
 *        the header names and to build the row writer
 * @param fieldNames column names taken from the file header
 * @return output wrapper holding one value writer per header field
 */
private FieldVarCharOutput buildWithSchema(ColumnsSchemaNegotiator schemaNegotiator, String[] fieldNames) {
  TupleMetadata readerSchema = mergeSchemas(schemaNegotiator.providedSchema(), fieldNames);
  schemaNegotiator.tableSchema(readerSchema, true);
  writer = schemaNegotiator.build().writer();
  StandardConversions conversions = conversions(schemaNegotiator.providedSchema());
  ValueWriter[] colWriters = new ValueWriter[fieldNames.length];
  for (int i = 0; i < fieldNames.length; i++) {
    ScalarWriter colWriter = writer.scalar(fieldNames[i]);
    // Fix: check projection on the individual column writer, not on the
    // row writer, so conversion applies only to projected columns as the
    // Javadoc promises.
    if (colWriter.isProjected()) {
      colWriters[i] = conversions.converterFor(colWriter, MinorType.VARCHAR);
    } else {
      colWriters[i] = colWriter;
    }
  }
  return new FieldVarCharOutput(writer, colWriters);
}
Usage example of org.apache.drill.exec.physical.impl.scan.convert.StandardConversions in the Apache Drill project.
From the class CompliantTextBatchReader, method buildWithSchema (provided-schema-only variant).
/**
 * Builds the output using only the provided schema: every column named in
 * the provided schema gets a writer that converts from the VARCHAR input
 * type to that column's declared type.
 */
private FieldVarCharOutput buildWithSchema(ColumnsSchemaNegotiator schemaNegotiator) {
  TupleMetadata providedSchema = schemaNegotiator.providedSchema();
  schemaNegotiator.tableSchema(providedSchema, true);
  writer = schemaNegotiator.build().writer();
  StandardConversions conversions = conversions(providedSchema);
  int colCount = providedSchema.size();
  ValueWriter[] converters = new ValueWriter[colCount];
  for (int col = 0; col < colCount; col++) {
    // Look up each column writer by its schema-declared name and wrap it
    // in a VARCHAR-to-target conversion writer.
    String colName = providedSchema.metadata(col).name();
    converters[col] = conversions.converterFor(writer.scalar(colName), MinorType.VARCHAR);
  }
  return new ConstrainedFieldOutput(writer, converters);
}
Usage example of org.apache.drill.exec.physical.impl.scan.convert.StandardConversions in the Apache Drill project.
From the class LogBatchReader, method bindColumns.
/**
 * Binds column writers and decides whether matched rows must be saved.
 * Matched rows are saved when the raw-line column is projected, when the
 * unmatched-line column is unprojected (so matched rows must at least be
 * counted), or when any match-case column is projected.
 */
private void bindColumns(RowSetLoader writer) {
  rawColWriter = writer.scalar(RAW_LINE_COL_NAME);
  unmatchedColWriter = writer.scalar(UNMATCHED_LINE_COL_NAME);

  // Start from the two special columns: save matched rows if the raw line
  // is wanted, or if the unmatched line is NOT wanted (then we still must
  // count matched rows).
  saveMatchedRows = rawColWriter.isProjected() || !unmatchedColWriter.isProjected();

  if (config.asArray) {
    // Array mode: a single array column holds all matched fields.
    if (writer.column(0).isProjected()) {
      saveMatchedRows = true;
    }
    if (saveMatchedRows) {
      vectorWriter = new ColumnsArrayWriter(writer);
    }
    return;
  }

  // Column mode: save if any of the defined reader-schema columns is
  // projected.
  for (int i = 0; i < config.readerSchema.size(); i++) {
    if (writer.column(i).isProjected()) {
      saveMatchedRows = true;
    }
  }
  if (saveMatchedRows) {
    // Build per-column conversions from the provided schema and write
    // values through the defined columns.
    StandardConversions conversions =
        StandardConversions.builder()
            .withSchema(config.providedSchema)
            .build();
    vectorWriter = new ScalarGroupWriter(writer, config.readerSchema, conversions);
  }
}
Aggregations