use of org.apache.drill.exec.record.metadata.ColumnMetadata in project drill by apache.
the class SasBatchReader method addImplicitColumnsToSchema.
private TupleMetadata addImplicitColumnsToSchema(TupleMetadata schema) {
  SchemaBuilder builder = new SchemaBuilder();
  ColumnMetadata colSchema;
  builder.addAll(schema);
  SasFileProperties fileProperties = sasFileReader.getSasFileProperties();

  // Add the implicit string metadata columns
  for (IMPLICIT_STRING_COLUMN name : IMPLICIT_STRING_COLUMN.values()) {
    colSchema = MetadataUtils.newScalar(name.getFieldName(), MinorType.VARCHAR, DataMode.OPTIONAL);
    colSchema.setBooleanProperty(ColumnMetadata.EXCLUDE_FROM_WILDCARD, true);
    builder.add(colSchema);
  }

  // Add the implicit date columns
  for (IMPLICIT_DATE_COLUMN name : IMPLICIT_DATE_COLUMN.values()) {
    colSchema = MetadataUtils.newScalar(name.getFieldName(), MinorType.DATE, DataMode.OPTIONAL);
    colSchema.setBooleanProperty(ColumnMetadata.EXCLUDE_FROM_WILDCARD, true);
    builder.add(colSchema);
  }

  populateMetadata(fileProperties);
  return builder.build();
}
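For context, IMPLICIT_STRING_COLUMN and IMPLICIT_DATE_COLUMN are enums that pair each implicit metadata column with its field name, which the loops above read via getFieldName(). A minimal sketch of that pattern; the constant and field names here are illustrative, not the reader's actual values:

// Illustrative only: the real enum lives in SasBatchReader and defines
// the reader's actual implicit string columns.
private enum IMPLICIT_STRING_COLUMN {
  FILE_TYPE("_file_type"),
  SAS_RELEASE("_sas_release");

  private final String fieldName;

  IMPLICIT_STRING_COLUMN(String fieldName) {
    this.fieldName = fieldName;
  }

  public String getFieldName() {
    return fieldName;
  }
}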
use of org.apache.drill.exec.record.metadata.ColumnMetadata in project drill by apache.
the class SyslogBatchReader method getColWriter.
private ScalarWriter getColWriter(TupleWriter tupleWriter, String fieldName, TypeProtos.MinorType type) {
  int index = tupleWriter.tupleSchema().index(fieldName);
  if (index == -1) {
    ColumnMetadata colSchema = MetadataUtils.newScalar(fieldName, type, TypeProtos.DataMode.OPTIONAL);
    index = tupleWriter.addColumn(colSchema);
  }
  return tupleWriter.scalar(index);
}
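A typical call site fetches the writer lazily and then writes the parsed value. The rowWriter variable and the field name below are assumptions for illustration:

// Hypothetical caller: rowWriter is the TupleWriter for the current row.
ScalarWriter appNameWriter = getColWriter(rowWriter, "app_name", TypeProtos.MinorType.VARCHAR);
appNameWriter.setString("sshd");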
use of org.apache.drill.exec.record.metadata.ColumnMetadata in project drill by apache.
the class XMLReader method writeAttributeData.
/**
 * Writes an attribute. If the field does not have a corresponding ScalarWriter,
 * this method will create one.
 * @param fieldName The field name
 * @param fieldValue The field value to be written
 * @param writer The TupleWriter to which the attribute is written
 */
private void writeAttributeData(String fieldName, String fieldValue, TupleWriter writer) {
  if (fieldName == null) {
    return;
  }

  // Find the column index, adding a new optional VARCHAR column if needed
  int index = writer.tupleSchema().index(fieldName);
  if (index == -1) {
    ColumnMetadata colSchema = MetadataUtils.newScalar(fieldName, TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL);
    index = writer.addColumn(colSchema);
  }
  ScalarWriter colWriter = writer.scalar(index);
  if (fieldValue != null) {
    colWriter.setString(fieldValue);
  }
}
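A hedged sketch of how this might be driven from a StAX event loop. The startElement and rowWriter variables and the "attr_" field-name prefix are assumptions for illustration, not XMLReader's actual conventions:

// Hypothetical call site while handling a javax.xml.stream StartElement event.
Iterator<Attribute> attributes = startElement.getAttributes();
while (attributes.hasNext()) {
  Attribute attribute = attributes.next();
  // Prefix the attribute name so it cannot collide with an element field
  writeAttributeData("attr_" + attribute.getName().getLocalPart(),
      attribute.getValue(), rowWriter);
}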
use of org.apache.drill.exec.record.metadata.ColumnMetadata in project drill by apache.
the class JsonReaderUtils method writeColumnsUsingSchema.
/**
 * Creates writers which correspond to the specified schema for the specified root writer.
 *
 * @param writer parent writer for the writers to create
 * @param columns collection of columns for which writers should be created
 * @param schema table schema
 * @param allTextMode whether all primitive writers should be of VARCHAR type
 */
public static void writeColumnsUsingSchema(BaseWriter.ComplexWriter writer, Collection<SchemaPath> columns,
    TupleMetadata schema, boolean allTextMode) {
  BaseWriter.MapWriter mapWriter = writer.rootAsMap();
  for (SchemaPath column : columns) {
    if (column.isDynamicStar()) {
      // Wildcard projection: create writers for every column in the schema
      writeSchemaColumns(schema, mapWriter, allTextMode);
    } else {
      ColumnMetadata columnMetadata = schema.metadata(column.getRootSegmentPath());
      writeColumnToMapWriter(mapWriter, column.getRootSegment(), columnMetadata, allTextMode);
    }
  }
}
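This helper is typically used to materialize writers for all projected columns even when the JSON input contains no data for them, so the output batch still carries the expected schema. A usage sketch under that assumption; the schema and projection list below are built purely for illustration, and complexWriter is assumed to be an existing BaseWriter.ComplexWriter:

// Illustrative invocation; in practice the schema and column list come from
// the scan's provided schema and projection.
TupleMetadata schema = new SchemaBuilder()
    .add("id", TypeProtos.MinorType.BIGINT)
    .addNullable("name", TypeProtos.MinorType.VARCHAR)
    .build();
List<SchemaPath> columns = Collections.singletonList(SchemaPath.STAR_COLUMN);
JsonReaderUtils.writeColumnsUsingSchema(complexWriter, columns, schema, false);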
use of org.apache.drill.exec.record.metadata.ColumnMetadata in project drill by apache.
the class TestMockPlugin method testDouble.
@Test
public void testDouble() throws RpcException {
  String sql = "SELECT balance_d FROM `mock`.`employee_100`";
  RowSet result = client.queryBuilder().sql(sql).rowSet();

  TupleMetadata schema = result.schema();
  assertEquals(1, schema.size());

  ColumnMetadata col = schema.metadata(0);
  assertEquals("balance_d", col.name());
  assertEquals(MinorType.FLOAT8, col.type());
  assertEquals(DataMode.REQUIRED, col.mode());
  assertEquals(100, result.rowCount());
  result.clear();
}
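The mock plugin derives both shape and size from the query text: the "_d" suffix on the column name yields a DOUBLE (FLOAT8) column and the "_100" suffix on the table name yields 100 rows, which is what the assertions verify. Where the whole schema matters, the per-column assertions can be collapsed into a single comparison; a sketch assuming the SchemaBuilder pattern shown earlier and TupleMetadata.isEquivalent:

// Hedged alternative: compare the full expected schema in one assertion.
TupleMetadata expected = new SchemaBuilder()
    .add("balance_d", MinorType.FLOAT8) // add() creates a REQUIRED column
    .build();
assertTrue(expected.isEquivalent(result.schema()));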