Example usage of org.apache.drill.exec.record.metadata.TupleSchema in the Apache Drill project:
the testTypeFilter method of the TestProjectionFilter class.
@Test
public void testTypeFilter() {
// Table schema: required INT a, required VARCHAR b (via the shared column
// fixtures), plus a map column m containing a single INT member.
TupleMetadata tableSchema = new SchemaBuilder()
    .add(A_COL.copy())
    .add(B_COL.copy())
    .addMap("m")
      .add("a", MinorType.INT)
      .resumeSchema()
    .build();
ProjectionFilter filter = new TypeProjectionFilter(tableSchema, EmptyErrorContext.INSTANCE);
assertFalse(filter.isEmpty());

// Columns declared in the schema are projected when types match.
assertTrue(filter.isProjected("a"));
assertTrue(filter.projection(A_COL).isProjected);
assertTrue(filter.isProjected("b"));
assertTrue(filter.projection(B_COL).isProjected);

// A column absent from the schema is still projected: the filter only
// constrains columns it knows about.
assertTrue(filter.isProjected("c"));
assertTrue(filter.projection(MetadataUtils.newScalar("c", Types.required(MinorType.BIGINT))).isProjected);

// Same name, different minor type: the filter must reject the reader column.
ColumnMetadata wrongType = MetadataUtils.newScalar("a", Types.required(MinorType.BIGINT));
try {
filter.projection(wrongType);
fail();
} catch (UserException e) {
assertTrue(e.getMessage().contains("conflict"));
}

// Same name and type but different cardinality (mode): also rejected.
ColumnMetadata wrongMode = MetadataUtils.newScalar("a", Types.optional(MinorType.INT));
try {
filter.projection(wrongMode);
fail();
} catch (UserException e) {
assertTrue(e.getMessage().contains("conflict"));
}

// Projecting the known map column yields a child filter scoped to the
// map's members; members outside the schema remain projectable.
ProjResult mapResult = filter.projection(MAP_COL);
assertTrue(mapResult.isProjected);
ProjectionFilter memberFilter = mapResult.mapFilter;
assertTrue(memberFilter.isProjected("a"));
assertTrue(memberFilter.isProjected("b"));

// A map column not in the schema is projected with an unrestricted filter.
ProjResult unknownMapResult = filter.projection(MAP_COL2);
assertTrue(unknownMapResult.isProjected);
assertSame(ProjectionFilter.PROJECT_ALL, unknownMapResult.mapFilter);

// Scalar "a" in the schema vs. a reader map named "a": a type conflict.
try {
ColumnMetadata mapNamedA = MetadataUtils.newMap("a", new TupleSchema());
filter.projection(mapNamedA);
fail();
} catch (UserException e) {
assertTrue(e.getMessage().contains("type conflict"));
}
}
Example usage of org.apache.drill.exec.record.metadata.TupleSchema in the Apache Drill project:
the convertSchema method of the IcebergColumnConverterFactory class.
/**
 * Converts an Iceberg struct type into a Drill tuple schema by mapping
 * each nested field to its Drill column metadata equivalent.
 *
 * @param structType the Iceberg struct whose fields are to be converted
 * @return a {@link TupleSchema} holding one column per Iceberg field
 */
public static TupleSchema convertSchema(Types.StructType structType) {
TupleSchema schema = new TupleSchema();
structType.fields().forEach(field -> schema.add(getColumnMetadata(field)));
return schema;
}
Example usage of org.apache.drill.exec.record.metadata.TupleSchema in the Apache Drill project:
the buildFromColumnHeaders method of the CompliantTextBatchReader class.
/**
 * Builds the reader output when the file supplies column headers and no
 * schema was provided: each header name becomes a VARCHAR column, and a
 * scalar writer is obtained per column for the text reader to populate.
 *
 * @param schemaNegotiator negotiator used to register the derived schema
 *                         and construct the result-set loader
 * @param fieldNames column names parsed from the file's header row
 * @return the output wrapper holding the row writer and per-column writers
 */
private FieldVarCharOutput buildFromColumnHeaders(ColumnsSchemaNegotiator schemaNegotiator, String[] fieldNames) {
final TupleMetadata headerSchema = new TupleSchema();
for (int i = 0; i < fieldNames.length; i++) {
headerSchema.addColumn(textColumn(fieldNames[i]));
}
// The second argument marks the schema as complete (no further columns).
schemaNegotiator.tableSchema(headerSchema, true);
writer = schemaNegotiator.build().writer();
final int columnCount = fieldNames.length;
final ValueWriter[] columnWriters = new ValueWriter[columnCount];
int index = 0;
while (index < columnCount) {
columnWriters[index] = writer.column(index).scalar();
index++;
}
return new FieldVarCharOutput(writer, columnWriters);
}
Example usage of org.apache.drill.exec.record.metadata.TupleSchema in the Apache Drill project:
the mergeSchemas method of the CompliantTextBatchReader class.
/**
 * Merges the file's header names with a provided schema: a header that
 * matches a provided column keeps that column's metadata; any other
 * header falls back to a plain VARCHAR text column.
 *
 * @param providedSchema user- or plan-provided schema to consult by name
 * @param fieldNames column names parsed from the file's header row
 * @return the reader schema, one column per header in header order
 */
private TupleMetadata mergeSchemas(TupleMetadata providedSchema, String[] fieldNames) {
final TupleMetadata mergedSchema = new TupleSchema();
for (final String name : fieldNames) {
ColumnMetadata column = providedSchema.metadata(name);
if (column == null) {
column = textColumn(name);
}
mergedSchema.addColumn(column);
}
return mergedSchema;
}
Example usage of org.apache.drill.exec.record.metadata.TupleSchema in the Apache Drill project:
the visitColumns method of the SchemaVisitor class.
/**
 * Builds a tuple schema from a parsed column list: each column definition
 * in the parse tree is converted to column metadata and appended in order.
 *
 * @param ctx parse-tree node holding the column definitions
 * @return schema containing one column per definition, in declaration order
 */
@Override
public TupleMetadata visitColumns(SchemaParser.ColumnsContext ctx) {
TupleMetadata schema = new TupleSchema();
ColumnDefVisitor visitor = new ColumnDefVisitor();
ctx.column_def().stream()
    .map(columnDef -> columnDef.accept(visitor))
    .forEach(schema::addColumn);
return schema;
}
Aggregations