Usage of org.apache.drill.exec.record.metadata.ColumnMetadata in the Apache Drill project.
From the class BuildFromSchema, method buildSingleList.
/**
 * Builds a writer for a list column that declares exactly one member type.
 * An empty clone of the column is seeded with a clone of its sole subtype,
 * then marked simple (non-union) before being added to the parent; the
 * subtype itself is expanded recursively into the new writer.
 */
private ObjectWriter buildSingleList(ParentShim parent, ColumnMetadata colSchema) {
  final ColumnMetadata listSchema = colSchema.cloneEmpty();
  final ColumnMetadata memberType = colSchema.variantSchema().listSubtype();
  listSchema.variantSchema().addType(memberType.cloneEmpty());
  listSchema.variantSchema().becomeSimple();
  final ObjectWriter writer = parent.add(listSchema);
  expandColumn(writer, memberType);
  return writer;
}
Usage of org.apache.drill.exec.record.metadata.ColumnMetadata in the Apache Drill project.
From the class TestProjectionFilter, method testImplicitAll.
/**
 * Verifies the implicit (wildcard) projection filter: ordinary columns are
 * projected, columns flagged as excluded from the wildcard are not, and map
 * members inherit the wildcard filter.
 */
@Test
public void testImplicitAll() {
  final ProjectionFilter filter = ProjectionFilter.PROJECT_ALL;

  // Ordinary columns are accepted by the wildcard.
  assertTrue(filter.isProjected("a"));
  assertTrue(filter.projection(A_COL).isProjected);

  // A column carrying EXCLUDE_FROM_WILDCARD is rejected.
  final ColumnMetadata specialCol =
      MetadataUtils.newScalar("special", Types.optional(MinorType.BIGINT));
  specialCol.setBooleanProperty(ColumnMetadata.EXCLUDE_FROM_WILDCARD, true);
  assertFalse(filter.projection(specialCol).isProjected);

  assertFalse(filter.isEmpty());

  // A map is projected, and its members see the same wildcard filter.
  final ProjResult mapResult = filter.projection(MAP_COL);
  assertTrue(mapResult.isProjected);
  assertSame(ProjectionFilter.PROJECT_ALL, mapResult.mapFilter);
}
Usage of org.apache.drill.exec.record.metadata.ColumnMetadata in the Apache Drill project.
From the class TestProjectionFilter, method testTypeFilter.
/**
 * Verifies the type-based projection filter: columns matching the provided
 * schema (or absent from it) are projected, while columns whose type, mode,
 * or structure conflicts with the schema raise a {@code UserException}.
 */
@Test
public void testTypeFilter() {
  TupleMetadata schema = new SchemaBuilder()
      .add(A_COL.copy())
      .add(B_COL.copy())
      .addMap("m")
        .add("a", MinorType.INT)
        .resumeSchema()
      .build();
  ProjectionFilter filter = new TypeProjectionFilter(schema, EmptyErrorContext.INSTANCE);
  assertFalse(filter.isEmpty());

  // Columns present in the schema with matching types are projected.
  assertTrue(filter.isProjected("a"));
  assertTrue(filter.projection(A_COL).isProjected);
  assertTrue(filter.isProjected("b"));
  assertTrue(filter.projection(B_COL).isProjected);

  // Columns absent from the schema are projected unconditionally.
  assertTrue(filter.isProjected("c"));
  assertTrue(filter.projection(
      MetadataUtils.newScalar("c", Types.required(MinorType.BIGINT))).isProjected);

  // A mismatched type or mode is a conflict.
  assertConflict(filter,
      MetadataUtils.newScalar("a", Types.required(MinorType.BIGINT)), "conflict");
  assertConflict(filter,
      MetadataUtils.newScalar("a", Types.optional(MinorType.INT)), "conflict");

  // A map in the schema produces a child filter over its members.
  ProjResult result = filter.projection(MAP_COL);
  assertTrue(result.isProjected);
  ProjectionFilter child = result.mapFilter;
  assertTrue(child.isProjected("a"));
  assertTrue(child.isProjected("b"));

  // A map not in the schema is projected with the wildcard child filter.
  result = filter.projection(MAP_COL2);
  assertTrue(result.isProjected);
  assertSame(ProjectionFilter.PROJECT_ALL, result.mapFilter);

  // Projecting a map where the schema declares a scalar is a type conflict.
  assertConflict(filter, MetadataUtils.newMap("a", new TupleSchema()), "type conflict");
}

/**
 * Asserts that projecting {@code col} through {@code filter} fails with a
 * {@link UserException} whose message contains {@code expectedFragment}.
 */
private void assertConflict(ProjectionFilter filter, ColumnMetadata col, String expectedFragment) {
  try {
    filter.projection(col);
    fail();
  } catch (UserException e) {
    assertTrue(e.getMessage().contains(expectedFragment));
  }
}
Usage of org.apache.drill.exec.record.metadata.ColumnMetadata in the Apache Drill project.
From the class TestResultSetLoaderProjection, method testMapProjection.
/**
 * Verifies map projection: m1 is fully projected, only m2.d is projected,
 * and m3 is entirely unprojected. Metadata is retained for all maps and
 * members; only the writers' projected flags differ, and the harvested
 * batch contains only the projected columns.
 */
@Test
public void testMapProjection() {
  List<SchemaPath> selection = RowSetTestUtils.projectList("m1", "m2.d");
  TupleMetadata schema = new SchemaBuilder()
      .addMap("m1")
        .add("a", MinorType.INT)
        .add("b", MinorType.INT)
        .resumeSchema()
      .addMap("m2")
        .add("c", MinorType.INT)
        .add("d", MinorType.INT)
        .resumeSchema()
      .addMap("m3")
        .add("e", MinorType.INT)
        .add("f", MinorType.INT)
        .resumeSchema()
      .buildSchema();
  ResultSetOptions options = new ResultSetOptionBuilder()
      .projection(Projections.parse(selection))
      .readerSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();

  // Verify the projected columns.
  verifyMapProjection(rootWriter, "m1", true, "a", true, "b", true);
  verifyMapProjection(rootWriter, "m2", true, "c", false, "d", true);
  verifyMapProjection(rootWriter, "m3", false, "e", false, "f", false);

  // Write a couple of rows.
  rsLoader.startBatch();
  rootWriter.start();
  rootWriter
      .addRow(mapValue(1, 2), mapValue(3, 4), mapValue(5, 6))
      .addRow(mapValue(11, 12), mapValue(13, 14), mapValue(15, 16));

  // Verify. Only the projected columns appear in the result set.
  TupleMetadata expectedSchema = new SchemaBuilder()
      .addMap("m1")
        .add("a", MinorType.INT)
        .add("b", MinorType.INT)
        .resumeSchema()
      .addMap("m2")
        .add("d", MinorType.INT)
        .resumeSchema()
      .buildSchema();
  SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
      .addRow(mapValue(1, 2), mapValue(4))
      .addRow(mapValue(11, 12), mapValue(14))
      .build();
  RowSetUtilities.verify(expected, fixture.wrap(rsLoader.harvest()));
  rsLoader.close();
}

/**
 * Checks a two-member map column: the metadata is a map with both members
 * present, while the map writer and each member writer report the expected
 * projection state.
 */
private void verifyMapProjection(RowSetLoader rootWriter, String mapName,
    boolean mapProjected, String col1, boolean col1Projected,
    String col2, boolean col2Projected) {
  ColumnMetadata mapMd = rootWriter.tupleSchema().metadata(mapName);
  TupleWriter mapWriter = rootWriter.tuple(mapName);
  assertTrue(mapMd.isMap());
  assertEquals(mapProjected, mapWriter.isProjected());
  assertEquals(2, mapMd.tupleSchema().size());
  assertEquals(col1Projected, mapWriter.column(col1).isProjected());
  assertEquals(col2Projected, mapWriter.column(col2).isProjected());
}
Usage of org.apache.drill.exec.record.metadata.ColumnMetadata in the Apache Drill project.
From the class TestResultSetLoaderProjection, method testDictProjection.
/**
 * Verifies dict projection at the column level: the projected dict keeps a
 * live writer with projected key/value children, while the unprojected dict
 * retains its metadata but its writers report unprojected, and only the
 * projected dict appears in the harvested batch.
 */
@Test
public void testDictProjection() {
  final String dict1 = "d1";
  final String dict2 = "d2";

  // There is no test for obtaining a value by key: that is not a simple
  // projection as with maps. It would require locating the value for a key
  // (functionality currently present in DictReader) and rewriting the final
  // column schema from a dict with `key` and `value` children to a plain
  // `value`.
  List<SchemaPath> selection = RowSetTestUtils.projectList(dict1);
  TupleMetadata schema = new SchemaBuilder()
      .addDict(dict1, MinorType.VARCHAR)
        .value(MinorType.INT)
        .resumeSchema()
      .addDict(dict2, MinorType.VARCHAR)
        .value(MinorType.INT)
        .resumeSchema()
      .buildSchema();
  ResultSetOptions options = new ResultSetOptionBuilder()
      .projection(Projections.parse(selection))
      .readerSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();
  TupleMetadata actualSchema = rootWriter.tupleSchema();

  // Projected dict: metadata intact; dict, key, and value writers projected.
  ColumnMetadata projectedMd = actualSchema.metadata(dict1);
  DictWriter projectedWriter = rootWriter.dict(dict1);
  assertTrue(projectedMd.isDict());
  assertTrue(projectedWriter.isProjected());
  assertEquals(2, projectedMd.tupleSchema().size());
  assertTrue(projectedWriter.keyWriter().isProjected());
  assertTrue(projectedWriter.valueWriter().isProjected());

  // Unprojected dict: metadata intact, but all of its writers unprojected.
  ColumnMetadata unprojectedMd = actualSchema.metadata(dict2);
  DictWriter unprojectedWriter = rootWriter.dict(dict2);
  assertTrue(unprojectedMd.isDict());
  assertFalse(unprojectedWriter.isProjected());
  assertEquals(2, unprojectedMd.tupleSchema().size());
  assertFalse(unprojectedWriter.keyWriter().isProjected());
  assertFalse(unprojectedWriter.valueWriter().isProjected());

  // Write a couple of rows.
  rsLoader.startBatch();
  rootWriter.start();
  rootWriter
      .addRow(map("a", 1, "b", 2), map("c", 3, "d", 4))
      .addRow(map("a", 11, "b", 12), map("c", 13, "d", 14));

  // Verify. Only the projected dict appears in the result set.
  TupleMetadata expectedSchema = new SchemaBuilder()
      .addDict(dict1, MinorType.VARCHAR)
        .value(MinorType.INT)
        .resumeSchema()
      .buildSchema();
  SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
      .addRow(map("a", 1, "b", 2))
      .addRow(map("a", 11, "b", 12))
      .build();
  RowSetUtilities.verify(expected, fixture.wrap(rsLoader.harvest()));
  rsLoader.close();
}
Aggregations