Use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
From class RowSetTest, method testRepeatedMapStructure:
@Test
public void testRepeatedMapStructure() {
TupleMetadata schema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addMapArray("m")
        .add("b", MinorType.INT)
        .add("c", MinorType.INT)
        .resumeSchema()
    .buildSchema();
ExtendableRowSet rowSet = fixture.rowSet(schema);
RowSetWriter writer = rowSet.writer();
// Schema: a required INT column "a" plus a repeated map "m" holding INT columns "b" and "c".
// Pick out components and lightly test. (Assumes structure
// tested earlier is still valid, so no need to exhaustively
// test again.)
assertEquals(ObjectType.SCALAR, writer.column("a").type());
assertEquals(ObjectType.ARRAY, writer.column("m").type());
ArrayWriter maWriter = writer.column(1).array();
assertEquals(ObjectType.TUPLE, maWriter.entryType());
TupleWriter mapWriter = maWriter.tuple();
assertEquals(ObjectType.SCALAR, mapWriter.column("b").type());
assertEquals(ObjectType.SCALAR, mapWriter.column("c").type());
ScalarWriter aWriter = writer.column("a").scalar();
ScalarWriter bWriter = mapWriter.scalar("b");
ScalarWriter cWriter = mapWriter.scalar("c");
assertEquals(ValueType.INTEGER, aWriter.valueType());
assertEquals(ValueType.INTEGER, bWriter.valueType());
assertEquals(ValueType.INTEGER, cWriter.valueType());
// Write data
aWriter.setInt(10);
bWriter.setInt(101);
cWriter.setInt(102);
// Advance to next array position
maWriter.save();
bWriter.setInt(111);
cWriter.setInt(112);
maWriter.save();
writer.save();
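// Rows 2 and 3 below follow the same pattern: write the two map
// entries, saving each with maWriter.save(), then save the row.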
aWriter.setInt(20);
bWriter.setInt(201);
cWriter.setInt(202);
maWriter.save();
bWriter.setInt(211);
cWriter.setInt(212);
maWriter.save();
writer.save();
aWriter.setInt(30);
bWriter.setInt(301);
cWriter.setInt(302);
maWriter.save();
bWriter.setInt(311);
cWriter.setInt(312);
maWriter.save();
writer.save();
// Finish the row set and get a reader.
SingleRowSet actual = writer.done();
RowSetReader reader = actual.reader();
// Verify reader structure
assertEquals(ObjectType.SCALAR, reader.column("a").type());
assertEquals(ObjectType.ARRAY, reader.column("m").type());
ArrayReader maReader = reader.column(1).array();
assertEquals(ObjectType.TUPLE, maReader.entryType());
TupleReader mapReader = maReader.tuple();
assertEquals(ObjectType.SCALAR, mapReader.column("b").type());
assertEquals(ObjectType.SCALAR, mapReader.column("c").type());
ScalarReader aReader = reader.column("a").scalar();
ScalarReader bReader = mapReader.scalar("b");
ScalarReader cReader = mapReader.scalar("c");
assertEquals(ValueType.INTEGER, aReader.valueType());
assertEquals(ValueType.INTEGER, bReader.valueType());
assertEquals(ValueType.INTEGER, cReader.valueType());
// Row 1: use index accessors
assertTrue(reader.next());
assertEquals(10, aReader.getInt());
TupleReader ixReader = maReader.tuple(0);
assertEquals(101, ixReader.scalar(0).getInt());
assertEquals(102, ixReader.scalar(1).getInt());
ixReader = maReader.tuple(1);
assertEquals(111, ixReader.scalar(0).getInt());
assertEquals(112, ixReader.scalar(1).getInt());
// Row 2: use common accessor with explicit positioning,
// but access scalars through the map reader.
assertTrue(reader.next());
assertEquals(20, aReader.getInt());
maReader.setPosn(0);
assertEquals(201, mapReader.scalar(0).getInt());
assertEquals(202, mapReader.scalar(1).getInt());
maReader.setPosn(1);
assertEquals(211, mapReader.scalar(0).getInt());
assertEquals(212, mapReader.scalar(1).getInt());
// Row 3: use common accessor for scalars
assertTrue(reader.next());
assertEquals(30, aReader.getInt());
maReader.setPosn(0);
assertEquals(301, bReader.getInt());
assertEquals(302, cReader.getInt());
maReader.setPosn(1);
assertEquals(311, bReader.getInt());
assertEquals(312, cReader.getInt());
assertFalse(reader.next());
// Verify that the map accessor's value count was set.
@SuppressWarnings("resource") RepeatedMapVector mapVector = (RepeatedMapVector) actual.container().getValueVector(1).getValueVector();
assertEquals(3, mapVector.getAccessor().getValueCount());
// Verify the readers and writers again using the testing tools.
SingleRowSet expected = fixture.rowSetBuilder(schema)
    .addRow(10, objArray(objArray(101, 102), objArray(111, 112)))
    .addRow(20, objArray(objArray(201, 202), objArray(211, 212)))
    .addRow(30, objArray(objArray(301, 302), objArray(311, 312)))
    .build();
new RowSetComparison(expected).verifyAndClearAll(actual);
}
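For reference, the three rows written and verified above have the following conceptual shape (an illustration inferred from the writer calls, not code from the test):
// {a: 10, m: [{b: 101, c: 102}, {b: 111, c: 112}]}
// {a: 20, m: [{b: 201, c: 202}, {b: 211, c: 212}]}
// {a: 30, m: [{b: 301, c: 302}, {b: 311, c: 312}]}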
Use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
From class TestFillEmpties, method doFillEmptiesScalar:
private void doFillEmptiesScalar(MajorType majorType) {
TupleMetadata schema = new SchemaBuilder().add("a", majorType).buildSchema();
ExtendableRowSet rs = fixture.rowSet(schema);
RowSetWriter writer = rs.writer();
ScalarWriter colWriter = writer.scalar(0);
ValueType valueType = colWriter.valueType();
boolean nullable = majorType.getMode() == DataMode.OPTIONAL;
for (int i = 0; i < ROW_COUNT; i++) {
if (i % 5 == 0) {
colWriter.setObject(RowSetUtilities.testDataFromInt(valueType, majorType, i));
}
writer.save();
}
SingleRowSet result = writer.done();
RowSetReader reader = result.reader();
ScalarReader colReader = reader.scalar(0);
MinorType type = majorType.getMinorType();
boolean isVariable = (type == MinorType.VARCHAR || type == MinorType.VAR16CHAR || type == MinorType.VARBINARY);
for (int i = 0; i < ROW_COUNT; i++) {
assertTrue(reader.next());
if (i % 5 != 0) {
if (nullable) {
// Nullable types fill with nulls.
assertTrue(colReader.isNull());
continue;
}
if (isVariable) {
// Variable width types fill with a zero-length value.
assertEquals(0, colReader.getBytes().length);
continue;
}
}
// All other types fill with zero-bytes, interpreted as some form
// of zero for each type.
Object actual = colReader.getObject();
Object expected = RowSetUtilities.testDataFromInt(valueType, majorType, i % 5 == 0 ? i : 0);
RowSetUtilities.assertEqualValues(majorType.toString().replace('\n', ' ') + "[" + i + "]", valueType, expected, actual);
}
result.clear();
}
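A sketch of how this helper might be driven, using Drill's Types factory methods (the actual call sites in TestFillEmpties are not shown here, so these invocations are illustrative):
// Nullable mode: skipped positions read back as nulls.
doFillEmptiesScalar(Types.optional(MinorType.INT));
// Required variable-width mode: skipped positions read back as zero-length values.
doFillEmptiesScalar(Types.required(MinorType.VARCHAR));
// Required fixed-width mode: skipped positions read back as zero.
doFillEmptiesScalar(Types.required(MinorType.INT));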
Use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
From class TestMiniPlan, method testSimpleParquetScan:
@Test
public void testSimpleParquetScan() throws Exception {
String file = DrillFileUtils.getResourceAsFile("/tpchmulti/region/01.parquet").toURI().toString();
RecordBatch scanBatch = new ParquetScanBuilder()
    .fileSystem(fs)
    .columnsToRead("R_REGIONKEY")
    .inputPaths(Lists.newArrayList(file))
    .build();
BatchSchema expectedSchema = new SchemaBuilder()
    .add("R_REGIONKEY", TypeProtos.MinorType.BIGINT)
    .build();
new MiniPlanTestBuilder()
    .root(scanBatch)
    .expectSchema(expectedSchema)
    .baselineValues(0L)
    .baselineValues(1L)
    .go();
}
Use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
From class TestNullInputMiniPlan, method testHashJoinLeftEmpty. An inner hash join whose left input is empty should return zero rows while still exposing the expected nullable schema:
@Test
public void testHashJoinLeftEmpty() throws Exception {
RecordBatch left = createScanBatchFromJson(SINGLE_EMPTY_JSON);
List<String> rightJsonBatches = Lists.newArrayList("[{\"a\": 50, \"b\" : 10 }]");
RecordBatch rightScan = new JsonScanBuilder()
    .jsonBatches(rightJsonBatches)
    .columnsToRead("a", "b")
    .build();
RecordBatch joinBatch = new PopBuilder()
    .physicalOperator(new HashJoinPOP(null, null, Lists.newArrayList(joinCond("a", "EQUALS", "a2")), JoinRelType.INNER))
    .addInput(left)
    .addInput(rightScan)
    .build();
BatchSchema expectedSchema = new SchemaBuilder()
    .addNullable("a", TypeProtos.MinorType.BIGINT)
    .addNullable("b", TypeProtos.MinorType.BIGINT)
    .withSVMode(BatchSchema.SelectionVectorMode.NONE)
    .build();
new MiniPlanTestBuilder()
    .root(joinBatch)
    .expectSchema(expectedSchema)
    .expectZeroRow(true)
    .go();
}
Use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
From class TestNullInputMiniPlan, method testJsonInputMixedWithEmptyFiles1:
/**
 * Test ScanBatch with a mix of empty and non-empty JSON files.
 * Inputs, in order: empty file, data file, empty file, data file.
 */
@Test
public void testJsonInputMixedWithEmptyFiles1() throws Exception {
RecordBatch scanBatch = createScanBatchFromJson(SINGLE_EMPTY_JSON, SINGLE_JSON, SINGLE_EMPTY_JSON2, SINGLE_JSON2);
BatchSchema expectedSchema = new SchemaBuilder()
    .addNullable("id", TypeProtos.MinorType.BIGINT)
    .addNullable("name", TypeProtos.MinorType.VARCHAR)
    .build();
new MiniPlanTestBuilder()
    .root(scanBatch)
    .expectSchema(expectedSchema)
    .baselineValues(100L, "John")
    .baselineValues(1000L, "Joe")
    .expectBatchNum(2)
    .go();
}
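Judging from the baseline values, the two non-empty inputs presumably contain rows like the following (the fixture files themselves are not shown here):
// SINGLE_JSON:  { "id": 100, "name": "John" }
// SINGLE_JSON2: { "id": 1000, "name": "Joe" }
The empty files contribute no rows, and expectBatchNum(2) appears to assert one emitted batch per non-empty file.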