Example usage of org.apache.drill.test.rowSet.schema.SchemaBuilder in the project drill by axbaretto,
taken from the class TestEmptyInputSql, method testQueryEmptyCsv.
/**
 * Test select * against an empty csv file. The star is expanded into
 * "columns : repeated-varchar", which is the default column produced when
 * reading a csv file.
 *
 * @throws Exception if the query fails or the result schema does not match
 */
@Test
public void testQueryEmptyCsv() throws Exception {
// An empty CSV still exposes the implicit repeated-VARCHAR "columns" column.
final BatchSchema expectedSchema = new SchemaBuilder().addArray("columns", TypeProtos.MinorType.VARCHAR).build();
testBuilder().sqlQuery("select * from cp.`%s`", SINGLE_EMPTY_CSV).schemaBaseLine(expectedSchema).build().run();
}
Example usage of org.apache.drill.test.rowSet.schema.SchemaBuilder in the project drill by axbaretto,
taken from the class TestStarQueries, method testSchemaForStarOrderByLimit.
// DRILL-5845
/**
 * Verifies that a star query retains the full nation.parquet schema
 * even when wrapped in an ORDER BY ... LIMIT.
 */
@Test
public void testSchemaForStarOrderByLimit() throws Exception {
  final BatchSchema expectedSchema = new SchemaBuilder()
      .add("n_nationkey", TypeProtos.MinorType.INT)
      .add("n_name", TypeProtos.MinorType.VARCHAR)
      .add("n_regionkey", TypeProtos.MinorType.INT)
      .add("n_comment", TypeProtos.MinorType.VARCHAR)
      .build();
  testBuilder()
      .sqlQuery("select * from cp.`tpch/nation.parquet` order by n_name limit 1")
      .schemaBaseLine(expectedSchema)
      .build()
      .run();
}
Example usage of org.apache.drill.test.rowSet.schema.SchemaBuilder in the project drill by axbaretto,
taken from the class TestExternalSort, method testNumericTypes.
/**
 * Exercises sort support for union types using numeric data: BIGINT and
 * FLOAT8. Drill's union-type support is incomplete, but the sort operator
 * was adapted to handle these types; this test only checks that the sort
 * copes with the mix.
 *
 * @param testLegacy
 *          true to run the old (pre-1.11) sort, false for the new (1.11
 *          and later) sort
 * @throws Exception on test-framework failure
 */
private void testNumericTypes(boolean testLegacy) throws Exception {
  final int recordCount = 10000;
  final String tableDirName = "numericTypes";

  // File "a.json": even values 0, 2, ..., recordCount as required INTs.
  {
    final BatchSchema intSchema = new SchemaBuilder()
        .add("a", Types.required(TypeProtos.MinorType.INT))
        .build();
    final RowSetBuilder intRows = new RowSetBuilder(allocator, intSchema);
    for (int value = 0; value <= recordCount; value += 2) {
      intRows.addRow(value);
    }
    final RowSet intRowSet = intRows.build();
    new JsonFileBuilder(intRowSet).build(createTableFile(tableDirName, "a.json"));
    intRowSet.clear();
  }

  // File "b.json": odd values 1, 3, ..., recordCount - 1 as required FLOAT4s.
  {
    final BatchSchema floatSchema = new SchemaBuilder()
        .add("a", Types.required(TypeProtos.MinorType.FLOAT4))
        .build();
    final RowSetBuilder floatRows = new RowSetBuilder(allocator, floatSchema);
    for (int value = 1; value <= recordCount; value += 2) {
      floatRows.addRow((float) value);
    }
    final RowSet floatRowSet = floatRows.build();
    new JsonFileBuilder(floatRowSet)
        .setCustomFormatter("a", "%.2f")
        .build(createTableFile(tableDirName, "b.json"));
    floatRowSet.clear();
  }

  // Sort descending across both files. Even values were written as INTs
  // (read back as longs), odd values as floats (read back as doubles).
  TestBuilder builder = testBuilder()
      .sqlQuery("select * from dfs.`%s` order by a desc", tableDirName)
      .optionSettingQueriesForTestQuery(getOptions(testLegacy))
      .ordered()
      .baselineColumns("a");
  for (int value = recordCount; value >= 0; value--) {
    if (value % 2 == 0) {
      builder.baselineValues((long) value);
    } else {
      builder.baselineValues((double) value);
    }
  }
  builder.go();
}
Example usage of org.apache.drill.test.rowSet.schema.SchemaBuilder in the project drill by axbaretto,
taken from the class TestOperatorRecordBatch, method testBatchAccessor.
/**
 * The record batch abstraction has a bunch of methods to work with a vector container.
 * Rather than simply exposing the container itself, the batch instead exposes various
 * container operations. Probably an artifact of its history. In any event, make
 * sure those methods are passed through to the container accessor.
 */
@Test
public void testBatchAccessor() {
// Two-column (INT, VARCHAR) schema with two rows drives the accessor checks.
BatchSchema schema = new SchemaBuilder().add("a", MinorType.INT).add("b", MinorType.VARCHAR).build();
SingleRowSet rs = fixture.rowSetBuilder(schema).addRow(10, "fred").addRow(20, "wilma").build();
MockOperatorExec opExec = new MockOperatorExec(rs.container());
opExec.nextCalls = 1;
try (OperatorRecordBatch opBatch = makeOpBatch(opExec)) {
// First next() on a fresh batch must report the new schema.
assertEquals(IterOutcome.OK_NEW_SCHEMA, opBatch.next());
assertEquals(schema, opBatch.getSchema());
assertEquals(2, opBatch.getRecordCount());
// The batch must expose the very container the operator produced.
assertSame(rs.container(), opBatch.getOutgoingContainer());
// Iteration order must follow schema column order: "a" then "b".
Iterator<VectorWrapper<?>> iter = opBatch.iterator();
assertEquals("a", iter.next().getValueVector().getField().getName());
assertEquals("b", iter.next().getValueVector().getField().getName());
// Not a full test of the schema path; just make sure that the
// pass-through to the Vector Container works.
SchemaPath path = SchemaPath.create(NamePart.newBuilder().setName("a").build());
TypedFieldId id = opBatch.getValueVectorId(path);
assertEquals(MinorType.INT, id.getFinalType().getMinorType());
assertEquals(1, id.getFieldIds().length);
assertEquals(0, id.getFieldIds()[0]);
// Second column resolves to field index 1 with its own type.
path = SchemaPath.create(NamePart.newBuilder().setName("b").build());
id = opBatch.getValueVectorId(path);
assertEquals(MinorType.VARCHAR, id.getFinalType().getMinorType());
assertEquals(1, id.getFieldIds().length);
assertEquals(1, id.getFieldIds()[0]);
// Sanity check of getValueAccessorById()
VectorWrapper<?> w = opBatch.getValueAccessorById(IntVector.class, 0);
assertNotNull(w);
assertEquals("a", w.getValueVector().getField().getName());
w = opBatch.getValueAccessorById(VarCharVector.class, 1);
assertNotNull(w);
assertEquals("b", w.getValueVector().getField().getName());
// No selection vectors were attached, so both SV accessors must reject.
try {
opBatch.getSelectionVector2();
fail();
} catch (UnsupportedOperationException e) {
// Expected
}
try {
opBatch.getSelectionVector4();
fail();
} catch (UnsupportedOperationException e) {
// Expected
}
} catch (Exception e) {
fail(e.getMessage());
}
// Closing the batch (try-with-resources) must close the operator exec.
assertTrue(opExec.closeCalled);
}
Example usage of org.apache.drill.test.rowSet.schema.SchemaBuilder in the project drill by axbaretto,
taken from the class TestOperatorRecordBatch, method testSv2.
/**
 * Verifies that a selection vector 2 attached by the operator is surfaced
 * unchanged through the record batch's SV2 accessor.
 */
@Test
public void testSv2() {
  BatchSchema batchSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .add("b", MinorType.VARCHAR)
      .build();
  SingleRowSet rowSet = fixture.rowSetBuilder(batchSchema)
      .addRow(10, "fred")
      .addRow(20, "wilma")
      .withSv2()
      .build();

  // Wire both the container and its SV2 into the combined accessor.
  ContainerAndSv2Accessor accessor = new ContainerAndSv2Accessor();
  accessor.setContainer(rowSet.container());
  accessor.setSelectionVector(rowSet.getSv2());

  MockOperatorExec opExec = new MockOperatorExec(accessor);
  opExec.nextCalls = 1;
  try (OperatorRecordBatch opBatch = makeOpBatch(opExec)) {
    assertEquals(IterOutcome.OK_NEW_SCHEMA, opBatch.next());
    // The batch must hand back the exact SV2 instance, not a copy.
    assertSame(rowSet.getSv2(), opBatch.getSelectionVector2());
  } catch (Exception e) {
    fail();
  }
  assertTrue(opExec.closeCalled);
  // Must release SV2
  rowSet.clear();
}
Aggregations