Use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto: class TestSchemaBuilder, method testDecimal.
/**
 * Verifies that decimal precision and scale can be declared through the
 * schema builder, for both top-level columns (in all three data modes)
 * and a column nested inside a map. Decimal is broken in Drill, so unions,
 * lists and repeated lists are deliberately not covered here, though those
 * builder methods could be added.
 */
@Test
public void testDecimal() {
  TupleMetadata schema = new SchemaBuilder()
      .addDecimal("a", MinorType.DECIMAL18, DataMode.OPTIONAL, 5, 2)
      .addDecimal("b", MinorType.DECIMAL18, DataMode.REQUIRED, 6, 3)
      .addDecimal("c", MinorType.DECIMAL18, DataMode.REPEATED, 7, 4)
      .addMap("m")
        .addDecimal("d", MinorType.DECIMAL18, DataMode.OPTIONAL, 8, 1)
      .resumeSchema()
      .buildSchema();

  // Look columns up by name (rather than index), just for variety.
  ColumnMetadata colA = schema.metadata("a");
  assertEquals(DataMode.OPTIONAL, colA.mode());
  assertEquals(5, colA.precision());
  assertEquals(2, colA.scale());

  ColumnMetadata colB = schema.metadata("b");
  assertEquals(DataMode.REQUIRED, colB.mode());
  assertEquals(6, colB.precision());
  assertEquals(3, colB.scale());

  ColumnMetadata colC = schema.metadata("c");
  assertEquals(DataMode.REPEATED, colC.mode());
  assertEquals(7, colC.precision());
  assertEquals(4, colC.scale());

  // The nested column is reached through the map's own tuple schema.
  ColumnMetadata colD = schema.metadata("m").mapSchema().metadata("d");
  assertEquals(DataMode.OPTIONAL, colD.mode());
  assertEquals(8, colD.precision());
  assertEquals(1, colD.scale());
}
Use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto: class ExampleTest, method secondTest.
/**
 * <p>
 * Example that uses the fixture builder to build a cluster fixture. Lets
 * you set configuration (boot-time) options, session options, system options
 * and more.
 * </p>
 * <p>
 * You can write test files to the {@link BaseDirTestWatcher#getRootDir()} and query them in the test.
 * </p>
 * <p>
 * Also shows how to display the plan JSON and just run a query silently,
 * getting just the row count, batch count and run time.
 * </p>
 * @throws Exception if anything goes wrong
 */
@Test
public void secondTest() throws Exception {
  try (RootAllocator allocator = new RootAllocator(100_000_000)) {
    // Materialize a two-row JSON test table under the watched root directory.
    final File tableFile = dirTestWatcher.getRootDir().toPath().resolve("employee.json").toFile();
    final BatchSchema schema = new SchemaBuilder()
        .add("id", Types.required(TypeProtos.MinorType.VARCHAR))
        .add("name", Types.required(TypeProtos.MinorType.VARCHAR))
        .build();
    final RowSet inputRows = new RowSetBuilder(allocator, schema)
        .addRow("1", "kiwi")
        .addRow("2", "watermelon")
        .build();
    new JsonFileBuilder(inputRows).build(tableFile);
    inputRows.clear();

    // Boot a one-node cluster with a lowered slice target (a boot-time option).
    ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher)
        .configProperty(ExecConstants.SLICE_TARGET, 10);
    try (ClusterFixture cluster = builder.build();
         ClientFixture client = cluster.clientFixture()) {
      String sql = "SELECT * FROM `dfs`.`test/employee.json`";
      // Show the JSON plan, then run the query silently for its summary.
      System.out.println(client.queryBuilder().sql(sql).explainJson());
      QuerySummary summary = client.queryBuilder().sql(sql).run();
      System.out.println(String.format("Read %d rows", summary.recordCount()));
      // Usually we want to test something. Here, just test that we got
      // the 2 records.
      assertEquals(2, summary.recordCount());
    }
  }
}
Use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto: class DummyWriterTest, method testDummyMap.
/**
 * Test a dummy map or map array. A (non-enforced) rule is that such maps
 * contain only dummy writers. The writers act like "real" writers.
 * <p>
 * The m1 and m2 writer setups were copy-paste duplicates; they are now
 * built through a shared helper.
 */
@Test
public void testDummyMap() {
  TupleMetadata schema = new SchemaBuilder()
      .addMap("m1")
        .add("a", MinorType.INT)
        .addArray("b", MinorType.VARCHAR)
      .resumeSchema()
      .addMapArray("m2")
        .add("c", MinorType.INT)
      .resumeSchema()
      .buildSchema();
  List<AbstractObjectWriter> writers = new ArrayList<>();
  writers.add(buildUnprojectedMapWriter(schema, "m1", "a", "b"));
  writers.add(buildUnprojectedMapWriter(schema, "m2", "c"));
  AbstractTupleWriter rootWriter = new RootWriterFixture(schema, writers);

  // Events are ignored.
  rootWriter.startWrite();
  rootWriter.startRow();
  // Dummy columns seem real.
  rootWriter.tuple("m1").scalar("a").setInt(20);
  rootWriter.tuple(0).array("b").scalar().setString("foo");
  // Dummy array map seems real.
  rootWriter.array("m2").tuple().scalar("c").setInt(30);
  rootWriter.array("m2").save();
  rootWriter.array(1).tuple().scalar(0).setInt(40);
  rootWriter.array(1).save();
  // More ignored events.
  rootWriter.restartRow();
  rootWriter.saveRow();
  rootWriter.endWrite();
}

/**
 * Marks the named map as unprojected, then builds a dummy map writer
 * wrapping dummy writers for each listed member column.
 *
 * @param schema the row schema containing the map
 * @param mapName name of the map column to build a writer for
 * @param memberNames names of the map's member columns, in order
 * @return the (dummy) map writer
 */
private static AbstractObjectWriter buildUnprojectedMapWriter(
    TupleMetadata schema, String mapName, String... memberNames) {
  schema.metadata(mapName).setProjected(false);
  TupleMetadata mapSchema = schema.metadata(mapName).mapSchema();
  List<AbstractObjectWriter> members = new ArrayList<>();
  for (String memberName : memberNames) {
    // null vector: unprojected columns get dummy (no-op) writers.
    members.add(ColumnWriterFactory.buildColumnWriter(mapSchema.metadata(memberName), null));
  }
  return ColumnWriterFactory.buildMapWriter(schema.metadata(mapName), null, members);
}
Use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto: class RowSetTest, method testMapStructure.
/**
 * Exercises a simple map structure at the top level of a row: scalar
 * column "a" plus map "m" holding a repeated INT column "b". Checks the
 * writer and reader object types, verifies that invalid accessor requests
 * are rejected, writes three rows, reads them back, and compares the
 * result against a reference row set.
 */
@Test
public void testMapStructure() {
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .addArray("b", MinorType.INT)
      .resumeSchema()
      .buildSchema();
  ExtendableRowSet rowSet = fixture.rowSet(schema);
  RowSetWriter rootWriter = rowSet.writer();

  // Writer invariants: "a" is a scalar, "m" is a tuple, by name and index.
  assertEquals(ObjectType.SCALAR, rootWriter.column("a").type());
  assertEquals(ObjectType.SCALAR, rootWriter.column(0).type());
  assertEquals(ObjectType.TUPLE, rootWriter.column("m").type());
  assertEquals(ObjectType.TUPLE, rootWriter.column(1).type());
  assertSame(rootWriter.column(1).tuple(), rootWriter.tuple(1));
  TupleWriter mapWriter = rootWriter.column(1).tuple();
  assertEquals(ObjectType.SCALAR, mapWriter.column("b").array().entry().type());
  assertEquals(ObjectType.SCALAR, mapWriter.column("b").array().entryType());
  ScalarWriter aCol = rootWriter.column("a").scalar();
  ScalarWriter bCol = rootWriter.column("m").tuple().column("b").array().entry().scalar();
  assertSame(bCol, rootWriter.tuple(1).array(0).scalar());
  assertEquals(ValueType.INTEGER, bCol.valueType());

  // Asking for the map column as a scalar or an array must fail.
  try {
    rootWriter.column(1).scalar();
    fail();
  } catch (UnsupportedOperationException e) {
    // Expected
  }
  try {
    rootWriter.column(1).array();
    fail();
  } catch (UnsupportedOperationException e) {
    // Expected
  }

  // Write three rows: (10, [11, 12]), (20, [21, 22]), (30, [31, 32]).
  for (int base = 10; base <= 30; base += 10) {
    aCol.setInt(base);
    bCol.setInt(base + 1);
    bCol.setInt(base + 2);
    rootWriter.save();
  }

  // Finish the row set and get a reader.
  SingleRowSet actual = rootWriter.done();
  RowSetReader rowReader = actual.reader();

  // Reader invariants mirror the writer's.
  assertEquals(ObjectType.SCALAR, rowReader.column("a").type());
  assertEquals(ObjectType.SCALAR, rowReader.column(0).type());
  assertEquals(ObjectType.TUPLE, rowReader.column("m").type());
  assertEquals(ObjectType.TUPLE, rowReader.column(1).type());
  assertSame(rowReader.column(1).tuple(), rowReader.tuple(1));
  ScalarReader aIn = rowReader.column(0).scalar();
  TupleReader mapIn = rowReader.column(1).tuple();
  assertEquals(ObjectType.SCALAR, mapIn.column("b").array().entryType());
  ScalarElementReader bIn = mapIn.column(0).elements();
  assertEquals(ValueType.INTEGER, bIn.valueType());

  // Read the three rows back and check every value.
  for (int base = 10; base <= 30; base += 10) {
    assertTrue(rowReader.next());
    assertEquals(base, aIn.getInt());
    assertEquals(base + 1, bIn.getInt(0));
    assertEquals(base + 2, bIn.getInt(1));
  }
  assertFalse(rowReader.next());

  // Verify that the map accessor's value count was set.
  @SuppressWarnings("resource") MapVector mapVector = (MapVector) actual.container().getValueVector(1).getValueVector();
  assertEquals(actual.rowCount(), mapVector.getAccessor().getValueCount());

  // Compare against an independently built reference row set.
  SingleRowSet expected = fixture.rowSetBuilder(schema)
      .addRow(10, objArray(intArray(11, 12)))
      .addRow(20, objArray(intArray(21, 22)))
      .addRow(30, objArray(intArray(31, 32)))
      .build();
  new RowSetComparison(expected).verifyAndClearAll(actual);
}
Use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto: class RowSetTest, method testBufferBounds.
/**
 * Test filling a row set up to the maximum vector size.
 * Values in the first column are small enough to prevent filling to the
 * maximum buffer size, but values in the second column
 * will reach maximum buffer size before maximum row size.
 * The result should be the number of rows that fit, with the
 * partial last row not counting. (A complete application would
 * reload the partial row into a new row set.)
 */
@Test
public void testBufferBounds() {
  BatchSchema batchSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .add("b", MinorType.VARCHAR)
      .build();

  // 512 'X' bytes per row: big enough to exhaust the VarChar buffer before
  // the row-count limit is reached. StandardCharsets.UTF_8 (fully qualified
  // to avoid touching the import block) is always available, so the old
  // UnsupportedEncodingException try/catch was dead code.
  byte[] rawValue = new byte[512];
  Arrays.fill(rawValue, (byte) 'X');
  String varCharValue = new String(rawValue, java.nio.charset.StandardCharsets.UTF_8);

  ExtendableRowSet rs = fixture.rowSet(batchSchema);
  RowSetWriter writer = rs.writer();
  int count = 0;
  try {
    for (; ; ) {
      writer.scalar(0).setInt(count);
      writer.scalar(1).setString(varCharValue);
      // Won't get here on overflow.
      writer.save();
      count++;
    }
  } catch (IndexOutOfBoundsException e) {
    // The writer signals buffer exhaustion with an "Overflow" message.
    assertTrue(e.getMessage().contains("Overflow"));
  }
  writer.done();

  // The buffer limit, not the row-count limit, must have stopped the loop,
  // and the partial last row must not be counted.
  assertTrue(count < ValueVector.MAX_ROW_COUNT);
  assertEquals(count, writer.rowIndex());
  assertEquals(count, rs.rowCount());
  rs.clear();
}
Aggregations