use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
the class TestNullInputMiniPlan method testJsonInputMixedWithEmptyFiles4.
/**
* Test ScanBatch with mixed json files.
* input is data_file, data_file, empty, empty
*/
@Test
public void testJsonInputMixedWithEmptyFiles4() throws Exception {
RecordBatch scanBatch = createScanBatchFromJson(SINGLE_JSON, SINGLE_JSON2, SINGLE_EMPTY_JSON2, SINGLE_EMPTY_JSON2);
BatchSchema expectedSchema = new SchemaBuilder()
    .addNullable("id", TypeProtos.MinorType.BIGINT)
    .addNullable("name", TypeProtos.MinorType.VARCHAR)
    .build();
new MiniPlanTestBuilder()
    .root(scanBatch)
    .expectSchema(expectedSchema)
    .baselineValues(100L, "John")
    .baselineValues(1000L, "Joe")
    .expectBatchNum(2)
    .go();
}
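A side note on the two terminal calls that appear across the examples on this page: build() yields the BatchSchema handed to MiniPlanTestBuilder.expectSchema(), while buildSchema() yields the TupleMetadata consumed by the result-set loader tests below. The sketch below simply places the two variants side by side using the same columns as the test above; it assumes the same static context (imports) as these tests.
// Sketch only: contrasts build() vs. buildSchema() on the same SchemaBuilder chain.
BatchSchema batchSchema = new SchemaBuilder()
    .addNullable("id", TypeProtos.MinorType.BIGINT)
    .addNullable("name", TypeProtos.MinorType.VARCHAR)
    .build();          // BatchSchema, as used by MiniPlanTestBuilder
TupleMetadata tupleSchema = new SchemaBuilder()
    .addNullable("id", TypeProtos.MinorType.BIGINT)
    .addNullable("name", TypeProtos.MinorType.VARCHAR)
    .buildSchema();    // TupleMetadata, as used by the ResultSetLoader tests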
use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
the class TestResultSetLoaderMaps method testEmptyMapAddition.
/**
* Test adding an empty map to a loader after writing the first row.
* Then add columns in another batch. Yes, this is a bizarre condition,
* but we must check it anyway for robustness.
*/
@Test
public void testEmptyMapAddition() {
TupleMetadata schema = new SchemaBuilder().add("a", MinorType.INT).buildSchema();
ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder().setSchema(schema).build();
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
assertEquals(1, rsLoader.schemaVersion());
RowSetLoader rootWriter = rsLoader.writer();
// Start without the map. Add a map after the first row.
rsLoader.startBatch();
rootWriter.addRow(10);
int mapIndex = rootWriter.addColumn(SchemaBuilder.columnSchema("m", MinorType.MAP, DataMode.REQUIRED));
TupleWriter mapWriter = rootWriter.tuple(mapIndex);
rootWriter.addRow(20, objArray()).addRow(30, objArray());
RowSet actual = fixture.wrap(rsLoader.harvest());
assertEquals(2, rsLoader.schemaVersion());
assertEquals(3, actual.rowCount());
// Validate first batch
TupleMetadata expectedSchema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addMap("m")
      .resumeSchema()
    .buildSchema();
SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
    .addRow(10, objArray())
    .addRow(20, objArray())
    .addRow(30, objArray())
    .build();
new RowSetComparison(expected).verifyAndClearAll(actual);
// Now add another column to the map
rsLoader.startBatch();
mapWriter.addColumn(SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.REQUIRED));
rootWriter.addRow(40, objArray("fred")).addRow(50, objArray("barney"));
actual = fixture.wrap(rsLoader.harvest());
assertEquals(3, rsLoader.schemaVersion());
assertEquals(2, actual.rowCount());
// Validate the second batch
expectedSchema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addMap("m")
      .add("a", MinorType.VARCHAR)
      .resumeSchema()
    .buildSchema();
expected = fixture.rowSetBuilder(expectedSchema)
    .addRow(40, objArray("fred"))
    .addRow(50, objArray("barney"))
    .build();
new RowSetComparison(expected).verifyAndClearAll(actual);
rsLoader.close();
}
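For readers puzzled by the objArray() calls above: it is presumably the statically imported RowSetUtilities varargs helper, so objArray() stands for "a map value with no members yet" and objArray("fred") for a one-member map value. The sketch below shows the assumed shape of such a helper; it is a hypothetical reconstruction, not code copied from the Drill sources.
// Assumed shape of the helper behind objArray() (hypothetical reconstruction):
static Object[] objArray(Object... values) {
  return values;   // varargs are returned as-is, so no arguments means an empty Object[]
}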
use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
the class TestResultSetLoaderMaps method testOverwriteRow.
/**
* Version of the {@link TestResultSetLoaderProtocol#testOverwriteRow()} test
* that uses nested columns.
*/
@Test
public void testOverwriteRow() {
TupleMetadata schema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addMap("m")
      .add("b", MinorType.INT)
      .add("c", MinorType.VARCHAR)
      .resumeSchema()
    .buildSchema();
ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder()
    .setSchema(schema)
    .setRowCountLimit(ValueVector.MAX_ROW_COUNT)
    .build();
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
RowSetLoader rootWriter = rsLoader.writer();
// Can't use the shortcut to populate rows when doing overwrites.
ScalarWriter aWriter = rootWriter.scalar("a");
TupleWriter mWriter = rootWriter.tuple("m");
ScalarWriter bWriter = mWriter.scalar("b");
ScalarWriter cWriter = mWriter.scalar("c");
// Write 100,000 rows, overwriting 99% of them. This will cause vector
// overflow and data corruption if overwrite does not work; but will happily
// produce the correct result if everything works as it should.
byte[] value = new byte[512];
Arrays.fill(value, (byte) 'X');
int count = 0;
rsLoader.startBatch();
while (count < 100_000) {
rootWriter.start();
count++;
aWriter.setInt(count);
bWriter.setInt(count * 10);
cWriter.setBytes(value, value.length);
if (count % 100 == 0) {
rootWriter.save();
}
}
// Verify using a reader.
RowSet result = fixture.wrap(rsLoader.harvest());
assertEquals(count / 100, result.rowCount());
RowSetReader reader = result.reader();
TupleReader mReader = reader.tuple("m");
int rowId = 1;
while (reader.next()) {
assertEquals(rowId * 100, reader.scalar("a").getInt());
assertEquals(rowId * 1000, mReader.scalar("b").getInt());
assertTrue(Arrays.equals(value, mReader.scalar("c").getBytes()));
rowId++;
}
result.clear();
rsLoader.close();
}
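The loop above depends on the loader's overwrite behavior: calling start() again without an intervening save() repositions the writers on the same row, so unsaved values are simply replaced, and only every hundredth row survives into the harvested batch. A minimal sketch of that pattern in isolation, using the writers declared in the test above:
// Overwrite pattern in isolation (rootWriter and aWriter as declared in the test):
rootWriter.start();        // begin a row
aWriter.setInt(1);         // write a value, but do not save()
rootWriter.start();        // repositions on the same row
aWriter.setInt(2);         // overwrites the unsaved value
rootWriter.save();         // only saved rows appear in the harvested batch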
use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
the class TestResultSetLoaderOverflow method testVectorSizeLimit.
/**
* Test that the writer detects a vector overflow. The offending column
* value should be moved to the next batch.
*/
@Test
public void testVectorSizeLimit() {
TupleMetadata schema = new SchemaBuilder().add("s", MinorType.VARCHAR).buildSchema();
ResultSetOptions options = new OptionBuilder()
    .setRowCountLimit(ValueVector.MAX_ROW_COUNT)
    .setSchema(schema)
    .build();
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
RowSetLoader rootWriter = rsLoader.writer();
rsLoader.startBatch();
byte[] value = new byte[512];
Arrays.fill(value, (byte) 'X');
int count = 0;
while (!rootWriter.isFull()) {
rootWriter.start();
rootWriter.scalar(0).setBytes(value, value.length);
rootWriter.save();
count++;
}
// Number of rows should be driven by vector size.
// Our row count should include the overflow row
int expectedCount = ValueVector.MAX_BUFFER_SIZE / value.length;
assertEquals(expectedCount + 1, count);
// Loader's row count should include only "visible" rows
assertEquals(expectedCount, rootWriter.rowCount());
// Total count should include invisible and look-ahead rows.
assertEquals(expectedCount + 1, rsLoader.totalRowCount());
// Result should exclude the overflow row
RowSet result = fixture.wrap(rsLoader.harvest());
assertEquals(expectedCount, result.rowCount());
result.clear();
// Next batch should start with the overflow row
rsLoader.startBatch();
assertEquals(1, rootWriter.rowCount());
assertEquals(expectedCount + 1, rsLoader.totalRowCount());
result = fixture.wrap(rsLoader.harvest());
assertEquals(1, result.rowCount());
result.clear();
rsLoader.close();
}
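To make the expected counts concrete: expectedCount is MAX_BUFFER_SIZE divided by the 512-byte value length. Assuming ValueVector.MAX_BUFFER_SIZE is 16 MiB (an assumption; the test only uses the constant symbolically), the numbers work out as sketched below, and since the result is well below ValueVector.MAX_ROW_COUNT it is indeed the vector size, not the row-count limit, that ends the batch.
// Hypothetical arithmetic, assuming ValueVector.MAX_BUFFER_SIZE == 16 MiB:
int assumedMaxBufferSize = 16 * 1024 * 1024;              // assumed limit: 16,777,216 bytes
int valueLength = 512;
int expectedCount = assumedMaxBufferSize / valueLength;    // 32,768 visible rows
int writerCount = expectedCount + 1;                       // 32,769 counting the overflow row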
use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
the class TestResultSetLoaderOverflow method testOversizeArray.
/**
* Case where a single array fills up the vector to the maximum size
* limit. Overflow won't work here; the attempt will fail with a user
* exception.
*/
@Test
public void testOversizeArray() {
TupleMetadata schema = new SchemaBuilder().addArray("s", MinorType.VARCHAR).buildSchema();
ResultSetOptions options = new OptionBuilder()
    .setRowCountLimit(ValueVector.MAX_ROW_COUNT)
    .setSchema(schema)
    .build();
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
RowSetLoader rootWriter = rsLoader.writer();
// Create a single array as the column value in the first row. When
// this overflows, an exception is thrown since overflow is not possible.
rsLoader.startBatch();
byte[] value = new byte[473];
Arrays.fill(value, (byte) 'X');
rootWriter.start();
ScalarWriter array = rootWriter.array(0).scalar();
try {
for (int i = 0; i < ValueVector.MAX_ROW_COUNT; i++) {
array.setBytes(value, value.length);
}
fail();
} catch (UserException e) {
assertTrue(e.getMessage().contains("column value is larger than the maximum"));
}
rsLoader.close();
}
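In contrast to testVectorSizeLimit, all of the oversized data here belongs to a single row's array, so there is no earlier row boundary for the loader to roll over to; it can only reject the value. Rough arithmetic under the same assumed 16 MiB limit (an assumption, not stated by the test) suggests the failure fires partway through the loop:
// Assumption-based arithmetic sketch (16 MiB vector limit assumed, as above):
int elementSize = 473;
int assumedLimit = 16 * 1024 * 1024;
int elementsBeforeFailure = assumedLimit / elementSize;   // roughly 35,000 elements,
// well short of the 65,536 writes the loop attempts, so the UserException fires mid-loop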