Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in the Apache Drill project.
Class TestResultSetLoaderLimits, method testLimit1.
/**
 * Pathological limit case: a single-row limit. The very first row
 * must fill the batch and hit the overall limit at the same time.
 */
@Test
public void testLimit1() {
  // Configure the loader with the smallest possible overall limit.
  ResultSetOptions options = new ResultSetOptionBuilder().limit(1).build();
  ResultSetLoader loader = new ResultSetLoaderImpl(fixture.allocator(), options);

  // The first batch can start, but may hold at most one row.
  assertTrue(loader.startBatch());
  assertEquals(1, loader.maxBatchSize());

  RowSetLoader writer = loader.writer();
  writer.addColumn(SchemaBuilder.columnSchema("s", MinorType.VARCHAR, DataMode.REQUIRED));
  writer.addRow("foo");

  // One row written: the batch is full and no further row may start.
  assertTrue(writer.isFull());
  assertFalse(writer.start());

  // Harvest yields exactly the single row.
  RowSet result = fixture.wrap(loader.harvest());
  assertEquals(1, result.rowCount());
  result.clear();

  // The overall row limit has been reached.
  assertTrue(loader.atLimit());
  loader.close();
}
Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in the Apache Drill project.
Class TestResultSetLoaderLimits, method testLimit0.
/**
 * Limit 0 is used to obtain only the schema: a schema-only batch can be
 * defined and harvested, but no data rows may be written and no further
 * batches may be started.
 */
@Test
public void testLimit0() {
  ResultSetOptions options = new ResultSetOptionBuilder().limit(0).build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);

  // Can define a schema-only batch.
  assertTrue(rsLoader.startBatch());
  RowSetLoader rootWriter = rsLoader.writer();
  rootWriter.addColumn(SchemaBuilder.columnSchema("s", MinorType.VARCHAR, DataMode.REQUIRED));

  // But, can't add any rows.
  assertTrue(rootWriter.isFull());
  RowSet result = fixture.wrap(rsLoader.harvest());
  assertEquals(0, result.rowCount());
  assertTrue(rsLoader.atLimit());

  // The harvested batch still carries the declared schema.
  // Use assertEquals (not assertTrue on equals()) so a failure reports
  // the two schemas rather than a bare "expected true".
  TupleMetadata schema = new SchemaBuilder().add("s", MinorType.VARCHAR).buildSchema();
  assertEquals(schema, result.schema());
  result.clear();

  // Can't start a data batch.
  assertFalse(rsLoader.startBatch());

  // Can't start a row.
  assertFalse(rootWriter.start());
  rsLoader.close();
}
Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in the Apache Drill project.
Class TestResultSetLoaderLimits, method testCustomRowLimit.
/**
 * Verify that the caller can set a per-batch row-count limit lower than
 * the default, and that out-of-range requests are clamped to valid values.
 */
@Test
public void testCustomRowLimit() {
  // A requested limit above the hard vector limit is truncated to that limit.
  ResultSetOptions opts = new ResultSetOptionBuilder()
      .rowCountLimit(ValueVector.MAX_ROW_COUNT + 1)
      .build();
  assertEquals(ValueVector.MAX_ROW_COUNT, opts.rowCountLimit);

  // Paranoia: the clamp compares against the vector limit,
  // not against whatever value was set previously.
  opts = new ResultSetOptionBuilder()
      .rowCountLimit(ValueVector.MAX_ROW_COUNT + 1)
      .rowCountLimit(TEST_ROW_LIMIT)
      .build();
  assertEquals(TEST_ROW_LIMIT, opts.rowCountLimit);
  opts = new ResultSetOptionBuilder()
      .rowCountLimit(TEST_ROW_LIMIT)
      .rowCountLimit(ValueVector.MAX_ROW_COUNT + 1)
      .build();
  assertEquals(ValueVector.MAX_ROW_COUNT, opts.rowCountLimit);

  // A limit below 1 is raised to 1.
  opts = new ResultSetOptionBuilder().rowCountLimit(0).build();
  assertEquals(1, opts.rowCountLimit);

  // Now load with a (valid) limit lower than the default.
  opts = new ResultSetOptionBuilder().rowCountLimit(TEST_ROW_LIMIT).build();
  ResultSetLoader loader = new ResultSetLoaderImpl(fixture.allocator(), opts);
  assertEquals(TEST_ROW_LIMIT, loader.targetRowCount());

  RowSetLoader writer = loader.writer();
  writer.addColumn(SchemaBuilder.columnSchema("s", MinorType.VARCHAR, DataMode.REQUIRED));
  loader.startBatch();
  int written = fillToLimit(writer);
  assertEquals(TEST_ROW_LIMIT, written);
  assertEquals(written, writer.rowCount());

  // Writing beyond the row limit must fail.
  assertFalse(writer.start());
  try {
    writer.save();
    fail("save() should fail once the row limit is reached");
  } catch (IllegalStateException e) {
    // Expected: the writer refuses to save past the limit.
  }

  // After harvest, a fresh batch starts empty.
  loader.harvest().clear();
  loader.startBatch();
  assertEquals(0, writer.rowCount());
  loader.close();
}
Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in the Apache Drill project.
Class TestResultSetLoaderMaps, method testNameSpace.
/**
 * Verify that map name spaces (and their implementations) are independent:
 * a column "a" may exist at the top level, inside map "m", and inside the
 * nested map "m.m" without the writers interfering with one another.
 */
@Test
public void testNameSpace() {
  final TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .add("a", MinorType.INT)
        .addMap("m")
          .add("a", MinorType.INT)
        .resumeMap()
      .resumeSchema()
      .buildSchema();
  final ResultSetLoaderImpl.ResultSetOptions options =
      new ResultSetOptionBuilder().readerSchema(schema).build();
  final ResultSetLoader loader = new ResultSetLoaderImpl(fixture.allocator(), options);
  assertFalse(loader.isProjectionEmpty());
  final RowSetLoader rootWriter = loader.writer();
  loader.startBatch();

  // Write rows the way that clients will: by caching the individual
  // column writers for each level of the map hierarchy.
  final ScalarWriter topA = rootWriter.scalar("a");
  final TupleWriter outerMap = rootWriter.tuple("m");
  final ScalarWriter outerA = outerMap.scalar("a");
  final TupleWriter innerMap = outerMap.tuple("m");
  final ScalarWriter innerA = innerMap.scalar("a");

  rootWriter.start();
  topA.setInt(11);
  outerA.setInt(12);
  innerA.setInt(13);
  rootWriter.save();

  rootWriter.start();
  topA.setInt(21);
  outerA.setInt(22);
  innerA.setInt(23);
  rootWriter.save();

  // Same thing via the simplified test-row format.
  rootWriter.addRow(31, mapValue(32, mapValue(33)));

  // Verify all three rows landed in the right name spaces.
  final RowSet actual = fixture.wrap(loader.harvest());
  final SingleRowSet expected = fixture.rowSetBuilder(schema)
      .addRow(11, mapValue(12, mapValue(13)))
      .addRow(21, mapValue(22, mapValue(23)))
      .addRow(31, mapValue(32, mapValue(33)))
      .build();
  RowSetUtilities.verify(expected, actual);
  loader.close();
}
Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in the Apache Drill project.
Class TestResultSetLoaderMaps, method testEmptyMapAddition.
/**
 * Test adding an empty map to a loader after writing the first row,
 * then adding a column to that map in a later batch. A bizarre
 * sequence, but one we must handle for robustness.
 */
@Test
public void testEmptyMapAddition() {
  final TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .buildSchema();
  final ResultSetLoaderImpl.ResultSetOptions options =
      new ResultSetOptionBuilder().readerSchema(schema).build();
  final ResultSetLoader loader = new ResultSetLoaderImpl(fixture.allocator(), options);
  assertEquals(1, loader.schemaVersion());
  final RowSetLoader rootWriter = loader.writer();

  // Start without the map; add an (empty) map after the first row.
  loader.startBatch();
  rootWriter.addRow(10);
  final int mapIndex = rootWriter.addColumn(
      SchemaBuilder.columnSchema("m", MinorType.MAP, DataMode.REQUIRED));
  final TupleWriter mapWriter = rootWriter.tuple(mapIndex);
  rootWriter
      .addRow(20, mapValue())
      .addRow(30, mapValue());

  RowSet actual = fixture.wrap(loader.harvest());
  assertEquals(2, loader.schemaVersion());
  assertEquals(3, actual.rowCount());

  // Validate the first batch: all rows carry the (empty) map.
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
      .resumeSchema()
      .buildSchema();
  SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
      .addRow(10, mapValue())
      .addRow(20, mapValue())
      .addRow(30, mapValue())
      .build();
  RowSetUtilities.verify(expected, actual);

  // Now add a column to the map in a second batch.
  loader.startBatch();
  mapWriter.addColumn(SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.REQUIRED));
  rootWriter
      .addRow(40, mapValue("fred"))
      .addRow(50, mapValue("barney"));

  actual = fixture.wrap(loader.harvest());
  assertEquals(3, loader.schemaVersion());
  assertEquals(2, actual.rowCount());

  // Validate the second batch against the expanded map schema.
  expectedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .add("a", MinorType.VARCHAR)
      .resumeSchema()
      .buildSchema();
  expected = fixture.rowSetBuilder(expectedSchema)
      .addRow(40, mapValue("fred"))
      .addRow(50, mapValue("barney"))
      .build();
  RowSetUtilities.verify(expected, actual);
  loader.close();
}
Aggregations