Example usage of org.apache.drill.exec.physical.rowSet.RowSet in the Apache Drill project.
From class TestResultSetLoaderMaps, method testMapWithOverflow.
/**
 * Create a schema with a map, then trigger an overflow on one of the columns
 * in the map. Proper overflow handling should occur regardless of nesting
 * depth.
 */
@Test
public void testMapWithOverflow() {
  final TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m1")
        .add("b", MinorType.INT)
        .addMap("m2")
          // Written before overflow occurs
          .add("c", MinorType.INT)
          .add("d", MinorType.VARCHAR)
          // After overflow, not yet written
          .add("e", MinorType.INT)
        .resumeMap()
      .resumeSchema()
      .buildSchema();
  final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
      .readerSchema(schema)
      .rowCountLimit(ValueVector.MAX_ROW_COUNT)
      .build();
  final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  final RowSetLoader rootWriter = rsLoader.writer();

  // A large VARCHAR payload so column "d" fills the buffer quickly.
  final byte[] bigValue = new byte[512];
  Arrays.fill(bigValue, (byte) 'X');

  rsLoader.startBatch();
  int rowsWritten = 0;
  for (; !rootWriter.isFull(); rowsWritten++) {
    rootWriter.addRow(rowsWritten,
        mapValue(rowsWritten * 10,
            mapValue(rowsWritten * 100, bigValue, rowsWritten * 1000)));
  }

  // Our row count should include the overflow row
  final int expectedCount = ValueVector.MAX_BUFFER_SIZE / bigValue.length;
  assertEquals(expectedCount + 1, rowsWritten);

  // Loader's row count should include only "visible" rows
  assertEquals(expectedCount, rootWriter.rowCount());

  // Total count should include invisible and look-ahead rows.
  assertEquals(expectedCount + 1, rsLoader.totalRowCount());

  // Result should exclude the overflow row
  RowSet result = fixture.wrap(rsLoader.harvest());
  assertEquals(expectedCount, result.rowCount());

  // Ensure the odd map vector value count variable is set correctly.
  final MapVector outerMap = (MapVector) result.container().getValueVector(1).getValueVector();
  assertEquals(expectedCount, outerMap.getAccessor().getValueCount());
  final MapVector innerMap = (MapVector) outerMap.getChildByOrdinal(1);
  assertEquals(expectedCount, innerMap.getAccessor().getValueCount());
  result.clear();

  // Next batch should start with the overflow row
  rsLoader.startBatch();
  assertEquals(1, rootWriter.rowCount());
  assertEquals(expectedCount + 1, rsLoader.totalRowCount());
  result = fixture.wrap(rsLoader.harvest());
  assertEquals(1, result.rowCount());
  result.clear();
  rsLoader.close();
}
Example usage of org.apache.drill.exec.physical.rowSet.RowSet in the Apache Drill project.
From class TestResultSetLoaderMaps, method testMapAddition.
/**
 * Test adding a map to a loader after writing the first row.
 * Verifies that schema evolution mid-batch back-fills the new map
 * column for already-written rows, and that the map's namespace is
 * independent of the top-level tuple's.
 */
@Test
public void testMapAddition() {
  final TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .buildSchema();
  final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
      .readerSchema(schema)
      .build();
  final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  assertEquals(1, rsLoader.schemaVersion());
  final RowSetLoader rootWriter = rsLoader.writer();

  // Start without the map. Add a map after the first row.
  rsLoader.startBatch();
  rootWriter.addRow(10);

  final int mapIndex = rootWriter.addColumn(
      SchemaBuilder.columnSchema("m", MinorType.MAP, DataMode.REQUIRED));
  final TupleWriter mapWriter = rootWriter.tuple(mapIndex);

  // Add a column to the map with the same name as the top-level column.
  // Verifies that the name spaces are independent.
  final int colIndex = mapWriter.addColumn(
      SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.REQUIRED));
  assertEquals(0, colIndex);

  // Ensure metadata was added. Use assertEquals so a failure reports the
  // actual size rather than just "expected true".
  assertEquals(1, mapWriter.tupleSchema().size());
  assertSame(mapWriter.tupleSchema(), mapWriter.schema().tupleSchema());
  assertSame(mapWriter.tupleSchema().metadata(colIndex), mapWriter.scalar(colIndex).schema());

  rootWriter
      .addRow(20, mapValue("fred"))
      .addRow(30, mapValue("barney"));

  final RowSet actual = fixture.wrap(rsLoader.harvest());
  assertEquals(3, rsLoader.schemaVersion());
  assertEquals(3, actual.rowCount());

  // The map vector's materialized field should carry the single child column.
  final MapVector mapVector = (MapVector) actual.container().getValueVector(1).getValueVector();
  final MaterializedField mapField = mapVector.getField();
  assertEquals(1, mapField.getChildren().size());
  assertTrue(mapWriter.scalar(colIndex).schema().schema()
      .isEquivalent(mapField.getChildren().iterator().next()));

  // Validate first batch: row written before the map existed gets an
  // empty (back-filled) map value.
  final TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .add("a", MinorType.VARCHAR)
      .resumeSchema()
      .buildSchema();
  final SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
      .addRow(10, mapValue(""))
      .addRow(20, mapValue("fred"))
      .addRow(30, mapValue("barney"))
      .build();
  RowSetUtilities.verify(expected, actual);
  rsLoader.close();
}
Example usage of org.apache.drill.exec.physical.rowSet.RowSet in the Apache Drill project.
From class TestResultSetCopier, method testArrays.
@Test
public void testArrays() {
  // Build the expected batch from an independent generator instance.
  final ArrayGen expectedGen = new ArrayGen();
  expectedGen.next();
  final RowSet expected = RowSets.wrap(expectedGen.batch());

  // Copy a single input batch straight through the copier.
  final ResultSetCopier copier = newCopier(new ArrayGen());
  copier.startOutputBatch();
  copier.nextInputBatch();
  copier.copyAllRows();
  final RowSet actual = fixture.wrap(copier.harvest());

  RowSetUtilities.verify(expected, actual);
  copier.close();
}
Example usage of org.apache.drill.exec.physical.rowSet.RowSet in the Apache Drill project.
From class TestResultSetCopier, method testMaps.
@Test
public void testMaps() {
  // Build the expected batch from an independent generator instance.
  final MapGen expectedGen = new MapGen();
  expectedGen.next();
  final RowSet expected = RowSets.wrap(expectedGen.batch());

  // Copy a single input batch straight through the copier.
  final ResultSetCopier copier = newCopier(new MapGen());
  copier.startOutputBatch();
  copier.nextInputBatch();
  copier.copyAllRows();
  final RowSet actual = fixture.wrap(copier.harvest());

  RowSetUtilities.verify(expected, actual);
  copier.close();
}
Example usage of org.apache.drill.exec.physical.rowSet.RowSet in the Apache Drill project.
From class TestResultSetCopier, method testMultiOutput.
@Test
public void testMultiOutput() {
  // Equivalent of operator start() method: 15-row input batches, 2 batches,
  // with output batches capped at 12 rows so output boundaries differ
  // from input boundaries.
  final DataGen inputGen = new DataGen(15, 2);
  final ResultSetOptionBuilder options = new ResultSetOptionBuilder().rowCountLimit(12);
  final ResultSetCopier copier = newCopier(inputGen, options);

  // Validator produces the same data re-batched at the 12-row limit.
  final DataGen validatorGen = new DataGen(12, 2);

  // Equivalent of an entire operator run
  int outputBatches = 0;
  for (;;) {
    // Equivalent of operator next() method: fill one output batch
    // from as many input batches as needed.
    copier.startOutputBatch();
    boolean inputExhausted = false;
    while (!copier.isOutputFull() && !inputExhausted) {
      if (copier.nextInputBatch()) {
        copier.copyAllRows();
      } else {
        inputExhausted = true;
      }
    }
    if (!copier.hasOutputRows()) {
      break;
    }

    // Equivalent of sending downstream
    final RowSet actual = fixture.wrap(copier.harvest());
    validatorGen.next();
    final RowSet expected = RowSets.wrap(validatorGen.batch());
    RowSetUtilities.verify(expected, actual, actual.rowCount());
    outputBatches++;
  }

  // Ensure more than one output batch.
  assertTrue(outputBatches > 1);

  // Ensure all rows generated.
  assertEquals(30, inputGen.rowCount);

  // Simulate operator close();
  copier.close();
}
Aggregated usage examples end here.