use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
the class TestResultSetLoaderOverflow method testArrayOverflowWithOtherArrays.
/**
* Test the complete set of array overflow cases:
* <ul>
* <li>Array a is written before the column that has overflow,
* and must be copied, in its entirety, to the overflow row.</li>
* <li>Column b causes the overflow.</li>
* <li>Column c is written after the overflow, and should go
* to the look-ahead row.</li>
* <li>Column d is written for a while, then has empties before
* the overflow row, but is written in the overflow row.</li>
* <li>Column e is like d, but is not written in the overflow
* row.</li>
* </ul>
*/
@Test
public void testArrayOverflowWithOtherArrays() {
TupleMetadata schema = new SchemaBuilder()
    .addArray("a", MinorType.INT)
    .addArray("b", MinorType.VARCHAR)
    .addArray("c", MinorType.INT)
    .addArray("d", MinorType.INT)
    .buildSchema();
ResultSetOptions options = new ResultSetOptionBuilder()
    .rowCountLimit(ValueVector.MAX_ROW_COUNT)
    .readerSchema(schema)
    .build();
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
RowSetLoader rootWriter = rsLoader.writer();
// Fill the batch with rows of arrays. Tack a numeric suffix onto each
// string value so we can be sure the proper data is written and moved
// to the overflow batch.
byte[] value = new byte[512];
Arrays.fill(value, (byte) 'X');
String strValue = new String(value, Charsets.UTF_8);
int aCount = 3;
int bCount = 11;
int cCount = 5;
int dCount = 7;
int cCutoff = ValueVector.MAX_BUFFER_SIZE / value.length / bCount / 2;
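// c will be written only while b has filled less than half of the
// maximum vector size, so c stops well before the row that overflows.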
ScalarWriter aWriter = rootWriter.array("a").scalar();
ScalarWriter bWriter = rootWriter.array("b").scalar();
ScalarWriter cWriter = rootWriter.array("c").scalar();
ScalarWriter dWriter = rootWriter.array("d").scalar();
int count = 0;
rsLoader.startBatch();
while (rootWriter.start()) {
for (int i = 0; i < aCount; i++) {
aWriter.setInt(count * aCount + i);
}
for (int i = 0; i < bCount; i++) {
String cellValue = strValue + (count * bCount + i);
bWriter.setString(cellValue);
}
if (count < cCutoff) {
for (int i = 0; i < cCount; i++) {
cWriter.setInt(count * cCount + i);
}
}
if (count < cCutoff || rootWriter.isFull()) {
for (int i = 0; i < dCount; i++) {
dWriter.setInt(count * dCount + i);
}
}
rootWriter.save();
count++;
}
// Verify
{
VectorContainer container = rsLoader.harvest();
BatchValidator.validate(container);
RowSet result = fixture.wrap(container);
assertEquals(count - 1, result.rowCount());
RowSetReader reader = result.reader();
ArrayReader aArray = reader.array("a");
ScalarReader aReader = aArray.scalar();
ArrayReader bArray = reader.array("b");
ScalarReader bReader = bArray.scalar();
ArrayReader cArray = reader.array("c");
ScalarReader cReader = cArray.scalar();
ArrayReader dArray = reader.array("d");
ScalarReader dReader = dArray.scalar();
while (reader.next()) {
int rowId = reader.offset();
assertEquals(aCount, aArray.size());
for (int i = 0; i < aCount; i++) {
assertTrue(aArray.next());
assertEquals(rowId * aCount + i, aReader.getInt());
}
assertEquals(bCount, bArray.size());
for (int i = 0; i < bCount; i++) {
assertTrue(bArray.next());
String cellValue = strValue + (rowId * bCount + i);
assertEquals(cellValue, bReader.getString());
}
if (rowId < cCutoff) {
assertEquals(cCount, cArray.size());
for (int i = 0; i < cCount; i++) {
assertTrue(cArray.next());
assertEquals(rowId * cCount + i, cReader.getInt());
}
assertEquals(dCount, dArray.size());
for (int i = 0; i < dCount; i++) {
assertTrue(dArray.next());
assertEquals(rowId * dCount + i, dReader.getInt());
}
} else {
assertEquals(0, cArray.size());
assertEquals(0, dArray.size());
}
}
result.clear();
}
int firstCount = count - 1;
// One row (the overflow row) is already in the batch. Write more,
// skipping the initial few values for columns c and d. Column d has
// a rolled-over value; c has an empty roll-over.
rsLoader.startBatch();
for (int j = 0; j < 5; j++) {
rootWriter.start();
for (int i = 0; i < aCount; i++) {
aWriter.setInt(count * aCount + i);
}
for (int i = 0; i < bCount; i++) {
String cellValue = strValue + (count * bCount + i);
bWriter.setString(cellValue);
}
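// Write c and d only in the final pass of this loop (j == 4).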
if (j > 3) {
for (int i = 0; i < cCount; i++) {
cWriter.setInt(count * cCount + i);
}
for (int i = 0; i < dCount; i++) {
dWriter.setInt(count * dCount + i);
}
}
rootWriter.save();
count++;
}
{
VectorContainer container = rsLoader.harvest();
BatchValidator.validate(container);
RowSet result = fixture.wrap(container);
assertEquals(6, result.rowCount());
RowSetReader reader = result.reader();
ArrayReader aArray = reader.array("a");
ScalarReader aReader = aArray.scalar();
ArrayReader bArray = reader.array("b");
ScalarReader bReader = bArray.scalar();
ArrayReader cArray = reader.array("c");
ScalarReader cReader = cArray.scalar();
ArrayReader dArray = reader.array("d");
ScalarReader dReader = dArray.scalar();
int j = 0;
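// Reader row 0 is the overflow row carried over from the first batch
// (d was written there, c was not); reader rows 1-5 correspond to the
// write-loop passes j = 0-4 above.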
while (reader.next()) {
int rowId = firstCount + reader.offset();
assertEquals(aCount, aArray.size());
for (int i = 0; i < aCount; i++) {
assertTrue(aArray.next());
assertEquals("Index " + i, rowId * aCount + i, aReader.getInt());
}
assertEquals(bCount, bArray.size());
for (int i = 0; i < bCount; i++) {
assertTrue(bArray.next());
String cellValue = strValue + (rowId * bCount + i);
assertEquals(cellValue, bReader.getString());
}
if (j > 4) {
assertEquals(cCount, cArray.size());
for (int i = 0; i < cCount; i++) {
assertTrue(cArray.next());
assertEquals(rowId * cCount + i, cReader.getInt());
}
} else {
assertEquals(0, cArray.size());
}
if (j == 0 || j > 4) {
assertEquals(dCount, dArray.size());
for (int i = 0; i < dCount; i++) {
assertTrue(dArray.next());
assertEquals(rowId * dCount + i, dReader.getInt());
}
} else {
assertEquals(0, dArray.size());
}
j++;
}
result.clear();
}
rsLoader.close();
}
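For orientation, the write-loop protocol this overflow test exercises reduces to the sketch below. It is not part of the test; it assumes the same test fixture (fixture) used above and uses only calls the tests themselves demonstrate: start() returns false once the batch fills, and the row that triggered overflow is held as a look-ahead row that seeds the next batch.
// Minimal sketch of the loader write loop (assumed setup, not from the
// Drill test suite).
TupleMetadata sketchSchema = new SchemaBuilder()
    .add("id", MinorType.INT)
    .buildSchema();
ResultSetLoader sketchLoader = new ResultSetLoaderImpl(fixture.allocator(),
    new ResultSetOptionBuilder().readerSchema(sketchSchema).build());
RowSetLoader writer = sketchLoader.writer();
sketchLoader.startBatch();
int id = 0;
// start() returns false when the batch is full; an overflow row, if any,
// becomes the first row of the next batch.
while (writer.start()) {
  writer.scalar("id").setInt(id++);
  writer.save();
}
RowSet batch = fixture.wrap(sketchLoader.harvest());
batch.clear();
sketchLoader.close();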
use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
the class TestResultSetLoaderProtocol method testCloseWithoutHarvest.
/**
* Test that memory is released if the loader is closed with an active
* batch (that is, before the batch is harvested.)
*/
@Test
public void testCloseWithoutHarvest() {
TupleMetadata schema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .add("b", MinorType.VARCHAR)
    .buildSchema();
ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
    .readerSchema(schema)
    .rowCountLimit(ValueVector.MAX_ROW_COUNT)
    .build();
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
RowSetLoader rootWriter = rsLoader.writer();
rsLoader.startBatch();
for (int i = 0; i < 100; i++) {
rootWriter.start();
rootWriter.scalar("a").setInt(i);
rootWriter.scalar("b").setString("b-" + i);
rootWriter.save();
}
// Don't harvest the batch. Allocator will complain if the
// loader does not release memory.
rsLoader.close();
}
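A defensive caller-side pattern implied by this test (a sketch, not code from the Drill sources): wrap the writes in try/finally so that close(), which releases the active batch's memory, runs even if an exception interrupts the batch before harvest. It reuses the schema and options built above.
ResultSetLoader rsLoader2 = new ResultSetLoaderImpl(fixture.allocator(), options);
try {
  RowSetLoader rootWriter2 = rsLoader2.writer();
  rsLoader2.startBatch();
  rootWriter2.start();
  rootWriter2.scalar("a").setInt(1);
  rootWriter2.scalar("b").setString("b-1");
  rootWriter2.save();
  // An exception thrown here would leave an active, un-harvested batch;
  // the finally block still releases its memory.
} finally {
  rsLoader2.close();
}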
use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
the class TestResultSetLoaderProtocol method testInitialSchema.
/**
* Provide a schema up front to the loader; schema is built before
* the first row.
* <p>
* Also verifies the test-time method to set a row of values using
* a single method.
*/
@Test
public void testInitialSchema() {
TupleMetadata schema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addNullable("b", MinorType.INT)
    .add("c", MinorType.VARCHAR)
    .buildSchema();
ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
    .readerSchema(schema)
    .build();
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
RowSetLoader rootWriter = rsLoader.writer();
rsLoader.startBatch();
rootWriter
    .addRow(10, 100, "fred")
    .addRow(20, null, "barney")
    .addRow(30, 300, "wilma");
RowSet actual = fixture.wrap(rsLoader.harvest());
RowSet expected = fixture.rowSetBuilder(schema)
    .addRow(10, 100, "fred")
    .addRow(20, null, "barney")
    .addRow(30, 300, "wilma")
    .build();
RowSetUtilities.verify(expected, actual);
rsLoader.close();
}
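Each addRow() call above is test-time shorthand; the first row, written longhand with the writer sequence the other tests use, would be:
// Longhand equivalent of rootWriter.addRow(10, 100, "fred"):
rootWriter.start();
rootWriter.scalar("a").setInt(10);
rootWriter.scalar("b").setInt(100);       // nullable INT; setNull() writes a null
rootWriter.scalar("c").setString("fred");
rootWriter.save();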
use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
the class TestResultSetLoaderProtocol method testBasics.
@Test
public void testBasics() {
ResultSetLoaderImpl rsLoaderImpl = new ResultSetLoaderImpl(fixture.allocator());
ResultSetLoader rsLoader = rsLoaderImpl;
assertEquals(0, rsLoader.schemaVersion());
assertEquals(ResultSetLoader.DEFAULT_ROW_COUNT, rsLoader.targetRowCount());
assertEquals(ValueVector.MAX_BUFFER_SIZE, rsLoader.targetVectorSize());
assertEquals(0, rsLoader.writer().rowCount());
assertEquals(0, rsLoader.batchCount());
assertEquals(0, rsLoader.totalRowCount());
assertTrue(rsLoader.isProjectionEmpty());
try {
rsLoader.harvest();
fail();
} catch (IllegalStateException e) {
// Expected
}
// Can define schema before starting the first batch.
RowSetLoader rootWriter = rsLoader.writer();
TupleMetadata schema = rootWriter.tupleSchema();
assertEquals(0, schema.size());
MaterializedField fieldA = SchemaBuilder.columnSchema("a", MinorType.INT, DataMode.REQUIRED);
rootWriter.addColumn(fieldA);
assertFalse(rsLoader.isProjectionEmpty());
assertEquals(1, schema.size());
assertTrue(fieldA.isEquivalent(schema.column(0)));
assertSame(schema.metadata(0), schema.metadata("a"));
try {
rootWriter.start();
fail();
} catch (IllegalStateException e) {
// Expected
}
try {
rootWriter.save();
fail();
} catch (IllegalStateException e) {
// Expected
}
// Because writing is an inner loop, no checks are done to ensure
// that writing occurs only in the proper state. So we can't test
// setInt() in the wrong state.
rsLoader.startBatch();
try {
rsLoader.startBatch();
fail();
} catch (IllegalStateException e) {
// Expected
}
assertFalse(rootWriter.isFull());
rootWriter.start();
rootWriter.scalar(0).setInt(100);
assertEquals(0, rootWriter.rowCount());
assertEquals(0, rsLoader.batchCount());
rootWriter.save();
assertEquals(1, rootWriter.rowCount());
assertEquals(1, rsLoader.batchCount());
assertEquals(1, rsLoader.totalRowCount());
// Can add a field after first row, prior rows are
// "back-filled".
MaterializedField fieldB = SchemaBuilder.columnSchema("b", MinorType.INT, DataMode.OPTIONAL);
rootWriter.addColumn(fieldB);
assertEquals(2, schema.size());
assertTrue(fieldB.isEquivalent(schema.column(1)));
assertSame(schema.metadata(1), schema.metadata("b"));
rootWriter.start();
rootWriter.scalar(0).setInt(200);
rootWriter.scalar(1).setInt(210);
rootWriter.save();
assertEquals(2, rootWriter.rowCount());
assertEquals(1, rsLoader.batchCount());
assertEquals(2, rsLoader.totalRowCount());
// Harvest the first batch. Version number is the number
// of columns added.
assertFalse(rootWriter.isFull());
RowSet result = fixture.wrap(rsLoader.harvest());
assertEquals(2, rsLoader.schemaVersion());
assertEquals(0, rootWriter.rowCount());
assertEquals(1, rsLoader.batchCount());
assertEquals(2, rsLoader.totalRowCount());
SingleRowSet expected = fixture.rowSetBuilder(result.batchSchema())
    .addRow(100, null)
    .addRow(200, 210)
    .build();
RowSetUtilities.verify(expected, result);
try {
rootWriter.start();
fail();
} catch (IllegalStateException e) {
// Expected
}
try {
rsLoader.harvest();
fail();
} catch (IllegalStateException e) {
// Expected
}
try {
rootWriter.save();
fail();
} catch (IllegalStateException e) {
// Expected
}
// Create a second batch
rsLoader.startBatch();
assertEquals(0, rootWriter.rowCount());
assertEquals(1, rsLoader.batchCount());
assertEquals(2, rsLoader.totalRowCount());
rootWriter.start();
rootWriter.scalar(0).setInt(300);
rootWriter.scalar(1).setInt(310);
rootWriter.save();
assertEquals(1, rootWriter.rowCount());
assertEquals(2, rsLoader.batchCount());
assertEquals(3, rsLoader.totalRowCount());
rootWriter.start();
rootWriter.scalar(0).setInt(400);
rootWriter.scalar(1).setInt(410);
rootWriter.save();
// Harvest. Schema has not changed.
result = fixture.wrap(rsLoader.harvest());
assertEquals(2, rsLoader.schemaVersion());
assertEquals(0, rootWriter.rowCount());
assertEquals(2, rsLoader.batchCount());
assertEquals(4, rsLoader.totalRowCount());
expected = fixture.rowSetBuilder(result.batchSchema())
    .addRow(300, 310)
    .addRow(400, 410)
    .build();
RowSetUtilities.verify(expected, result);
// Next batch. Schema has changed.
rsLoader.startBatch();
rootWriter.start();
rootWriter.scalar(0).setInt(500);
rootWriter.scalar(1).setInt(510);
rootWriter.addColumn(SchemaBuilder.columnSchema("c", MinorType.INT, DataMode.OPTIONAL));
rootWriter.scalar(2).setInt(520);
rootWriter.save();
rootWriter.start();
rootWriter.scalar(0).setInt(600);
rootWriter.scalar(1).setInt(610);
rootWriter.scalar(2).setInt(620);
rootWriter.save();
result = fixture.wrap(rsLoader.harvest());
assertEquals(3, rsLoader.schemaVersion());
expected = fixture.rowSetBuilder(result.batchSchema())
    .addRow(500, 510, 520)
    .addRow(600, 610, 620)
    .build();
RowSetUtilities.verify(expected, result);
rsLoader.close();
try {
rootWriter.start();
fail();
} catch (IllegalStateException e) {
// Expected
}
try {
rsLoader.writer();
fail();
} catch (IllegalStateException e) {
// Expected
}
try {
rsLoader.startBatch();
fail();
} catch (IllegalStateException e) {
// Expected
}
try {
rsLoader.harvest();
fail();
} catch (IllegalStateException e) {
// Expected
}
try {
rootWriter.save();
fail();
} catch (IllegalStateException e) {
// Expected
}
// Benign to close twice
rsLoader.close();
}
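The IllegalStateException probes above trace out the loader's lifecycle. As a summary, one legal pass through it looks like the sketch below (the state names in comments are descriptive, not the implementation's own):
ResultSetLoader loader = new ResultSetLoaderImpl(fixture.allocator());
RowSetLoader writer = loader.writer();
writer.addColumn(SchemaBuilder.columnSchema("a", MinorType.INT, DataMode.REQUIRED));
loader.startBatch();                  // idle -> active; a second call fails
writer.start();                       // begin a row
writer.scalar(0).setInt(100);
writer.save();                        // commit the row
RowSet batch = fixture.wrap(loader.harvest()); // active -> harvested
batch.clear();
loader.startBatch();                  // a new batch may follow a harvest
loader.close();                       // terminal; a second close() is benign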
use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
the class TestResultSetLoaderDicts method testArrayValue.
@Test
public void testArrayValue() {
final TupleMetadata schema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addDict("d", MinorType.INT)
      .repeatedValue(MinorType.INT)
      .resumeSchema()
    .buildSchema();
final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
    .readerSchema(schema)
    .build();
final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
final RowSetLoader rootWriter = rsLoader.writer();
// Write some rows
rsLoader.startBatch();
rootWriter
    .addRow(10, map(
        1, intArray(110, 120, 130),
        2, intArray(111, 121)))
    .addRow(20, map())
    .addRow(30, map(
        1, intArray(),
        2, intArray(310, 320),
        3, intArray(311, 321, 331, 341),
        4, intArray(312, 322, 332)));
// Validate first batch
RowSet actual = fixture.wrap(rsLoader.harvest());
SingleRowSet expected = fixture.rowSetBuilder(schema)
    .addRow(10, map(
        1, intArray(110, 120, 130),
        2, intArray(111, 121)))
    .addRow(20, map())
    .addRow(30, map(
        1, intArray(),
        2, intArray(310, 320),
        3, intArray(311, 321, 331, 341),
        4, intArray(312, 322, 332)))
    .build();
RowSetUtilities.verify(expected, actual);
// Add more rows in a second batch.
rsLoader.startBatch();
rootWriter
    .addRow(40, map(1, intArray(410, 420)))
    .addRow(50, map(
        1, intArray(510),
        2, intArray(511, 531)));
// Validate the second batch.
actual = fixture.wrap(rsLoader.harvest());
expected = fixture.rowSetBuilder(actual.schema())
    .addRow(40, map(1, intArray(410, 420)))
    .addRow(50, map(
        1, intArray(510),
        2, intArray(511, 531)))
    .build();
RowSetUtilities.verify(expected, actual);
rsLoader.close();
}
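For readers unfamiliar with the helpers: map() and intArray() are test utilities (in Drill they come from RowSetUtilities) that build expected dict values. map() pairs its varargs into (key, value) entries matching the dict schema built above: INT keys with repeated-INT values. One more row, written under those assumptions:
// Column "d" is a DICT of INT -> repeated INT, so each entry pairs an
// INT key with an int[] built by the intArray() helper.
rootWriter.addRow(60, map(
    1, intArray(610, 620),
    2, intArray(611)));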