Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in project drill by apache.
The class TestResultSetLoaderOverflow, method testLargeArray.
/**
 * Create an array that contains more than 64K values. Drill has no numeric
 * limit on array lengths. (Well, it does, but the limit is about 2 billion
 * which, even for bytes, is too large to fit into a vector...)
 */
@Test
public void testLargeArray() {
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
  RowSetLoader rootWriter = rsLoader.writer();
  MaterializedField field = SchemaBuilder.columnSchema("a", MinorType.INT, DataMode.REPEATED);
  rootWriter.addColumn(field);

  // Create a single array as the column value in the first row. When
  // this overflows, an exception is thrown since overflow is not possible.
  rsLoader.startBatch();
  rootWriter.start();
  ScalarWriter array = rootWriter.array(0).scalar();
  try {
    for (int i = 0; i < Integer.MAX_VALUE; i++) {
      array.setInt(i + 1);
    }
    fail();
  } catch (UserException e) {
    // Expected
  }
  rsLoader.close();
}
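For contrast, here is a sketch (not from the Drill sources; it reuses only the calls shown above, and the 100-element bound is an arbitrary assumption) of the supported pattern: many rows, each holding a bounded array, so the loader can overflow at a row boundary instead of failing.

  rsLoader.startBatch();
  ScalarWriter array = rootWriter.array(0).scalar();
  while (!rootWriter.isFull()) {
    rootWriter.start();
    // Bounded per-row array; 100 is an arbitrary, safe size.
    for (int i = 0; i < 100; i++) {
      array.setInt(i);
    }
    rootWriter.save(); // Row boundary: overflow can occur here.
  }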
Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in project drill by apache.
The class TestResultSetLoaderOverflow, method testCloseWithOverflow.
/**
 * Load a batch to overflow. Then, close the loader with the overflow
 * batch unharvested. The Loader should release the memory allocated
 * to the unused overflow vectors.
 */
@Test
public void testCloseWithOverflow() {
  TupleMetadata schema = new SchemaBuilder()
      .add("s", MinorType.VARCHAR)
      .buildSchema();
  ResultSetOptions options = new ResultSetOptionBuilder()
      .rowCountLimit(ValueVector.MAX_ROW_COUNT)
      .readerSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();
  rsLoader.startBatch();
  byte[] value = new byte[512];
  Arrays.fill(value, (byte) 'X');
  int count = 0;
  while (!rootWriter.isFull()) {
    rootWriter.start();
    rootWriter.scalar(0).setBytes(value, value.length);
    rootWriter.save();
    count++;
  }
  assertTrue(count < ValueVector.MAX_ROW_COUNT);

  // Harvest the full batch
  VectorContainer container = rsLoader.harvest();
  BatchValidator.validate(container);
  RowSet result = fixture.wrap(container);
  result.clear();

  // Close without harvesting the overflow batch.
  rsLoader.close();
}
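For comparison, a sketch of the normal shutdown path (assuming the same loader and fixture as above), where the caller harvests the carried-over overflow row before closing instead of letting close() discard it:

  // Start the next batch: the overflow row becomes its first row.
  rsLoader.startBatch();
  VectorContainer overflow = rsLoader.harvest();
  BatchValidator.validate(overflow);
  fixture.wrap(overflow).clear(); // Consume and release the final row.
  rsLoader.close();               // No look-ahead vectors left to free.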
Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in project drill by apache.
The class TestResultSetLoaderOverflow, method testOversizeArray.
/**
 * Case where a single array fills up the vector to the maximum size
 * limit. Overflow won't work here; the attempt will fail with a user
 * exception.
 */
@Test
public void testOversizeArray() {
  TupleMetadata schema = new SchemaBuilder()
      .addArray("s", MinorType.VARCHAR)
      .buildSchema();
  ResultSetOptions options = new ResultSetOptionBuilder()
      .rowCountLimit(ValueVector.MAX_ROW_COUNT)
      .readerSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();

  // Create a single array as the column value in the first row. When
  // this overflows, an exception is thrown since overflow is not possible.
  rsLoader.startBatch();
  byte[] value = new byte[473];
  Arrays.fill(value, (byte) 'X');
  rootWriter.start();
  ScalarWriter array = rootWriter.array(0).scalar();
  try {
    for (int i = 0; i < ValueVector.MAX_ROW_COUNT; i++) {
      array.setBytes(value, value.length);
    }
    fail();
  } catch (UserException e) {
    assertTrue(e.getMessage().contains("column value is larger than the maximum"));
  }
  rsLoader.close();
}
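Why this must fail (a worked check, not part of the test): all of the array data belongs to a single row, so the loader cannot split it at a row boundary.

  // 473 bytes per element, MAX_ROW_COUNT (64K) elements attempted:
  long bytesNeeded = 473L * ValueVector.MAX_ROW_COUNT; // 30,998,528 bytes, ~29.6 MiB
  // One row cannot be split across batches, so instead of overflowing,
  // the loader reports the oversize value with a UserException.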
Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in project drill by apache.
The class TestResultSetLoaderOverflow, method testBatchSizeLimit.
/**
 * Test that the writer detects overflow when the configured batch size
 * limit is reached. The offending column value should be moved to the
 * next batch.
 */
@Test
public void testBatchSizeLimit() {
  TupleMetadata schema = new SchemaBuilder()
      .add("s", MinorType.VARCHAR)
      .buildSchema();
  ResultSetOptions options = new ResultSetOptionBuilder()
      .rowCountLimit(ValueVector.MAX_ROW_COUNT)
      .readerSchema(schema)
      .batchSizeLimit(
          8 * 1024 * 1024 +                  // Data
          2 * ValueVector.MAX_ROW_COUNT * 4) // Offsets, doubled because of +1
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();
  rsLoader.startBatch();
  byte[] value = new byte[512];
  Arrays.fill(value, (byte) 'X');

  // Our row count should include the overflow row
  int expectedCount = 8 * 1024 * 1024 / value.length;

  // First batch, with overflow
  {
    int count = 0;
    while (!rootWriter.isFull()) {
      rootWriter.start();
      rootWriter.scalar(0).setBytes(value, value.length);
      rootWriter.save();
      count++;
    }
    assertEquals(expectedCount + 1, count);

    // Loader's row count should include only "visible" rows
    assertEquals(expectedCount, rootWriter.rowCount());

    // Total count should include invisible and look-ahead rows.
    assertEquals(expectedCount + 1, rsLoader.totalRowCount());

    // Result should exclude the overflow row
    VectorContainer container = rsLoader.harvest();
    BatchValidator.validate(container);
    RowSet result = fixture.wrap(container);
    assertEquals(expectedCount, result.rowCount());
    result.clear();
  }

  // Next batch should start with the overflow row
  {
    rsLoader.startBatch();
    assertEquals(1, rootWriter.rowCount());
    assertEquals(expectedCount + 1, rsLoader.totalRowCount());
    VectorContainer container = rsLoader.harvest();
    BatchValidator.validate(container);
    RowSet result = fixture.wrap(container);
    assertEquals(1, result.rowCount());
    result.clear();
  }
  rsLoader.close();
}
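The expected counts follow directly from the configured limit (a worked check, not part of the test itself):

  // Data portion of the limit is 8 MiB; each row holds a 512-byte value:
  int dataLimit = 8 * 1024 * 1024;   // 8,388,608 bytes
  int visibleRows = dataLimit / 512; // 16,384 rows fill the data budget
  // The writer accepts one extra (overflow) row before isFull() trips,
  // so the loop runs 16,385 times while harvest() returns 16,384 rows.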
Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in project drill by apache.
The class TestResultSetLoaderOverflow, method testVectorSizeLimit.
/**
 * Test that the writer detects a vector overflow. The offending column
 * value should be moved to the next batch.
 */
@Test
public void testVectorSizeLimit() {
  TupleMetadata schema = new SchemaBuilder()
      .add("s", MinorType.VARCHAR)
      .buildSchema();
  ResultSetOptions options = new ResultSetOptionBuilder()
      .rowCountLimit(ValueVector.MAX_ROW_COUNT)
      .readerSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();
  rsLoader.startBatch();
  byte[] value = new byte[512];
  Arrays.fill(value, (byte) 'X');

  // Number of rows should be driven by vector size.
  // Our row count should include the overflow row
  int expectedCount = ValueVector.MAX_BUFFER_SIZE / value.length;
  {
    int count = 0;
    while (!rootWriter.isFull()) {
      rootWriter.start();
      rootWriter.scalar(0).setBytes(value, value.length);
      rootWriter.save();
      count++;
    }
    assertEquals(expectedCount + 1, count);

    // Loader's row count should include only "visible" rows
    assertEquals(expectedCount, rootWriter.rowCount());

    // Total count should include invisible and look-ahead rows.
    assertEquals(expectedCount + 1, rsLoader.totalRowCount());

    // Result should exclude the overflow row
    VectorContainer container = rsLoader.harvest();
    BatchValidator.validate(container);
    RowSet result = fixture.wrap(container);
    assertEquals(expectedCount, result.rowCount());
    result.clear();
  }

  // Next batch should start with the overflow row
  {
    rsLoader.startBatch();
    assertEquals(1, rootWriter.rowCount());
    assertEquals(expectedCount + 1, rsLoader.totalRowCount());
    VectorContainer container = rsLoader.harvest();
    BatchValidator.validate(container);
    RowSet result = fixture.wrap(container);
    assertEquals(1, result.rowCount());
    result.clear();
  }
  rsLoader.close();
}
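Here the binding constraint is the per-vector size cap rather than an overall batch budget. A worked check (the exact value of MAX_BUFFER_SIZE lives in ValueVector; the 16 MiB figure below is an assumption):

  // Rows until the VARCHAR data vector hits its size cap:
  int visibleRows = ValueVector.MAX_BUFFER_SIZE / 512; // 32,768 rows for a 16 MiB cap
  // As before, one extra overflow row is written, then carried into the
  // next batch by the following startBatch() call.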