Use of org.apache.drill.exec.physical.resultSet.impl.ResultSetLoaderImpl.ResultSetOptions in project drill by apache.
From the class TestResultSetLoaderLimits, method testLimit0.
/**
* Limit 0 is used to obtain only the schema.
*/
@Test
public void testLimit0() {
  ResultSetOptions options = new ResultSetOptionBuilder()
      .limit(0)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);

  // Can define a schema-only batch.
  assertTrue(rsLoader.startBatch());
  RowSetLoader rootWriter = rsLoader.writer();
  rootWriter.addColumn(SchemaBuilder.columnSchema("s", MinorType.VARCHAR, DataMode.REQUIRED));

  // But, can't add any rows.
  assertTrue(rootWriter.isFull());
  RowSet result = fixture.wrap(rsLoader.harvest());
  assertEquals(0, result.rowCount());
  assertTrue(rsLoader.atLimit());
  TupleMetadata schema = new SchemaBuilder()
      .add("s", MinorType.VARCHAR)
      .buildSchema();
  assertTrue(schema.equals(result.schema()));
  result.clear();

  // Can't start a data batch.
  assertFalse(rsLoader.startBatch());

  // Can't start a row.
  assertFalse(rootWriter.start());
  rsLoader.close();
}
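As a usage sketch (not taken from the Drill sources): the same pattern lets a reader implementation answer a schema-only probe. The standalone allocator variable and the column name below are assumptions for illustration; the test above obtains its allocator from the shared test fixture.

// Sketch: schema-only probe with a limit-0 loader. `allocator` is assumed
// to be an available BufferAllocator; the test uses fixture.allocator().
ResultSetOptions schemaOnlyOptions = new ResultSetOptionBuilder()
    .limit(0)
    .build();
ResultSetLoader schemaLoader = new ResultSetLoaderImpl(allocator, schemaOnlyOptions);
schemaLoader.startBatch();
RowSetLoader schemaWriter = schemaLoader.writer();

// Declare the columns the data source would produce; no rows can be
// written because the writer reports full immediately under limit 0.
schemaWriter.addColumn(SchemaBuilder.columnSchema("name", MinorType.VARCHAR, DataMode.REQUIRED));
VectorContainer schemaBatch = schemaLoader.harvest(); // zero rows, full schema

// ... hand the empty batch downstream, then release it.
schemaBatch.clear();
schemaLoader.close();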
Use of org.apache.drill.exec.physical.resultSet.impl.ResultSetLoaderImpl.ResultSetOptions in project drill by apache.
From the class TestResultSetLoaderLimits, method testCustomRowLimit.
/**
* Verify that the caller can set a row limit lower than the default.
*/
@Test
public void testCustomRowLimit() {
  // Try to set a default value larger than the hard limit. Value
  // is truncated to the limit.
  ResultSetOptions options = new ResultSetOptionBuilder()
      .rowCountLimit(ValueVector.MAX_ROW_COUNT + 1)
      .build();
  assertEquals(ValueVector.MAX_ROW_COUNT, options.rowCountLimit);

  // Just a bit of paranoia that we check against the vector limit,
  // not any previous value...
  options = new ResultSetOptionBuilder()
      .rowCountLimit(ValueVector.MAX_ROW_COUNT + 1)
      .rowCountLimit(TEST_ROW_LIMIT)
      .build();
  assertEquals(TEST_ROW_LIMIT, options.rowCountLimit);
  options = new ResultSetOptionBuilder()
      .rowCountLimit(TEST_ROW_LIMIT)
      .rowCountLimit(ValueVector.MAX_ROW_COUNT + 1)
      .build();
  assertEquals(ValueVector.MAX_ROW_COUNT, options.rowCountLimit);

  // Can't set the limit lower than 1
  options = new ResultSetOptionBuilder()
      .rowCountLimit(0)
      .build();
  assertEquals(1, options.rowCountLimit);

  // Do load with a (valid) limit lower than the default.
  options = new ResultSetOptionBuilder()
      .rowCountLimit(TEST_ROW_LIMIT)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  assertEquals(TEST_ROW_LIMIT, rsLoader.targetRowCount());
  RowSetLoader rootWriter = rsLoader.writer();
  rootWriter.addColumn(SchemaBuilder.columnSchema("s", MinorType.VARCHAR, DataMode.REQUIRED));
  rsLoader.startBatch();
  int count = fillToLimit(rootWriter);
  assertEquals(TEST_ROW_LIMIT, count);
  assertEquals(count, rootWriter.rowCount());

  // Should fail to write beyond the row limit
  assertFalse(rootWriter.start());
  try {
    rootWriter.save();
    fail();
  } catch (IllegalStateException e) {
    // Expected
  }
  rsLoader.harvest().clear();
  rsLoader.startBatch();
  assertEquals(0, rootWriter.rowCount());
  rsLoader.close();
}
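Note that TEST_ROW_LIMIT and fillToLimit() are defined elsewhere in TestResultSetLoaderLimits and are not shown on this page. A plausible sketch of the helper, assuming it simply writes rows until the writer reports full and returns the count (the actual body in the Drill sources may differ):

// Assumed shape of the helper and constant referenced above; illustrative only.
private static final int TEST_ROW_LIMIT = 1024;

private int fillToLimit(RowSetLoader rootWriter) {
  int count = 0;
  while (!rootWriter.isFull()) {
    rootWriter.start();
    rootWriter.scalar(0).setString("foo");
    rootWriter.save();
    count++;
  }
  return count;
}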
Use of org.apache.drill.exec.physical.resultSet.impl.ResultSetLoaderImpl.ResultSetOptions in project drill by apache.
From the class TestResultSetSchemaChange, method testSchemaChangeWithOverflow.
/**
 * Test a schema change on the row that overflows. If the
 * new column is added after overflow, it appears as
 * a schema change in the following batch. This is fine, as
 * we are essentially time-shifting: pretending that the
 * overflow row was written in the next batch (which, in
 * fact, it is: that is what overflow means).
 */
@Test
public void testSchemaChangeWithOverflow() {
  ResultSetOptions options = new ResultSetOptionBuilder()
      .rowCountLimit(ValueVector.MAX_ROW_COUNT)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();
  rootWriter.addColumn(SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.REQUIRED));
  rsLoader.startBatch();
  byte[] value = new byte[512];
  Arrays.fill(value, (byte) 'X');
  int count = 0;
  while (!rootWriter.isFull()) {
    rootWriter.start();
    rootWriter.scalar(0).setBytes(value, value.length);
    if (rootWriter.isFull()) {
      rootWriter.addColumn(SchemaBuilder.columnSchema("b", MinorType.INT, DataMode.OPTIONAL));
      rootWriter.scalar(1).setInt(count);

      // Add a Varchar to ensure its offset fiddling is done properly
      rootWriter.addColumn(SchemaBuilder.columnSchema("c", MinorType.VARCHAR, DataMode.OPTIONAL));
      rootWriter.scalar(2).setString("c-" + count);

      // Allow adding a required column at this point.
      // (Not intuitively obvious that this should work; we back-fill
      // with zeros.)
      rootWriter.addColumn(SchemaBuilder.columnSchema("d", MinorType.INT, DataMode.REQUIRED));
    }
    rootWriter.save();
    count++;
  }

  // Result should include only the first column.
  SchemaBuilder schemaBuilder = new SchemaBuilder()
      .add("a", MinorType.VARCHAR);
  BatchSchema expectedSchema = new BatchSchemaBuilder()
      .withSchemaBuilder(schemaBuilder)
      .build();
  RowSet result = fixture.wrap(rsLoader.harvest());
  assertTrue(result.batchSchema().isEquivalent(expectedSchema));
  assertEquals(count - 1, result.rowCount());
  result.clear();
  assertEquals(1, rsLoader.schemaVersion());

  // Double check: still can add a required column after
  // starting the next batch. (No longer in overflow state.)
  rsLoader.startBatch();
  rootWriter.addColumn(SchemaBuilder.columnSchema("e", MinorType.INT, DataMode.REQUIRED));

  // Next batch should start with the overflow row, including
  // the column added at the end of the previous batch, after
  // overflow.
  result = fixture.wrap(rsLoader.harvest());
  assertEquals(5, rsLoader.schemaVersion());
  assertEquals(1, result.rowCount());
  BatchSchemaBuilder batchSchemaBuilder = new BatchSchemaBuilder(expectedSchema);
  batchSchemaBuilder.schemaBuilder()
      .addNullable("b", MinorType.INT)
      .addNullable("c", MinorType.VARCHAR)
      .add("d", MinorType.INT)
      .add("e", MinorType.INT);
  expectedSchema = batchSchemaBuilder.build();
  assertTrue(result.batchSchema().isEquivalent(expectedSchema));
  RowSetReader reader = result.reader();
  reader.next();
  assertEquals(count - 1, reader.scalar(1).getInt());
  assertEquals("c-" + (count - 1), reader.scalar(2).getString());
  assertEquals(0, reader.scalar("d").getInt());
  assertEquals(0, reader.scalar("e").getInt());
  result.clear();
  rsLoader.close();
}
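Two points in this test are easy to miss. First, the schema version appears to bump once per added column: a at version 1, then b, c, d during overflow, and e in the next batch, giving the final version of 5; the first harvest still reports version 1 because columns added after overflow stay hidden until the following batch. Second, stripped of assertions, the loop above is just the standard RowSetLoader write pattern. A condensed sketch, reusing the test's variables (the comments are interpretation, not Drill documentation):

// Canonical write loop: isFull() flips when a vector overflows, and the
// in-flight row is silently moved to a look-ahead batch. Columns added at
// that point therefore first appear in the *next* harvested batch.
rsLoader.startBatch();
while (!rootWriter.isFull()) {
  rootWriter.start();                                  // begin a row
  rootWriter.scalar(0).setBytes(value, value.length);  // write column values
  rootWriter.save();                                   // commit (possibly into the look-ahead batch)
}
VectorContainer batch = rsLoader.harvest();            // excludes the overflow row
// After the next startBatch(), the overflow row becomes row 0 of the new batch.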