use of org.apache.drill.exec.physical.impl.scan.framework.ManagedReader in project drill by apache.
the class TestMockRowReader method testBatchSize.
/**
* Verify limit on individual batch size (limiting row count per batch).
*/
@Test
public void testBatchSize() {
int rowCount = 20;
int batchSize = 10;
MockTableDef.MockColumn[] cols = new MockTableDef.MockColumn[] {
    new MockTableDef.MockColumn("a", MinorType.INT, DataMode.REQUIRED, null, null, null, null, null, null),
    new MockTableDef.MockColumn("b", MinorType.VARCHAR, DataMode.REQUIRED, 10, null, null, null, null, null)
};
MockTableDef.MockScanEntry entry = new MockTableDef.MockScanEntry(rowCount, true, batchSize, null, cols);
MockSubScanPOP config = new MockSubScanPOP("dummy", true, Collections.singletonList(entry));
ManagedReader<SchemaNegotiator> reader = new ExtendedMockBatchReader(entry);
List<ManagedReader<SchemaNegotiator>> readers = Collections.singletonList(reader);
// Create options and the scan operator
ScanFixture mockBatch = buildScan(config, readers);
ScanOperatorExec scan = mockBatch.scanOp;
// First batch: build schema. The reader helps: it returns an
// empty first batch.
assertTrue(scan.buildSchema());
assertEquals(0, scan.batchAccessor().rowCount());
// Next call, return with data, limited by batch size.
assertTrue(scan.next());
assertEquals(batchSize, scan.batchAccessor().rowCount());
scan.batchAccessor().release();
assertTrue(scan.next());
assertEquals(batchSize, scan.batchAccessor().rowCount());
scan.batchAccessor().release();
// EOF
assertFalse(scan.next());
mockBatch.close();
}
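For context, a ManagedReader in this framework follows an open/next/close contract: open() negotiates the schema and obtains a result set loader, next() fills one batch and returns false at EOF, and close() releases resources. Below is a minimal, hypothetical sketch of such a reader. The class name SketchRowReader and its single INT column are inventions for illustration, and the result-set-loader and SchemaNegotiator package names and method names vary across Drill versions, so treat the imports and calls as approximate rather than Drill's exact API.

// Hypothetical sketch of a ManagedReader that generates rowCount integer rows,
// honoring whatever per-batch limit the framework imposes. Not part of Drill's
// sources; package paths may differ by Drill version.
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
import org.apache.drill.exec.physical.impl.scan.framework.SchemaNegotiator;
import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
import org.apache.drill.exec.physical.resultSet.RowSetLoader;
import org.apache.drill.exec.record.metadata.SchemaBuilder;
import org.apache.drill.exec.record.metadata.TupleMetadata;

public class SketchRowReader implements ManagedReader<SchemaNegotiator> {
  private final int rowCount;
  private ResultSetLoader loader;
  private int rowsWritten;

  public SketchRowReader(int rowCount) {
    this.rowCount = rowCount;
  }

  @Override
  public boolean open(SchemaNegotiator negotiator) {
    // Declare the schema up front so the framework can return the
    // empty "schema-only" first batch that the test above expects.
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .buildSchema();
    negotiator.tableSchema(schema, true);
    loader = negotiator.build();
    return true;
  }

  @Override
  public boolean next() {
    RowSetLoader writer = loader.writer();
    // Write until the data is exhausted or the framework reports the
    // current batch as full (row-count or byte-size limit reached).
    while (rowsWritten < rowCount && !writer.isFull()) {
      writer.start();
      writer.scalar(0).setInt(rowsWritten);
      writer.save();
      rowsWritten++;
    }
    // false signals EOF; rows loaded in this call are still sent downstream.
    return rowsWritten < rowCount;
  }

  @Override
  public void close() { } // nothing to release in this sketch
}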
use of org.apache.drill.exec.physical.impl.scan.framework.ManagedReader in project drill by apache.
the class MockScanBatchCreator method extendedMockScan.
private CloseableRecordBatch extendedMockScan(FragmentContext context, MockSubScanPOP config, List<MockScanEntry> entries) {
List<SchemaPath> projList = new LinkedList<>();
projList.add(SchemaPath.STAR_COLUMN);
// Create batch readers up front. Handy when we know there are
// only one or two; else use an iterator and create them on the fly.
final List<ManagedReader<SchemaNegotiator>> readers = new LinkedList<>();
for (final MockTableDef.MockScanEntry e : entries) {
readers.add(new ExtendedMockBatchReader(e));
}
// Limit the batch size to 10 MB, or whatever the operator definition
// specified.
int batchSizeBytes = 10 * 1024 * 1024;
MockTableDef.MockScanEntry first = entries.get(0);
if (first.getBatchSize() > 0) {
batchSizeBytes = first.getBatchSize();
}
// Set the scan to allow the maximum row count, allowing
// each reader to adjust the batch size smaller if desired.
ScanFrameworkBuilder builder = new ScanFrameworkBuilder();
builder.batchByteLimit(batchSizeBytes);
builder.projection(projList);
builder.setReaderFactory(new BasicScanFactory(readers.iterator()));
ManagedScanFramework framework = new ManagedScanFramework(builder);
return new OperatorRecordBatch(context, config, new ScanOperatorExec(framework, false), false);
}
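The comment in the method above notes that readers can also be created on the fly via an iterator rather than built up front. A hedged sketch of that variant follows, assuming the same BasicScanFactory constructor used above and java.util.Iterator on the imports; the stream's iterator instantiates each ExtendedMockBatchReader only when the framework asks for the next reader.

// Sketch: create readers lazily instead of up front. The mapping runs only
// when the scan framework requests the next reader, so entries that are never
// reached never get a reader instance.
Iterator<ManagedReader<SchemaNegotiator>> lazyReaders =
    entries.stream()
        .<ManagedReader<SchemaNegotiator>>map(ExtendedMockBatchReader::new)
        .iterator();
builder.setReaderFactory(new BasicScanFactory(lazyReaders));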