Use of org.apache.drill.exec.physical.impl.scan.framework.ManagedReader in the Apache Drill project: class JdbcScanBatchCreator, method createBuilder.
/**
 * Creates the scan framework builder for a JDBC sub-scan. Registers a
 * single {@link JdbcBatchReader} that executes the sub-scan's pushed-down
 * SQL statement against the storage plugin's data source.
 *
 * @param options option manager (unused here; kept for interface parity
 *                with other scan batch creators — TODO confirm)
 * @param subScan the JDBC sub-scan carrying the SQL, columns and plugin
 * @return a configured {@link ScanFrameworkBuilder}
 */
private ScanFrameworkBuilder createBuilder(OptionManager options, JdbcSubScan subScan) {
  ScanFrameworkBuilder builder = new ScanFrameworkBuilder();
  builder.projection(subScan.getColumns());
  builder.setUserName(subScan.getUserName());
  // Exactly one reader per sub-scan: it runs the SQL over JDBC.
  JdbcStoragePlugin plugin = subScan.getPlugin();
  List<ManagedReader<SchemaNegotiator>> readers = Collections.singletonList(
      new JdbcBatchReader(plugin.getDataSource(), subScan.getSql(), subScan.getColumns()));
  builder.setReaderFactory(new BasicScanFactory(readers.iterator()));
  // Type used for projected columns the reader cannot provide.
  builder.nullType(Types.optional(MinorType.VARCHAR));
  return builder;
}
Use of org.apache.drill.exec.physical.impl.scan.framework.ManagedReader in the Apache Drill project: class TestMockRowReader, method testOverflow.
/**
* Test a mock varchar column large enough to cause vector overflow.
*/
/**
 * Test a mock varchar column large enough to cause vector overflow.
 * The scan must deliver the requested rows split across multiple
 * batches, each strictly smaller than {@code ValueVector.MAX_ROW_COUNT}.
 */
@Test
public void testOverflow() {
  int rowCount = ValueVector.MAX_ROW_COUNT;
  MockTableDef.MockColumn[] cols = new MockTableDef.MockColumn[] {
      new MockTableDef.MockColumn("a", MinorType.INT, DataMode.REQUIRED, null, null, null, null, null, null),
      // 1000-byte varchar: wide enough that the vector fills before
      // MAX_ROW_COUNT rows fit into a single batch.
      new MockTableDef.MockColumn("b", MinorType.VARCHAR, DataMode.REQUIRED, 1000, null, null, null, null, null) };
  MockTableDef.MockScanEntry entry = new MockTableDef.MockScanEntry(rowCount, true, null, null, cols);
  MockSubScanPOP config = new MockSubScanPOP("dummy", true, Collections.singletonList(entry));
  ManagedReader<SchemaNegotiator> reader = new ExtendedMockBatchReader(entry);
  // singletonList() is fully typed here, so no @SuppressWarnings("unchecked")
  // is needed (matches the sibling tests in this class).
  List<ManagedReader<SchemaNegotiator>> readers = Collections.singletonList(reader);

  // Create options and the scan operator
  ScanFixture mockBatch = buildScan(config, readers);
  ScanOperatorExec scan = mockBatch.scanOp;

  // First batch: build schema. The reader helps: it returns an
  // empty first batch.
  assertTrue(scan.buildSchema());
  assertEquals(0, scan.batchAccessor().rowCount());

  // Next calls return data, limited by batch size.
  int totalRowCount = 0;
  int batchCount = 0;
  while (scan.next()) {
    assertTrue(scan.batchAccessor().rowCount() < ValueVector.MAX_ROW_COUNT);
    BatchAccessor batchAccessor = scan.batchAccessor();
    totalRowCount += batchAccessor.rowCount();
    batchCount++;
    batchAccessor.release();
  }

  // All requested rows arrived, spread over more than one batch.
  assertEquals(ValueVector.MAX_ROW_COUNT, totalRowCount);
  assertTrue(batchCount > 1);
  mockBatch.close();
}
Use of org.apache.drill.exec.physical.impl.scan.framework.ManagedReader in the Apache Drill project: class TestMockRowReader, method testOptional.
/**
* Verify that the mock reader can generate nullable (optional) columns,
* including filling values with nulls at some percentage, 25% by
* default.
*/
/**
 * Verify that the mock reader can generate nullable (optional) columns,
 * filling values with nulls at a configurable percentage. This test
 * overrides the percentage to 50% via the "nulls" column property
 * (the original comment claimed 25%, which is presumably the reader's
 * default when no property is given — TODO confirm).
 */
@Test
public void testOptional() {
int rowCount = 10;
// Request that 50% of the values in column "b" be null.
Map<String, Object> props = new HashMap<>();
props.put("nulls", 50);
// Two OPTIONAL (nullable) columns; "b" is a width-10 varchar carrying
// the nulls property.
MockTableDef.MockColumn[] cols = new MockTableDef.MockColumn[] { new MockTableDef.MockColumn("a", MinorType.INT, DataMode.OPTIONAL, null, null, null, null, null, null), new MockTableDef.MockColumn("b", MinorType.VARCHAR, DataMode.OPTIONAL, 10, null, null, null, null, props) };
MockTableDef.MockScanEntry entry = new MockTableDef.MockScanEntry(rowCount, true, null, null, cols);
MockSubScanPOP config = new MockSubScanPOP("dummy", true, Collections.singletonList(entry));
ManagedReader<SchemaNegotiator> reader = new ExtendedMockBatchReader(entry);
List<ManagedReader<SchemaNegotiator>> readers = Collections.singletonList(reader);
// Create options and the scan operator
ScanFixture mockBatch = buildScan(config, readers);
ScanOperatorExec scan = mockBatch.scanOp;
// First batch: build schema. The reader helps: it returns an
// empty first batch.
assertTrue(scan.buildSchema());
// Both columns must surface as nullable in the output schema.
TupleMetadata expectedSchema = new SchemaBuilder().addNullable("a", MinorType.INT).addNullable("b", MinorType.VARCHAR, 10).build();
BatchSchema expectedBatchSchema = new BatchSchema(SelectionVectorMode.NONE, expectedSchema.toFieldList());
assertTrue(expectedBatchSchema.isEquivalent(scan.batchAccessor().schema()));
assertEquals(0, scan.batchAccessor().rowCount());
// Next call, return with data.
assertTrue(scan.next());
assertTrue(expectedBatchSchema.isEquivalent(scan.batchAccessor().schema()));
assertEquals(rowCount, scan.batchAccessor().rowCount());
scan.batchAccessor().release();
// EOF
assertFalse(scan.next());
mockBatch.close();
}
Use of org.apache.drill.exec.physical.impl.scan.framework.ManagedReader in the Apache Drill project: class TestMockRowReader, method testBasics.
/**
* Test the most basic case: required integers and strings.
*/
/**
 * Basic sanity test: a required INT column and a required width-10
 * VARCHAR column, delivered as one empty schema batch followed by a
 * single data batch.
 */
@Test
public void testBasics() {
  int numRows = 10;
  MockTableDef.MockColumn[] columns = new MockTableDef.MockColumn[] {
      new MockTableDef.MockColumn("a", MinorType.INT, DataMode.REQUIRED, null, null, null, null, null, null),
      new MockTableDef.MockColumn("b", MinorType.VARCHAR, DataMode.REQUIRED, 10, null, null, null, null, null) };
  MockTableDef.MockScanEntry scanEntry = new MockTableDef.MockScanEntry(numRows, true, null, null, columns);
  MockSubScanPOP popConfig = new MockSubScanPOP("dummy", true, Collections.singletonList(scanEntry));
  ManagedReader<SchemaNegotiator> mockReader = new ExtendedMockBatchReader(scanEntry);
  List<ManagedReader<SchemaNegotiator>> readerList = Collections.singletonList(mockReader);

  // Assemble the scan operator around the single mock reader.
  ScanFixture fixture = buildScan(popConfig, readerList);
  ScanOperatorExec scanOp = fixture.scanOp;

  // Schema phase: the reader hands back an empty first batch carrying
  // only the schema.
  assertTrue(scanOp.buildSchema());
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .add("b", MinorType.VARCHAR, 10) // width is reflected in meta-data
      .buildSchema();
  BatchSchema batchSchema = new BatchSchema(SelectionVectorMode.NONE, schema.toFieldList());
  assertTrue(batchSchema.isEquivalent(scanOp.batchAccessor().schema()));
  assertEquals(0, scanOp.batchAccessor().rowCount());
  scanOp.batchAccessor().release();

  // Data phase: one batch containing every requested row.
  assertTrue(scanOp.next());
  assertTrue(batchSchema.isEquivalent(scanOp.batchAccessor().schema()));
  assertEquals(numRows, scanOp.batchAccessor().rowCount());
  scanOp.batchAccessor().release();

  // EOF
  assertFalse(scanOp.next());
  fixture.close();
}
Use of org.apache.drill.exec.physical.impl.scan.framework.ManagedReader in the Apache Drill project: class TestMockRowReader, method testColumnRepeat.
/**
* Test a repeated column.
*/
/**
 * Verify expansion of a repeated column: column "a" with repeat count 3
 * appears in the output schema as "a1", "a2" and "a3".
 */
@Test
public void testColumnRepeat() {
  int numRows = 10;
  MockTableDef.MockColumn[] columns = new MockTableDef.MockColumn[] {
      // Repeat count 3 expands "a" into three INT columns.
      new MockTableDef.MockColumn("a", MinorType.INT, DataMode.REQUIRED, null, null, null, null, 3, null),
      new MockTableDef.MockColumn("b", MinorType.VARCHAR, DataMode.REQUIRED, 10, null, null, null, null, null) };
  MockTableDef.MockScanEntry scanEntry = new MockTableDef.MockScanEntry(numRows, true, null, null, columns);
  MockSubScanPOP popConfig = new MockSubScanPOP("dummy", true, Collections.singletonList(scanEntry));
  ManagedReader<SchemaNegotiator> mockReader = new ExtendedMockBatchReader(scanEntry);
  List<ManagedReader<SchemaNegotiator>> readerList = Collections.singletonList(mockReader);

  // Assemble the scan operator around the single mock reader.
  ScanFixture fixture = buildScan(popConfig, readerList);
  ScanOperatorExec scanOp = fixture.scanOp;

  // Schema phase: the reader hands back an empty first batch carrying
  // only the (expanded) schema.
  assertTrue(scanOp.buildSchema());
  TupleMetadata schema = new SchemaBuilder()
      .add("a1", MinorType.INT)
      .add("a2", MinorType.INT)
      .add("a3", MinorType.INT)
      .add("b", MinorType.VARCHAR, 10)
      .build();
  BatchSchema batchSchema = new BatchSchema(SelectionVectorMode.NONE, schema.toFieldList());
  assertTrue(batchSchema.isEquivalent(scanOp.batchAccessor().schema()));
  assertEquals(0, scanOp.batchAccessor().rowCount());

  // Data phase: one batch containing every requested row.
  assertTrue(scanOp.next());
  assertTrue(batchSchema.isEquivalent(scanOp.batchAccessor().schema()));
  assertEquals(numRows, scanOp.batchAccessor().rowCount());
  scanOp.batchAccessor().release();

  // EOF
  assertFalse(scanOp.next());
  fixture.close();
}
Aggregations