Usage example of org.apache.drill.exec.physical.impl.scan.ScanOperatorExec in the Apache Drill project.
From class TestScanLateSchema, method testLateSchemaEarlyCloseWithData.
/**
 * Verify that closing a late-schema scan before the look-ahead batch
 * (created to infer the schema) has been consumed still closes the
 * reader cleanly.
 */
@Test
public void testLateSchemaEarlyCloseWithData() {
  // Mock reader configured for two batches; because returnDataOnFirst
  // is true, the very first batch already carries data.
  ObservableCreator creator = new ObservableCreator() {
    @Override
    public ManagedReader create(SchemaNegotiator negotiator) {
      MockLateSchemaReader mockReader = new MockLateSchemaReader(negotiator);
      mockReader.batchLimit = 2;
      mockReader.returnDataOnFirst = true;
      return mockReader;
    }
  };
  ScanFixture scanFixture = simpleFixture(creator);
  ScanOperatorExec scanOp = scanFixture.scanOp;

  // Building the schema forces creation of the look-ahead batch.
  assertTrue(scanOp.buildSchema());

  // Close without ever consuming that look-ahead batch.
  scanFixture.close();

  // Exactly one batch was read, and the reader was closed.
  MockLateSchemaReader mockReader = creator.reader();
  assertEquals(1, mockReader.batchCount);
  assertTrue(mockReader.closeCalled);
}
Usage example of org.apache.drill.exec.physical.impl.scan.ScanOperatorExec in the Apache Drill project.
From class TestScanLateSchema, method testLateSchemaLifecycle.
/**
 * Most basic test of a reader that discovers its schema as it goes
 * along. Validates the fundamental life-cycle steps before moving on
 * to more complex variations.
 */
@Test
public void testLateSchemaLifecycle() {
  // Mock reader producing two batches: the first is schema-only,
  // the second carries the data.
  ReaderCreator creator = negotiator -> {
    MockLateSchemaReader mockReader = new MockLateSchemaReader(negotiator);
    mockReader.batchLimit = 2;
    mockReader.returnDataOnFirst = false;
    return mockReader;
  };

  // Build the scan operator.
  ScanFixture scanFixture = simpleFixture(creator);
  ScanOperatorExec scanOp = scanFixture.scanOp;

  // First batch establishes the schema. The reader offers no rows
  // here, so the batch is empty.
  assertTrue(scanOp.buildSchema());
  assertEquals(0, scanOp.batchAccessor().rowCount());

  // Construct the expected result set.
  SingleRowSet expected = makeExpected(20);
  RowSetComparison verifier = new RowSetComparison(expected);
  assertEquals(expected.batchSchema(), scanOp.batchAccessor().schema());

  // Second call delivers the data batch.
  assertTrue(scanOp.next());
  verifier.verifyAndClearAll(fixture.wrap(scanOp.batchAccessor().container()));

  // EOF
  assertFalse(scanOp.next());
  assertEquals(0, scanOp.batchAccessor().rowCount());
  scanFixture.close();
}
Usage example of org.apache.drill.exec.physical.impl.scan.ScanOperatorExec in the Apache Drill project.
From class TestScanLateSchema, method testLateSchemaNoData.
/**
 * Verify behavior when a late-schema reader reports EOF immediately,
 * producing no batches at all.
 */
@Test
public void testLateSchemaNoData() {
  // Mock reader that hits EOF up front: batchLimit of zero means
  // no batches, not even a schema-only one.
  ReaderCreator creator = negotiator -> {
    MockLateSchemaReader mockReader = new MockLateSchemaReader(negotiator);
    mockReader.batchLimit = 0;
    mockReader.returnDataOnFirst = false;
    return mockReader;
  };
  ScanFixture scanFixture = simpleFixture(creator);
  ScanOperatorExec scanOp = scanFixture.scanOp;

  // Schema build fails immediately: the reader has nothing to offer.
  assertFalse(scanOp.buildSchema());
  scanFixture.close();
}
Usage example of org.apache.drill.exec.physical.impl.scan.ScanOperatorExec in the Apache Drill project.
From class TestScanLimit, method testLimitOnSecondReader.
/**
 * LIMIT 125: the first reader runs to completion (two 50-row batches);
 * the limit truncates the second reader's output.
 */
@Test
public void testLimitOnSecondReader() {
  TestFixture testFixture = new TestFixture(125);
  ScanOperatorExec scanOp = testFixture.scan;
  assertTrue(scanOp.buildSchema());

  // Two full 50-row batches from the first reader.
  for (int i = 0; i < 2; i++) {
    assertTrue(scanOp.next());
    BatchAccessor fullBatch = scanOp.batchAccessor();
    assertEquals(50, fullBatch.rowCount());
    fullBatch.release();
  }

  // First batch of the second reader, cut to 25 rows by the limit.
  assertTrue(scanOp.next());
  BatchAccessor truncatedBatch = scanOp.batchAccessor();
  assertEquals(25, truncatedBatch.rowCount());
  truncatedBatch.release();

  // The limit prevents any further batches.
  assertFalse(scanOp.next());
  testFixture.close();

  // Both readers were created.
  assertEquals(2, testFixture.createCount());
}
Usage example of org.apache.drill.exec.physical.impl.scan.ScanOperatorExec in the Apache Drill project.
From class TestFileScan, method testImplicitColumns.
/**
 * Basic sanity test of a couple of implicit columns, along with all
 * table columns in table order. Full coverage of implicit columns is
 * done on lower-level components.
 */
@Test
public void testImplicitColumns() {
  ReaderCreator creator = negotiator -> {
    MockEarlySchemaReader mockReader = new MockEarlySchemaReader(negotiator);
    mockReader.batchLimit = 1;
    return mockReader;
  };

  // Project the table columns plus two implicit file columns.
  FileScanFixtureBuilder builder = new FileScanFixtureBuilder();
  builder.setProjection("a", "b", "filename", "suffix");
  builder.addReader(creator);
  ScanFixture scanFixture = builder.build();
  ScanOperatorExec scanOp = scanFixture.scanOp;

  // Expected schema and rows: data columns followed by implicit columns.
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addNullable("b", MinorType.VARCHAR)
      .add("filename", MinorType.VARCHAR)
      .add("suffix", MinorType.VARCHAR)
      .build();
  SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
      .addRow(10, "fred", MOCK_FILE_NAME, MOCK_SUFFIX)
      .addRow(20, "wilma", MOCK_FILE_NAME, MOCK_SUFFIX)
      .build();

  // The built schema must include the implicit columns.
  assertTrue(scanOp.buildSchema());
  assertEquals(expected.container().getSchema(), scanOp.batchAccessor().schema());
  scanOp.batchAccessor().release();

  // One data batch, complete with implicit column values.
  assertTrue(scanOp.next());
  RowSetUtilities.verify(expected, fixture.wrap(scanOp.batchAccessor().container()));

  // EOF
  assertFalse(scanOp.next());
  assertEquals(0, scanOp.batchAccessor().rowCount());
  scanFixture.close();
}
Aggregations