Use of org.apache.drill.exec.physical.impl.scan.v3.ScanLifecycleBuilder in project drill by apache.
Class TestScanLifecycleTwoReaders, method testShrinkingSchemaWithConflict.
/**
 * Shrinking schema, as above. Explicit projection:<pre><code>
 * SELECT a, b FROM (a) then (a, b)
 * </code></pre><p>
 * But here the missing-column type used in the first reader (the default
 * nullable INT) conflicts with the actual column type (VARCHAR) in the
 * second reader.
 */
@Test
public void testShrinkingSchemaWithConflict() {
  ScanLifecycleBuilder builder = new ScanLifecycleBuilder();
  builder.projection(RowSetTestUtils.projectList("a", "b"));
  builder.readerFactory(new TwoReaderFactory() {
    @Override
    public ManagedReader firstReader(SchemaNegotiator negotiator) {
      return new MockSingleColReader(negotiator);
    }

    @Override
    public ManagedReader secondReader(SchemaNegotiator negotiator) {
      return new MockEarlySchemaReader(negotiator, 1);
    }
  });
  ScanLifecycle scan = buildScan(builder);

  // First reader: provides only column "a"; the projected column "b" is
  // filled in using the default missing-column type (nullable INT).
  RowBatchReader reader = scan.nextReader();
  assertTrue(reader.open());
  assertTrue(reader.next());
  reader.output().clear();
  assertFalse(reader.next());
  reader.close();

  // Second reader: reports "b" as VARCHAR, which conflicts with the
  // nullable INT already chosen, so open() fails.
  reader = scan.nextReader();
  try {
    reader.open();
    fail();
  } catch (UserException e) {
    assertTrue(e.getMessage().contains("conflict"));
  }
  reader.close();
  scan.close();
}
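The conflict above arises because the scan must invent a type for the projected column "b" that the first reader never delivers. One way a reader can avoid that guess is to declare the full table schema through its SchemaNegotiator. The sketch below is a hypothetical reader, not one of the mocks used above; the column types and the tableSchema(schema, isComplete)/build() negotiator calls are assumptions about how the v3 API is typically used, shown for illustration only.

import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.exec.physical.impl.scan.v3.ManagedReader;
import org.apache.drill.exec.physical.impl.scan.v3.SchemaNegotiator;
import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
import org.apache.drill.exec.record.metadata.SchemaBuilder;
import org.apache.drill.exec.record.metadata.TupleMetadata;

// Hypothetical reader: declares both columns up front, even though it can
// populate only "a". With "b" declared as nullable VARCHAR, the scan does not
// need to fall back to the nullable-INT default, so a later reader that
// actually supplies a VARCHAR "b" would no longer conflict.
public class DeclaredSchemaSingleColReader implements ManagedReader {
  private final ResultSetLoader loader;

  public DeclaredSchemaSingleColReader(SchemaNegotiator negotiator) {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addNullable("b", MinorType.VARCHAR)  // declared but never written
        .build();
    negotiator.tableSchema(schema, true);     // schema is complete
    loader = negotiator.build();
  }

  @Override
  public boolean next() {
    // Row writing omitted; only "a" would be populated, "b" stays null.
    return false;
  }

  @Override
  public void close() { }
}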
Use of org.apache.drill.exec.physical.impl.scan.v3.ScanLifecycleBuilder in project drill by apache.
Class TestScanLifecycleBasics, method testEarlySchemaEmpty.
/**
 * Test SELECT * from an early-schema table of () (that is,
 * a schema that consists of zero columns).
 */
@Test
public void testEarlySchemaEmpty() {
  ScanLifecycleBuilder builder = new ScanLifecycleBuilder();
  builder.readerFactory(new SingleReaderFactory() {
    @Override
    public ManagedReader next(SchemaNegotiator negotiator) {
      return new MockEmptySchemaReader(negotiator);
    }
  });
  ScanLifecycle scan = buildScan(builder);
  assertSame(ProjectionType.ALL, scan.schemaTracker().projectionType());

  RowBatchReader reader = scan.nextReader();
  assertTrue(reader.open());

  // Early schema: so output schema is available after open
  TupleMetadata expectedSchema = new SchemaBuilder().build();
  assertEquals(expectedSchema, scan.outputSchema());

  assertTrue(reader.next());
  RowSet expected = fixture.rowSetBuilder(expectedSchema)
      .addRow()
      .addRow()
      .build();
  RowSetUtilities.verify(expected, fixture.wrap(reader.output()));
  assertFalse(reader.next());
  reader.close();
  scan.close();
}
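MockEmptySchemaReader itself is not shown in this excerpt. A zero-column early-schema reader could look roughly like the hypothetical sketch below, which declares an empty, complete schema up front and emits the two empty rows the expected row set asks for; the class name and the negotiator/writer calls are assumptions about how such a mock would be wired.

import org.apache.drill.exec.physical.impl.scan.v3.ManagedReader;
import org.apache.drill.exec.physical.impl.scan.v3.SchemaNegotiator;
import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
import org.apache.drill.exec.physical.resultSet.RowSetLoader;
import org.apache.drill.exec.record.metadata.SchemaBuilder;

// Hypothetical zero-column reader: early schema with no columns, two rows.
public class EmptySchemaReader implements ManagedReader {
  private final ResultSetLoader loader;
  private boolean eof;

  public EmptySchemaReader(SchemaNegotiator negotiator) {
    negotiator.tableSchema(new SchemaBuilder().build(), true); // zero columns
    loader = negotiator.build();
  }

  @Override
  public boolean next() {
    if (eof) {
      return false;
    }
    RowSetLoader writer = loader.writer();
    writer.start();
    writer.save();  // first (empty) row
    writer.start();
    writer.save();  // second (empty) row
    eof = true;
    return true;
  }

  @Override
  public void close() { }
}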
Use of org.apache.drill.exec.physical.impl.scan.v3.ScanLifecycleBuilder in project drill by apache.
Class TestScanLifecycleSchema, method testDefinedSchemaSubset.
/**
 * The defined schema is a subset of the reader's schema; the
 * defined schema acts as a project list.
 */
@Test
public void testDefinedSchemaSubset() {
  ScanLifecycleBuilder builder = new ScanLifecycleBuilder();
  builder.definedSchema(SCHEMA);
  builder.readerFactory(new SingleReaderFactory() {
    @Override
    public ManagedReader next(SchemaNegotiator negotiator) {
      return new MockThreeColReader(negotiator);
    }
  });
  ScanLifecycle scan = buildScan(builder);

  RowBatchReader reader = scan.nextReader();
  assertTrue(reader.open());
  assertEquals(SCHEMA, scan.outputSchema());

  assertTrue(reader.next());
  RowSet expected = fixture.rowSetBuilder(SCHEMA)
      .addRow(101, "wilma")
      .addRow(102, "betty")
      .build();
  RowSetUtilities.verify(expected, fixture.wrap(reader.output()));
  assertFalse(reader.next());
  reader.close();
  scan.close();
}
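The SCHEMA constant used as the defined schema is not shown in this excerpt. Judging from the expected rows (101, "wilma") and (102, "betty"), it is presumably a two-column schema along these lines; the column names and types here are inferred, not copied from the test class.

import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.exec.record.metadata.SchemaBuilder;
import org.apache.drill.exec.record.metadata.TupleMetadata;

// Assumed shape of the test class's SCHEMA constant: INT "a", VARCHAR "b".
private static final TupleMetadata SCHEMA = new SchemaBuilder()
    .add("a", MinorType.INT)
    .add("b", MinorType.VARCHAR)
    .build();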
Use of org.apache.drill.exec.physical.impl.scan.v3.ScanLifecycleBuilder in project drill by apache.
Class TestScanLifecycleSchema, method testLenientProvidedSchemaSubset.
/**
 * Lenient provided schema which is a subset of the reader's schema; the
 * provided schema agrees with the reader types.
 */
@Test
public void testLenientProvidedSchemaSubset() {
  ScanLifecycleBuilder builder = new ScanLifecycleBuilder();
  builder.providedSchema(SCHEMA);
  builder.readerFactory(new SingleReaderFactory() {
    @Override
    public ManagedReader next(SchemaNegotiator negotiator) {
      return new MockThreeColReader(negotiator);
    }
  });
  ScanLifecycle scan = buildScan(builder);
  assertSame(ProjectionType.ALL, scan.schemaTracker().projectionType());

  RowBatchReader reader = scan.nextReader();
  assertTrue(reader.open());
  assertEquals(MockThreeColReader.READER_SCHEMA, scan.outputSchema());

  assertTrue(reader.next());
  RowSet expected = fixture.rowSetBuilder(MockThreeColReader.READER_SCHEMA)
      .addRow(101, "wilma", 1001)
      .addRow(102, "betty", 1002)
      .build();
  RowSetUtilities.verify(expected, fixture.wrap(reader.output()));
  assertFalse(reader.next());
  reader.close();
  scan.close();
}
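MockThreeColReader.READER_SCHEMA is likewise not shown here. Because the provided schema is lenient, the reader may add columns beyond those provided, so the output is the reader's full three-column schema. From the expected rows (101, "wilma", 1001) it is presumably shaped like the following; the third column's name is a guess used only for illustration.

import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.exec.record.metadata.SchemaBuilder;
import org.apache.drill.exec.record.metadata.TupleMetadata;

// Assumed shape of MockThreeColReader.READER_SCHEMA: the two provided
// columns plus an extra INT column (called "c" here only for illustration).
public static final TupleMetadata READER_SCHEMA = new SchemaBuilder()
    .add("a", MinorType.INT)
    .add("b", MinorType.VARCHAR)
    .add("c", MinorType.INT)
    .build();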
Use of org.apache.drill.exec.physical.impl.scan.v3.ScanLifecycleBuilder in project drill by apache.
Class TestScanLifecycleSchema, method testStrictProvidedSchemaSubset.
/**
 * Strict provided schema which is a subset of the reader's schema; the
 * strict schema limits the output to the provided columns, acting as a
 * project list.
 */
@Test
public void testStrictProvidedSchemaSubset() {
  TupleMetadata schema = new SchemaBuilder().addAll(SCHEMA).build();
  schema.setBooleanProperty(TupleMetadata.IS_STRICT_SCHEMA_PROP, true);
  ScanLifecycleBuilder builder = new ScanLifecycleBuilder();
  builder.providedSchema(schema);
  builder.readerFactory(new SingleReaderFactory() {
    @Override
    public ManagedReader next(SchemaNegotiator negotiator) {
      return new MockThreeColReader(negotiator);
    }
  });
  ScanLifecycle scan = buildScan(builder);

  RowBatchReader reader = scan.nextReader();
  assertTrue(reader.open());
  assertEquals(SCHEMA, scan.outputSchema());

  assertTrue(reader.next());
  RowSet expected = fixture.rowSetBuilder(SCHEMA)
      .addRow(101, "wilma")
      .addRow(102, "betty")
      .build();
  RowSetUtilities.verify(expected, fixture.wrap(reader.output()));
  assertFalse(reader.next());
  reader.close();
  scan.close();
}
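Only the first example sets an explicit projection; the others leave projection(...) unset, which is why their schema trackers report ProjectionType.ALL. If an explicit wildcard is preferred, the same test utility can presumably supply one, roughly as in this fragment (assumes RowSetTestUtils.projectAll() is available):

// Fragment: an explicit SELECT * projection, equivalent to the default
// wildcard used when projection(...) is not set.
builder.projection(RowSetTestUtils.projectAll());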