Example use of org.apache.drill.exec.physical.impl.scan.ScanOperatorExec in the Apache Drill project, taken from the class TestMockRowReader, method testColumnRepeat.
/**
 * Verifies that a mock column declared with a repeat count of 3
 * expands into distinct numbered fields (a1, a2, a3) in the scan
 * output schema, alongside the non-repeated column "b".
 */
@Test
public void testColumnRepeat() {
  final int numRows = 10;

  // Column "a" carries a repeat count of 3; "b" is a plain VARCHAR(10).
  MockTableDef.MockColumn[] columns = {
      new MockTableDef.MockColumn("a", MinorType.INT, DataMode.REQUIRED, null, null, null, null, 3, null),
      new MockTableDef.MockColumn("b", MinorType.VARCHAR, DataMode.REQUIRED, 10, null, null, null, null, null)
  };
  MockTableDef.MockScanEntry scanEntry = new MockTableDef.MockScanEntry(numRows, true, null, null, columns);
  MockSubScanPOP subScan = new MockSubScanPOP("dummy", true, Collections.singletonList(scanEntry));

  ManagedReader<SchemaNegotiator> mockReader = new ExtendedMockBatchReader(scanEntry);
  List<ManagedReader<SchemaNegotiator>> readerList = Collections.singletonList(mockReader);

  // Set up the scan operator over the single mock reader.
  ScanFixture scanFixture = buildScan(subScan, readerList);
  ScanOperatorExec scanOp = scanFixture.scanOp;

  // First batch carries schema only; the reader returns zero rows.
  assertTrue(scanOp.buildSchema());
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a1", MinorType.INT)
      .add("a2", MinorType.INT)
      .add("a3", MinorType.INT)
      .add("b", MinorType.VARCHAR, 10)
      .build();
  BatchSchema expectedBatchSchema = new BatchSchema(SelectionVectorMode.NONE, expectedSchema.toFieldList());
  assertTrue(expectedBatchSchema.isEquivalent(scanOp.batchAccessor().schema()));
  assertEquals(0, scanOp.batchAccessor().rowCount());

  // Second batch delivers the data rows under the same schema.
  assertTrue(scanOp.next());
  assertTrue(expectedBatchSchema.isEquivalent(scanOp.batchAccessor().schema()));
  assertEquals(numRows, scanOp.batchAccessor().rowCount());
  scanOp.batchAccessor().release();

  // All rows consumed: EOF.
  assertFalse(scanOp.next());
  scanFixture.close();
}
Example use of org.apache.drill.exec.physical.impl.scan.ScanOperatorExec in the Apache Drill project, taken from the class TestMockRowReader, method testBatchSize.
/**
 * Verifies the per-batch row limit: a 20-row scan with a batch size
 * of 10 must deliver exactly two data batches of 10 rows each.
 */
@Test
public void testBatchSize() {
  final int totalRows = 20;
  final int rowsPerBatch = 10;

  // Two-column mock table: a required INT and a required VARCHAR(10).
  MockTableDef.MockColumn[] columns = {
      new MockTableDef.MockColumn("a", MinorType.INT, DataMode.REQUIRED, null, null, null, null, null, null),
      new MockTableDef.MockColumn("b", MinorType.VARCHAR, DataMode.REQUIRED, 10, null, null, null, null, null)
  };
  MockTableDef.MockScanEntry scanEntry = new MockTableDef.MockScanEntry(totalRows, true, rowsPerBatch, null, columns);
  MockSubScanPOP subScan = new MockSubScanPOP("dummy", true, Collections.singletonList(scanEntry));

  ManagedReader<SchemaNegotiator> mockReader = new ExtendedMockBatchReader(scanEntry);
  List<ManagedReader<SchemaNegotiator>> readerList = Collections.singletonList(mockReader);

  // Set up the scan operator over the single mock reader.
  ScanFixture scanFixture = buildScan(subScan, readerList);
  ScanOperatorExec scanOp = scanFixture.scanOp;

  // Schema-only first batch: zero rows.
  assertTrue(scanOp.buildSchema());
  assertEquals(0, scanOp.batchAccessor().rowCount());

  // Each data batch is capped at the configured batch size.
  assertTrue(scanOp.next());
  assertEquals(rowsPerBatch, scanOp.batchAccessor().rowCount());
  scanOp.batchAccessor().release();

  assertTrue(scanOp.next());
  assertEquals(rowsPerBatch, scanOp.batchAccessor().rowCount());
  scanOp.batchAccessor().release();

  // All 20 rows consumed: EOF.
  assertFalse(scanOp.next());
  scanFixture.close();
}
Example use of org.apache.drill.exec.physical.impl.scan.ScanOperatorExec in the Apache Drill project, taken from the class TestScanLateSchema, method testLateSchemaDataOnFirst.
/**
 * Verifies the late-schema path when the reader returns data on its
 * very first batch: buildSchema() still yields a zero-row schema-only
 * batch, and the data arrives on the subsequent next() call.
 */
@Test
public void testLateSchemaDataOnFirst() {
  // Mock reader that declares its schema late and delivers its data
  // in its first (and only) batch.
  ReaderCreator readerCreator = negotiator -> {
    MockLateSchemaReader mockReader = new MockLateSchemaReader(negotiator);
    mockReader.batchLimit = 1;
    mockReader.returnDataOnFirst = true;
    return mockReader;
  };
  ScanFixture scanFixture = simpleFixture(readerCreator);
  ScanOperatorExec scanOp = scanFixture.scanOp;

  // Schema-only first batch: schema is present but row count is zero.
  assertTrue(scanOp.buildSchema());
  assertEquals(0, scanOp.batchAccessor().rowCount());
  SingleRowSet expected = makeExpected();
  RowSetComparison verifier = new RowSetComparison(expected);
  assertEquals(expected.batchSchema(), scanOp.batchAccessor().schema());

  // Second call delivers the data rows.
  assertTrue(scanOp.next());
  verifier.verifyAndClearAll(fixture.wrap(scanOp.batchAccessor().container()));

  // EOF: no further batches, and the accessor reports zero rows.
  assertFalse(scanOp.next());
  assertEquals(0, scanOp.batchAccessor().rowCount());
  scanFixture.close();
}
Example use of org.apache.drill.exec.physical.impl.scan.ScanOperatorExec in the Apache Drill project, taken from the class TestScanOuputSchema, method testStrictProvidedSchemaWithWildcardAndSpecialCols.
/**
 * Verifies a strict provided schema combined with a wildcard projection
 * and a column excluded from wildcard expansion: "a" (present in the
 * reader) is dropped from the output, while "d" and "e" (absent from
 * the reader) are filled from their declared default values.
 */
@Test
public void testStrictProvidedSchemaWithWildcardAndSpecialCols() {
  // Provided schema: "a" exists in the reader; "d" and "e" do not.
  TupleMetadata providedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)      // Projected, in reader
      .add("d", MinorType.BIGINT)   // Projected, not in reader
      .add("e", MinorType.BIGINT)   // Not projected, not in reader
      .buildSchema();
  providedSchema.metadata("d").setDefaultValue("20");
  providedSchema.metadata("e").setDefaultValue("30");
  // Strict mode: only columns in the provided schema may appear.
  providedSchema.setProperty(TupleMetadata.IS_STRICT_SCHEMA_PROP, Boolean.TRUE.toString());
  // "a" is excluded from wildcard expansion, so it drops out of the result.
  providedSchema.metadata("a").setBooleanProperty(ColumnMetadata.EXCLUDE_FROM_WILDCARD, true);

  BaseScanFixtureBuilder scanBuilder = new BaseScanFixtureBuilder(fixture);
  // Project everything; the provided schema constrains the output.
  scanBuilder.setProjection(RowSetTestUtils.projectAll());
  scanBuilder.addReader(negotiator -> new MockSimpleReader(negotiator));
  scanBuilder.builder.providedSchema(providedSchema);
  scanBuilder.builder.nullType(Types.optional(MinorType.VARCHAR));
  ScanFixture scanFixture = scanBuilder.build();
  ScanOperatorExec scanOp = scanFixture.scanOp;

  // Only "d" and "e" survive: "a" was excluded from the wildcard.
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("d", MinorType.BIGINT)
      .add("e", MinorType.BIGINT)
      .buildSchema();

  // Schema-only first batch.
  assertTrue(scanOp.buildSchema());
  {
    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema).build();
    RowSetUtilities.verify(expected, fixture.wrap(scanOp.batchAccessor().container()));
  }

  // Data batch: declared defaults fill the missing columns.
  assertTrue(scanOp.next());
  {
    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema).addRow(20L, 30L).build();
    RowSetUtilities.verify(expected, fixture.wrap(scanOp.batchAccessor().container()));
  }

  assertFalse(scanOp.next());
  scanFixture.close();
}
Example use of org.apache.drill.exec.physical.impl.scan.ScanOperatorExec in the Apache Drill project, taken from the class TestScanOuputSchema, method testProvidedSchemaWithWildcard.
/**
 * Verifies a non-strict provided schema with a wildcard projection and
 * extra reader columns: provided columns "a", "d", "e" come first (with
 * "d" and "e" filled from defaults), and the reader's additional columns
 * "b" and "c" are appended to the output.
 */
@Test
public void testProvidedSchemaWithWildcard() {
  // Provided schema: "a" exists in the reader; "d" and "e" do not.
  TupleMetadata providedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)      // Projected, in reader
      .add("d", MinorType.BIGINT)   // Projected, not in reader
      .add("e", MinorType.BIGINT)   // Not projected, not in reader
      .buildSchema();
  providedSchema.metadata("d").setDefaultValue("20");
  providedSchema.metadata("e").setDefaultValue("30");

  BaseScanFixtureBuilder scanBuilder = new BaseScanFixtureBuilder(fixture);
  scanBuilder.setProjection(RowSetTestUtils.projectAll());
  scanBuilder.addReader(negotiator -> new MockSimpleReader(negotiator));
  scanBuilder.builder.providedSchema(providedSchema);
  scanBuilder.builder.nullType(Types.optional(MinorType.VARCHAR));
  ScanFixture scanFixture = scanBuilder.build();
  ScanOperatorExec scanOp = scanFixture.scanOp;

  // Non-strict: reader-only columns "b" and "c" are kept, appended
  // after the provided columns.
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .add("d", MinorType.BIGINT)
      .add("e", MinorType.BIGINT)
      .add("b", MinorType.VARCHAR)
      .add("c", MinorType.VARCHAR)
      .buildSchema();

  // Schema-only first batch.
  assertTrue(scanOp.buildSchema());
  {
    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema).build();
    RowSetUtilities.verify(expected, fixture.wrap(scanOp.batchAccessor().container()));
  }

  // Data batch: reader values plus declared defaults.
  assertTrue(scanOp.next());
  {
    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema).addRow(10, 20L, 30L, "foo", "bar").build();
    RowSetUtilities.verify(expected, fixture.wrap(scanOp.batchAccessor().container()));
  }

  assertFalse(scanOp.next());
  scanFixture.close();
}
Aggregations