Example use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache, taken from the class TestScanOrchestratorEarlySchema, method testTypeSmoothingExplicit.
/**
 * The projection mechanism provides "type smoothing": null
 * columns prefer the type of previously-seen non-null columns.
 *
 * <code><pre>
 * SELECT a, b, c ...
 *
 * Table 1: (a: BIGINT, b: VARCHAR, c: INT[])
 * Table 2: (a: BIGINT, c: INT[])
 * Table 3: (a: BIGINT, b: VARCHAR)
 * Table 4: (b: VARCHAR, c: INT[])
 * </pre></code>
 * The result for the first three tables should be
 * <tt>(a: BIGINT, b: VARCHAR, c: INT[])</tt> with no schema
 * change. Table 4 omits the non-nullable column <tt>a</tt>,
 * which forces a schema change.
 */
@Test
public void testTypeSmoothingExplicit() {
  ScanOrchestratorBuilder builder = new MockScanBuilder();
  TupleMetadata table1Schema = new SchemaBuilder()
      .add("A", MinorType.BIGINT)
      .addNullable("B", MinorType.VARCHAR)
      .addArray("C", MinorType.INT)
      .buildSchema();
  BatchSchema resultSchema = new BatchSchema(SelectionVectorMode.NONE, table1Schema.toFieldList());
  SchemaTracker tracker = new SchemaTracker();

  // SELECT a, b, c ...
  builder.projection(RowSetTestUtils.projectList("a", "b", "c"));
  ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);
  int schemaVersion;
  {
    // ... FROM table1(a, b, c)
    //
    // Establishes the baseline schema and schema version.
    ReaderSchemaOrchestrator reader = scanner.startReader();
    reader.makeTableLoader(table1Schema);
    reader.defineSchema();
    VectorContainer output = scanner.output();
    tracker.trackSchema(output);
    schemaVersion = tracker.schemaVersion();
    assertTrue(resultSchema.isEquivalent(output.getSchema()));
    scanner.closeReader();
  }
  {
    // ... FROM table2(a, c)
    //
    // B is dropped. But, it is nullable, so the vector cache
    // can supply the proper type to ensure continuity.
    TupleMetadata table2Schema = new SchemaBuilder()
        .add("A", MinorType.BIGINT)
        .addArray("C", MinorType.INT)
        .buildSchema();
    ReaderSchemaOrchestrator reader = scanner.startReader();
    reader.makeTableLoader(table2Schema);
    reader.defineSchema();
    VectorContainer output = scanner.output();
    tracker.trackSchema(output);
    // Same version: no schema change despite the missing column.
    assertEquals(schemaVersion, tracker.schemaVersion());
    assertTrue(resultSchema.isEquivalent(output.getSchema()));
    scanner.closeReader();
  }
  {
    // ... FROM table3(a, b)
    //
    // C is dropped. But, it is an array, which uses zero-elements
    // to indicate null, so the vector cache can fill in the type.
    TupleMetadata table3Schema = new SchemaBuilder()
        .add("A", MinorType.BIGINT)
        .addNullable("B", MinorType.VARCHAR)
        .buildSchema();
    ReaderSchemaOrchestrator reader = scanner.startReader();
    reader.makeTableLoader(table3Schema);
    reader.defineSchema();
    VectorContainer output = scanner.output();
    tracker.trackSchema(output);
    assertEquals(schemaVersion, tracker.schemaVersion());
    assertTrue(resultSchema.isEquivalent(output.getSchema()));
    scanner.closeReader();
  }
  {
    // ... FROM table4(b, c)
    //
    // This table omits the non-nullable BIGINT column A. A missing
    // column can't be non-nullable, so a nullable BIGINT is
    // substituted, resulting in a schema change.
    TupleMetadata table4Schema = new SchemaBuilder()
        .addNullable("B", MinorType.VARCHAR)
        .addArray("C", MinorType.INT)
        .buildSchema();
    ReaderSchemaOrchestrator reader = scanner.startReader();
    reader.makeTableLoader(table4Schema);
    reader.defineSchema();
    VectorContainer output = scanner.output();
    tracker.trackSchema(output);
    assertEquals(MinorType.BIGINT, output.getSchema().getColumn(0).getType().getMinorType());
    assertEquals(DataMode.OPTIONAL, output.getSchema().getColumn(0).getType().getMode());
    // A new, higher schema version marks the schema change.
    assertTrue(schemaVersion < tracker.schemaVersion());
    scanner.closeReader();
  }
  scanner.close();
}
Example use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache, taken from the class TestScanOrchestratorEarlySchema, method testEarlySchemaSelectAll.
/**
 * Test SELECT a, b FROM table(a, b).
 */
@Test
public void testEarlySchemaSelectAll() {
  // SELECT a, b ...
  ScanOrchestratorBuilder scanBuilder = new MockScanBuilder();
  scanBuilder.projection(RowSetTestUtils.projectList("a", "b"));
  ScanSchemaOrchestrator orchestrator = new ScanSchemaOrchestrator(fixture.allocator(), scanBuilder);

  // ... FROM table with file schema (a, b).
  ReaderSchemaOrchestrator readerOrchestrator = orchestrator.startReader();
  TupleMetadata fileSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .add("b", MinorType.VARCHAR)
      .buildSchema();

  // Create the table loader. Don't bother with an empty batch
  // here or in other tests; this simulates the second reader
  // in a scan.
  ResultSetLoader tableLoader = readerOrchestrator.makeTableLoader(fileSchema);

  // Load one batch of data.
  readerOrchestrator.startBatch();
  tableLoader.writer()
      .addRow(1, "fred")
      .addRow(2, "wilma");
  readerOrchestrator.endBatch();

  // Verify that the scan output matches the loaded rows.
  SingleRowSet expected = fixture.rowSetBuilder(fileSchema)
      .addRow(1, "fred")
      .addRow(2, "wilma")
      .build();
  RowSetUtilities.verify(expected, fixture.wrap(orchestrator.output()));
  orchestrator.close();
}
Example use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache, taken from the class TestScanOrchestratorEarlySchema, method testEarlySchemaSelectNone.
/**
 * Test a query that projects no table columns at all from
 * table(a, b), as occurs for queries such as SELECT COUNT(*).
 */
@Test
public void testEarlySchemaSelectNone() {
  // Empty projection list: SELECT ...
  ScanOrchestratorBuilder scanBuilder = new MockScanBuilder();
  scanBuilder.projection(RowSetTestUtils.projectList());
  ScanSchemaOrchestrator orchestrator = new ScanSchemaOrchestrator(fixture.allocator(), scanBuilder);

  // ... FROM table with file schema (a, b).
  ReaderSchemaOrchestrator readerOrchestrator = orchestrator.startReader();
  TupleMetadata fileSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .add("b", MinorType.VARCHAR)
      .buildSchema();
  ResultSetLoader tableLoader = readerOrchestrator.makeTableLoader(fileSchema);

  // Neither table column is projected in the table loader.
  assertTrue(tableLoader.isProjectionEmpty());
  assertFalse(tableLoader.writer().column("a").isProjected());
  assertFalse(tableLoader.writer().column("b").isProjected());

  // The expected batch schema is empty.
  BatchSchema expectedSchema = new BatchSchemaBuilder()
      .withSchemaBuilder(new SchemaBuilder())
      .build();

  // Load one batch of data.
  readerOrchestrator.startBatch();
  tableLoader.writer()
      .addRow(1, "fred")
      .addRow(2, "wilma");
  readerOrchestrator.endBatch();

  // Verify: two rows, no data.
  {
    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
        .addRow()
        .addRow()
        .build();
    RowSetUtilities.verify(expected, fixture.wrap(orchestrator.output()));
  }

  // Fast path to fill in empty rows.
  readerOrchestrator.startBatch();
  tableLoader.skipRows(10);
  readerOrchestrator.endBatch();

  // Verify the skipped-row count.
  {
    VectorContainer output = orchestrator.output();
    assertEquals(10, output.getRecordCount());
    output.zeroVectors();
  }
  orchestrator.close();
}
Example use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache, taken from the class TestScanOrchestratorEarlySchema, method testEarlySchemaSelectExtra.
/**
 * Test SELECT a, b, c FROM table(a, b); the unmatched
 * column c comes back as a null column.
 */
@Test
public void testEarlySchemaSelectExtra() {
  // SELECT a, b, c ...
  ScanOrchestratorBuilder scanBuilder = new MockScanBuilder();
  scanBuilder.projection(RowSetTestUtils.projectList("a", "b", "c"));
  ScanSchemaOrchestrator orchestrator = new ScanSchemaOrchestrator(fixture.allocator(), scanBuilder);

  // ... FROM table with file schema (a, b).
  ReaderSchemaOrchestrator readerOrchestrator = orchestrator.startReader();
  TupleMetadata fileSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .add("b", MinorType.VARCHAR)
      .buildSchema();
  ResultSetLoader tableLoader = readerOrchestrator.makeTableLoader(fileSchema);

  // Expected output schema: the table columns plus a nullable
  // INT for the unmatched column c.
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .add("b", MinorType.VARCHAR)
      .addNullable("c", MinorType.INT)
      .buildSchema();

  // Load one batch of data.
  readerOrchestrator.startBatch();
  tableLoader.writer()
      .addRow(1, "fred")
      .addRow(2, "wilma");
  readerOrchestrator.endBatch();

  // Verify: column c is null in every row.
  SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
      .addRow(1, "fred", null)
      .addRow(2, "wilma", null)
      .build();
  RowSetUtilities.verify(expected, fixture.wrap(orchestrator.output()));
  orchestrator.close();
}
Example use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache, taken from the class TestScanOrchestratorEarlySchema, method testEmptySchema.
/**
 * Test SELECT * from an early-schema table with an empty
 * schema (that is, a schema that consists of zero columns).
 */
@Test
public void testEmptySchema() {
  // SELECT * ...
  ScanOrchestratorBuilder scanBuilder = new MockScanBuilder();
  scanBuilder.projection(RowSetTestUtils.projectAll());
  ScanSchemaOrchestrator orchestrator = new ScanSchemaOrchestrator(fixture.allocator(), scanBuilder);

  // ... FROM table with an empty file schema ().
  ReaderSchemaOrchestrator readerOrchestrator = orchestrator.startReader();
  TupleMetadata fileSchema = new SchemaBuilder().buildSchema();
  readerOrchestrator.makeTableLoader(fileSchema);

  // Run an empty batch: because there are no columns, it does
  // not make sense to read any rows.
  readerOrchestrator.startBatch();
  readerOrchestrator.endBatch();

  // Verify the empty result set.
  {
    SingleRowSet expected = fixture.rowSetBuilder(fileSchema).build();
    RowSetUtilities.verify(expected, fixture.wrap(orchestrator.output()));
  }
  orchestrator.close();
}
Aggregations