use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.
the class TestScanOrchestratorLateSchema method testLateSchemaSelectDisjoint.
/**
* Test SELECT a, c FROM table(a, b)
*/
@Test
public void testLateSchemaSelectDisjoint() {
  ScanOrchestratorBuilder builder = new MockScanBuilder();

  // SELECT a, c ...
  builder.projection(RowSetTestUtils.projectList("a", "c"));
  ScanSchemaOrchestrator orchestrator =
      new ScanSchemaOrchestrator(fixture.allocator(), builder);

  // ... FROM file
  ReaderSchemaOrchestrator reader = orchestrator.startReader();

  // Create the table loader
  ResultSetLoader loader = reader.makeTableLoader(null);

  // File schema (a, b)
  reader.startBatch();
  RowSetLoader writer = loader.writer();
  writer.addColumn(SchemaBuilder.columnSchema("a", MinorType.INT, DataMode.REQUIRED));
  writer.addColumn(SchemaBuilder.columnSchema("b", MinorType.VARCHAR, DataMode.REQUIRED));

  // Create a batch of data.
  writer
      .addRow(1, "fred")
      .addRow(2, "wilma");
  reader.endBatch();

  // Verify: b is read but not projected; c is projected but absent from
  // the file, so it is back-filled as a nullable INT of nulls.
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addNullable("c", MinorType.INT)
      .buildSchema();
  SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
      .addRow(1, null)
      .addRow(2, null)
      .build();
  new RowSetComparison(expected)
      .verifyAndClearAll(fixture.wrap(orchestrator.output()));
  orchestrator.close();
}
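Two SchemaBuilder styles appear in this test: the static columnSchema() factory for a single column, and the fluent builder for a whole tuple. A minimal sketch of their equivalence, assuming ColumnMetadata's name(), type(), and mode() accessors; the assertion-style comparison is illustrative, not taken from the test:

// Hypothetical standalone comparison; both construction forms come from
// the test above.
ColumnMetadata viaFactory =
    SchemaBuilder.columnSchema("a", MinorType.INT, DataMode.REQUIRED);
ColumnMetadata viaBuilder = new SchemaBuilder()
    // add() without an explicit mode defaults to REQUIRED
    .add("a", MinorType.INT)
    .buildSchema()
    .metadata("a");
assert viaFactory.name().equals(viaBuilder.name());
assert viaFactory.type() == viaBuilder.type();
assert viaFactory.mode() == viaBuilder.mode();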
use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.
the class TestScanOrchestratorLateSchema method testLateSchemaWildcard.
/**
* Test SELECT * from a late-schema table of (a, b)
*/
@Test
public void testLateSchemaWildcard() {
  ScanOrchestratorBuilder builder = new MockScanBuilder();

  // SELECT * ...
  builder.projection(RowSetTestUtils.projectAll());
  ScanSchemaOrchestrator orchestrator =
      new ScanSchemaOrchestrator(fixture.allocator(), builder);

  // ... FROM table
  ReaderSchemaOrchestrator reader = orchestrator.startReader();

  // Create the table loader
  ResultSetLoader loader = reader.makeTableLoader(null);

  // Late schema: no schema is provided up front.
  assertFalse(reader.hasSchema());

  // Start a batch and discover a schema: (a, b)
  reader.startBatch();
  RowSetLoader writer = loader.writer();
  writer.addColumn(SchemaBuilder.columnSchema("a", MinorType.INT, DataMode.REQUIRED));
  writer.addColumn(SchemaBuilder.columnSchema("b", MinorType.VARCHAR, DataMode.REQUIRED));

  // Create a batch of data using the discovered schema.
  writer
      .addRow(1, "fred")
      .addRow(2, "wilma");
  reader.endBatch();

  // Verify: the wildcard projects the discovered schema unchanged.
  TupleMetadata tableSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .add("b", MinorType.VARCHAR)
      .buildSchema();
  SingleRowSet expected = fixture.rowSetBuilder(tableSchema)
      .addRow(1, "fred")
      .addRow(2, "wilma")
      .build();
  new RowSetComparison(expected)
      .verifyAndClearAll(fixture.wrap(orchestrator.output()));
  orchestrator.close();
}
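The only moving part between this test and the disjoint test above is the projection handed to the builder; both calls below are taken from the two tests in this section, shown side by side for contrast:

// Wildcard: SELECT * -- the output schema is exactly what the reader discovers.
builder.projection(RowSetTestUtils.projectAll());

// Explicit list: SELECT a, c -- reader column b is dropped, and the
// projected-but-missing column c is back-filled with nulls.
builder.projection(RowSetTestUtils.projectList("a", "c"));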
use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.
the class TestDirectConverter method testStringToDateTimeDefault.
/**
* Test VARCHAR to DATE, TIME and TIMESTAMP conversion
* using default ISO formats.
*/
@Test
public void testStringToDateTimeDefault() {
  TupleMetadata outputSchema = new SchemaBuilder()
      .add("date", MinorType.DATE)
      .add("time", MinorType.TIME)
      .add("ts", MinorType.TIMESTAMP)
      .buildSchema();
  TupleMetadata inputSchema = new SchemaBuilder()
      .add("date", MinorType.VARCHAR)
      .add("time", MinorType.VARCHAR)
      .add("ts", MinorType.VARCHAR)
      .buildSchema();
  ConversionTestFixture testFixture =
      new ConversionTestFixture(fixture.allocator(), outputSchema);
  testFixture.createConvertersFor(inputSchema);
  RowSet actual = testFixture
      .addRow("2019-03-28", "12:34:56", "2019-03-28T12:34:56")
      .build();
  LocalTime lt = LocalTime.of(12, 34, 56);
  LocalDate ld = LocalDate.of(2019, 3, 28);
  Instant ts = LocalDateTime.of(ld, lt).toInstant(ZoneOffset.UTC);
  final SingleRowSet expected = fixture.rowSetBuilder(outputSchema)
      .addRow(ld, lt, ts)
      .build();
  RowSetUtilities.verify(expected, actual);
}
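The three test strings are standard ISO-8601, which also matches java.time's default parse formats; a minimal standalone sketch (plain java.time, no Drill code involved) confirming the formats the test relies on:

// Assumes imports of java.time.LocalDate, LocalTime, and LocalDateTime.
// ISO-8601 is the default format for each parse() call below.
LocalDate date = LocalDate.parse("2019-03-28");                // ISO_LOCAL_DATE
LocalTime time = LocalTime.parse("12:34:56");                  // ISO_LOCAL_TIME
LocalDateTime ts = LocalDateTime.parse("2019-03-28T12:34:56"); // ISO_LOCAL_DATE_TIME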
use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.
the class TestDirectConverter method testSpecialConversionType.
/**
* Test the specialized types: conversion to/from string.
*/
@Test
public void testSpecialConversionType() {
  StandardConversions conversions = StandardConversions.builder().build();
  TupleMetadata schema = new SchemaBuilder()
      .add("time", MinorType.TIME)
      .add("date", MinorType.DATE)
      .add("ts", MinorType.TIMESTAMP)
      .add("interval", MinorType.INTERVAL)
      .add("year", MinorType.INTERVALYEAR)
      .add("day", MinorType.INTERVALDAY)
      .add("int", MinorType.INT)
      .add("bi", MinorType.BIGINT)
      .add("str", MinorType.VARCHAR)
      .buildSchema();
  ColumnMetadata timeCol = schema.metadata("time");
  ColumnMetadata dateCol = schema.metadata("date");
  ColumnMetadata tsCol = schema.metadata("ts");
  ColumnMetadata intervalCol = schema.metadata("interval");
  ColumnMetadata yearCol = schema.metadata("year");
  ColumnMetadata dayCol = schema.metadata("day");
  ColumnMetadata intCol = schema.metadata("int");
  ColumnMetadata bigIntCol = schema.metadata("bi");
  ColumnMetadata stringCol = schema.metadata("str");

  // TIME
  expect(ConversionType.NONE, conversions.analyze(timeCol, timeCol));
  expect(ConversionType.EXPLICIT, conversions.analyze(timeCol, stringCol));
  expect(ConversionType.EXPLICIT, conversions.analyze(stringCol, timeCol));
  expect(ConversionType.IMPLICIT, conversions.analyze(intCol, timeCol));
  expect(ConversionType.IMPLICIT, conversions.analyze(timeCol, intCol));

  // DATE
  expect(ConversionType.NONE, conversions.analyze(dateCol, dateCol));
  expect(ConversionType.EXPLICIT, conversions.analyze(dateCol, stringCol));
  expect(ConversionType.EXPLICIT, conversions.analyze(stringCol, dateCol));
  expect(ConversionType.IMPLICIT, conversions.analyze(bigIntCol, dateCol));
  expect(ConversionType.IMPLICIT, conversions.analyze(dateCol, bigIntCol));

  // TIMESTAMP
  expect(ConversionType.NONE, conversions.analyze(tsCol, tsCol));
  expect(ConversionType.EXPLICIT, conversions.analyze(tsCol, stringCol));
  expect(ConversionType.EXPLICIT, conversions.analyze(stringCol, tsCol));
  expect(ConversionType.IMPLICIT, conversions.analyze(bigIntCol, tsCol));
  expect(ConversionType.IMPLICIT, conversions.analyze(tsCol, bigIntCol));

  // INTERVAL
  expect(ConversionType.NONE, conversions.analyze(intervalCol, intervalCol));
  expect(ConversionType.EXPLICIT, conversions.analyze(intervalCol, stringCol));
  expect(ConversionType.EXPLICIT, conversions.analyze(stringCol, intervalCol));

  // INTERVALYEAR
  expect(ConversionType.NONE, conversions.analyze(yearCol, yearCol));
  expect(ConversionType.EXPLICIT, conversions.analyze(yearCol, stringCol));
  expect(ConversionType.EXPLICIT, conversions.analyze(stringCol, yearCol));

  // INTERVALDAY
  expect(ConversionType.NONE, conversions.analyze(dayCol, dayCol));
  expect(ConversionType.EXPLICIT, conversions.analyze(dayCol, stringCol));
  expect(ConversionType.EXPLICIT, conversions.analyze(stringCol, dayCol));
}
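A minimal standalone sketch of the same analyze() API outside the expect() harness, reusing only calls shown in this test; the VARCHAR-to-DATE result mirrors the DATE assertions above:

StandardConversions conversions = StandardConversions.builder().build();
TupleMetadata pair = new SchemaBuilder()
    .add("src", MinorType.VARCHAR)
    .add("dest", MinorType.DATE)
    .buildSchema();

// Per the DATE assertions above, string-to-date needs an explicit conversion.
ConversionType result =
    conversions.analyze(pair.metadata("src"), pair.metadata("dest"));
assert result == ConversionType.EXPLICIT;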
use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.
the class TestDirectConverter method testImplicitConversionIntTruncation.
/**
 * The column accessors provide only int setters. For performance, the int
 * value is assumed to be within range for the target column; if it is not,
 * the high-order bytes are truncated.
 * <p>
 * The assumption is that, if the reader or other code expects overflow to
 * occur, the range check should be implemented in the client (or in a type
 * conversion shim), leaving the normal code path optimized for the 99% of
 * cases where the value is in the proper range.
 */
@Test
public void testImplicitConversionIntTruncation() {
  TupleMetadata schema = new SchemaBuilder()
      .add("ti", MinorType.TINYINT)
      .add("si", MinorType.SMALLINT)
      .buildSchema();

  // Test allowed implicit conversions.
  RowSet actual = new RowSetBuilder(fixture.allocator(), schema)
      .addRow(Byte.MAX_VALUE + 1, Short.MAX_VALUE + 1)
      .addRow(Byte.MAX_VALUE + 2, Short.MAX_VALUE + 2)
      .build();

  // Build the expected vector without a type converter.
  final SingleRowSet expected = fixture.rowSetBuilder(schema)
      .addRow(Byte.MIN_VALUE, Short.MIN_VALUE)
      .addRow(Byte.MIN_VALUE + 1, Short.MIN_VALUE + 1)
      .build();
  RowSetUtilities.verify(expected, actual);
}
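The expected values follow directly from Java's narrowing-cast semantics, which is what truncating the high-order bytes amounts to; a plain-Java illustration with no Drill code involved:

// Byte.MAX_VALUE is 127 (0x7F); adding 1 yields 128 (0x80) as an int.
// Narrowing keeps only the low byte, which two's complement reads as -128.
assert (byte) (Byte.MAX_VALUE + 1) == Byte.MIN_VALUE;      // 128 -> -128
assert (byte) (Byte.MAX_VALUE + 2) == Byte.MIN_VALUE + 1;  // 129 -> -127

// Same wraparound for the 16-bit SMALLINT case.
assert (short) (Short.MAX_VALUE + 1) == Short.MIN_VALUE;   // 32768 -> -32768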