Use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
The class TestResultSetLoaderOverflow, method testSizeLimitOnArray.
/**
 * Test a row with a single array column which overflows. Verifies
 * that all the fiddly bits about offset vectors and so on work
 * correctly. Run this test (the simplest case) if you change anything
 * about the array-handling code.
 */
@Test
public void testSizeLimitOnArray() {
  TupleMetadata schema = new SchemaBuilder()
      .addArray("s", MinorType.VARCHAR)
      .buildSchema();
  ResultSetOptions options = new ResultSetOptionBuilder()
      .rowCountLimit(ValueVector.MAX_ROW_COUNT)
      .readerSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();
  // Fill the batch with rows, each holding a single array of 13 values.
  // Tack on a suffix to each value so we can be sure the proper data is
  // written and moved to the overflow batch.
  rsLoader.startBatch();
  byte[] value = new byte[473];
  Arrays.fill(value, (byte) 'X');
  String strValue = new String(value, Charsets.UTF_8);
  int valuesPerArray = 13;
  int count = 0;
  {
    int rowSize = 0;
    int totalSize = 0;
    while (rootWriter.start()) {
      totalSize += rowSize;
      rowSize = 0;
      ScalarWriter array = rootWriter.array(0).scalar();
      for (int i = 0; i < valuesPerArray; i++) {
        String cellValue = strValue + (count + 1) + "." + i;
        array.setString(cellValue);
        rowSize += cellValue.length();
      }
      rootWriter.save();
      count++;
    }
    // The loop count includes the overflow row; the harvested batch excludes it.
    int expectedCount = count - 1;

    // Size without the overflow row should fit in the vector; size
    // with the overflow row should not.
    assertTrue(totalSize <= ValueVector.MAX_BUFFER_SIZE);
    assertTrue(totalSize + rowSize > ValueVector.MAX_BUFFER_SIZE);
    // Result should exclude the overflow row. Last row
    // should hold the last full array.
    VectorContainer container = rsLoader.harvest();
    BatchValidator.validate(container);
    RowSet result = fixture.wrap(container);
    assertEquals(expectedCount, result.rowCount());
    RowSetReader reader = result.reader();
    reader.setPosition(expectedCount - 1);
    ArrayReader arrayReader = reader.array(0);
    ScalarReader strReader = arrayReader.scalar();
    assertEquals(valuesPerArray, arrayReader.size());
    for (int i = 0; i < valuesPerArray; i++) {
      assertTrue(arrayReader.next());
      String cellValue = strValue + (count - 1) + "." + i;
      assertEquals(cellValue, strReader.getString());
    }
    result.clear();
  }
  // Next batch should start with the overflow row. The only row in this
  // next batch should be the whole array being written at the time of
  // overflow.
  {
    rsLoader.startBatch();
    assertEquals(1, rootWriter.rowCount());
    assertEquals(count, rsLoader.totalRowCount());
    VectorContainer container = rsLoader.harvest();
    BatchValidator.validate(container);
    RowSet result = fixture.wrap(container);
    assertEquals(1, result.rowCount());
    RowSetReader reader = result.reader();
    reader.next();
    ArrayReader arrayReader = reader.array(0);
    ScalarReader strReader = arrayReader.scalar();
    assertEquals(valuesPerArray, arrayReader.size());
    for (int i = 0; i < valuesPerArray; i++) {
      assertTrue(arrayReader.next());
      String cellValue = strValue + count + "." + i;
      assertEquals(cellValue, strReader.getString());
    }
    result.clear();
  }
  rsLoader.close();
}
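Distilled from the test above, the writer lifecycle that drives overflow handling is quite small. A minimal sketch, assuming the same fixture and options as the snippet; writeRow() is a hypothetical placeholder for the per-row column writes:

// Sketch of the ResultSetLoader batch lifecycle (assumptions as noted above).
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
RowSetLoader rootWriter = rsLoader.writer();
rsLoader.startBatch();
// start() returns false once the batch is full, including the case where
// the in-progress row overflowed into the look-ahead batch.
while (rootWriter.start()) {
  writeRow(rootWriter);   // hypothetical: set each column value for the row
  rootWriter.save();
}
VectorContainer batch = rsLoader.harvest();   // excludes the overflow row
rsLoader.startBatch();                        // overflow row reappears as row 0

The test's two assertion blocks verify exactly these two harvest boundaries.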
Use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
The class TestResultSetLoaderProtocol, method testCaseInsensitiveSchema.
/**
 * Schemas are case-insensitive by default. Verify that
 * the schema mechanism works, with emphasis on
 * case-insensitive name lookup.
 * <p>
 * The tests here and elsewhere build columns from a
 * <tt>MaterializedField</tt>. Doing so is rather old-school;
 * better to use the newer <tt>ColumnMetadata</tt>, which provides
 * additional information. The code here simply uses the <tt>MaterializedField</tt>
 * to create a <tt>ColumnMetadata</tt> implicitly.
 */
@Test
public void testCaseInsensitiveSchema() {
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
  RowSetLoader rootWriter = rsLoader.writer();
  TupleMetadata schema = rootWriter.tupleSchema();
  assertEquals(0, rsLoader.schemaVersion());

  // No columns defined in schema
  assertNull(schema.metadata("a"));
  try {
    schema.column(0);
    fail();
  } catch (IndexOutOfBoundsException e) {
    // Expected
  }
  try {
    rootWriter.column("a");
    fail();
  } catch (UndefinedColumnException e) {
    // Expected
  }
  try {
    rootWriter.column(0);
    fail();
  } catch (IndexOutOfBoundsException e) {
    // Expected
  }
  // Define a column
  assertEquals(0, rsLoader.schemaVersion());
  MaterializedField colSchema = SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.REQUIRED);
  rootWriter.addColumn(colSchema);
  assertEquals(1, rsLoader.schemaVersion());

  // Can now be found, case insensitive
  assertTrue(colSchema.isEquivalent(schema.column(0)));
  ColumnMetadata colMetadata = schema.metadata(0);
  assertSame(colMetadata, schema.metadata("a"));
  assertSame(colMetadata, schema.metadata("A"));
  assertNotNull(rootWriter.column(0));
  assertNotNull(rootWriter.column("a"));
  assertNotNull(rootWriter.column("A"));
  assertEquals(1, schema.size());
  assertEquals(0, schema.index("a"));
  assertEquals(0, schema.index("A"));
  try {
    rootWriter.addColumn(colSchema);
    fail();
  } catch (UserException e) {
    // Expected
  }
  try {
    MaterializedField testCol = SchemaBuilder.columnSchema("A", MinorType.VARCHAR, DataMode.REQUIRED);
    rootWriter.addColumn(testCol);
    fail();
  } catch (UserException e) {
    // Expected
    assertTrue(e.getMessage().contains("Duplicate"));
  }
  // Can still add required fields while writing the first row.
  rsLoader.startBatch();
  rootWriter.start();
  rootWriter.scalar(0).setString("foo");
  MaterializedField col2 = SchemaBuilder.columnSchema("b", MinorType.VARCHAR, DataMode.REQUIRED);
  rootWriter.addColumn(col2);
  assertEquals(2, rsLoader.schemaVersion());
  assertTrue(col2.isEquivalent(schema.column(1)));
  ColumnMetadata col2Metadata = schema.metadata(1);
  assertSame(col2Metadata, schema.metadata("b"));
  assertSame(col2Metadata, schema.metadata("B"));
  assertEquals(2, schema.size());
  assertEquals(1, schema.index("b"));
  assertEquals(1, schema.index("B"));
  rootWriter.scalar(1).setString("second");

  // After the first row, can add an optional or repeated column.
  // A required field is also allowed: its values will be back-filled.
  rootWriter.save();
  rootWriter.start();
  rootWriter.scalar(0).setString("bar");
  rootWriter.scalar(1).setString("");
  MaterializedField col3 = SchemaBuilder.columnSchema("c", MinorType.VARCHAR, DataMode.REQUIRED);
  rootWriter.addColumn(col3);
  assertEquals(3, rsLoader.schemaVersion());
  assertTrue(col3.isEquivalent(schema.column(2)));
  ColumnMetadata col3Metadata = schema.metadata(2);
  assertSame(col3Metadata, schema.metadata("c"));
  assertSame(col3Metadata, schema.metadata("C"));
  assertEquals(3, schema.size());
  assertEquals(2, schema.index("c"));
  assertEquals(2, schema.index("C"));
  rootWriter.scalar("c").setString("c.2");

  MaterializedField col4 = SchemaBuilder.columnSchema("d", MinorType.VARCHAR, DataMode.OPTIONAL);
  rootWriter.addColumn(col4);
  assertEquals(4, rsLoader.schemaVersion());
  assertTrue(col4.isEquivalent(schema.column(3)));
  ColumnMetadata col4Metadata = schema.metadata(3);
  assertSame(col4Metadata, schema.metadata("d"));
  assertSame(col4Metadata, schema.metadata("D"));
  assertEquals(4, schema.size());
  assertEquals(3, schema.index("d"));
  assertEquals(3, schema.index("D"));
  rootWriter.scalar("d").setString("d.2");

  MaterializedField col5 = SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.REPEATED);
  rootWriter.addColumn(col5);
  assertEquals(5, rsLoader.schemaVersion());
  assertTrue(col5.isEquivalent(schema.column(4)));
  ColumnMetadata col5Metadata = schema.metadata(4);
  assertSame(col5Metadata, schema.metadata("e"));
  assertSame(col5Metadata, schema.metadata("E"));
  assertEquals(5, schema.size());
  assertEquals(4, schema.index("e"));
  assertEquals(4, schema.index("E"));
  rootWriter.array(4).setObject(strArray("e1", "e2", "e3"));

  MaterializedField col6 = SchemaBuilder.columnSchema("f", MinorType.BIGINT, DataMode.REPEATED);
  rootWriter.addColumn(col6);
  assertEquals(6, rsLoader.schemaVersion());
  assertTrue(col6.isEquivalent(schema.column(5)));
  ColumnMetadata col6Metadata = schema.metadata(5);
  assertSame(col6Metadata, schema.metadata("f"));
  assertSame(col6Metadata, schema.metadata("F"));
  assertEquals(6, schema.size());
  assertEquals(5, schema.index("f"));
  assertEquals(5, schema.index("F"));
  rootWriter.array(5).setObject(new Long[] { Long.MIN_VALUE, Long.MAX_VALUE });

  MaterializedField col7 = SchemaBuilder.columnSchema("g", MinorType.INT, DataMode.REPEATED);
  rootWriter.addColumn(col7);
  assertEquals(7, rsLoader.schemaVersion());
  assertTrue(col7.isEquivalent(schema.column(6)));
  ColumnMetadata col7Metadata = schema.metadata(6);
  assertSame(col7Metadata, schema.metadata("g"));
  assertSame(col7Metadata, schema.metadata("G"));
  assertEquals(7, schema.size());
  assertEquals(6, schema.index("g"));
  assertEquals(6, schema.index("G"));
  rootWriter.array(6).setObject(new Integer[] { Integer.MIN_VALUE, Integer.MAX_VALUE });

  MaterializedField col8 = SchemaBuilder.columnSchema("h", MinorType.INT, DataMode.REPEATED);
  rootWriter.addColumn(col8);
  assertEquals(8, rsLoader.schemaVersion());
  assertTrue(col8.isEquivalent(schema.column(7)));
  ColumnMetadata col8Metadata = schema.metadata(7);
  assertSame(col8Metadata, schema.metadata("h"));
  assertSame(col8Metadata, schema.metadata("H"));
  assertEquals(8, schema.size());
  assertEquals(7, schema.index("h"));
  assertEquals(7, schema.index("H"));
  rootWriter.array(7).setObject(new Short[] { Short.MIN_VALUE, Short.MAX_VALUE });

  MaterializedField col9 = SchemaBuilder.columnSchema("i", MinorType.INT, DataMode.REPEATED);
  rootWriter.addColumn(col9);
  assertEquals(9, rsLoader.schemaVersion());
  assertTrue(col9.isEquivalent(schema.column(8)));
  ColumnMetadata col9Metadata = schema.metadata(8);
  assertSame(col9Metadata, schema.metadata("i"));
  assertSame(col9Metadata, schema.metadata("I"));
  assertEquals(9, schema.size());
  assertEquals(8, schema.index("i"));
  assertEquals(8, schema.index("I"));
  rootWriter.array(8).setObject(new Byte[] { Byte.MIN_VALUE, Byte.MAX_VALUE });

  MaterializedField col10 = SchemaBuilder.columnSchema("j", MinorType.FLOAT8, DataMode.REPEATED);
  rootWriter.addColumn(col10);
  assertEquals(10, rsLoader.schemaVersion());
  assertTrue(col10.isEquivalent(schema.column(9)));
  ColumnMetadata col10Metadata = schema.metadata(9);
  assertSame(col10Metadata, schema.metadata("j"));
  assertSame(col10Metadata, schema.metadata("J"));
  assertEquals(10, schema.size());
  assertEquals(9, schema.index("j"));
  assertEquals(9, schema.index("J"));
  rootWriter.array(9).setObject(new Double[] { Double.MIN_VALUE, Double.MAX_VALUE });

  MaterializedField col11 = SchemaBuilder.columnSchema("k", MinorType.FLOAT4, DataMode.REPEATED);
  rootWriter.addColumn(col11);
  assertEquals(11, rsLoader.schemaVersion());
  assertTrue(col11.isEquivalent(schema.column(10)));
  ColumnMetadata col11Metadata = schema.metadata(10);
  assertSame(col11Metadata, schema.metadata("k"));
  assertSame(col11Metadata, schema.metadata("K"));
  assertEquals(11, schema.size());
  assertEquals(10, schema.index("k"));
  assertEquals(10, schema.index("K"));
  rootWriter.array(10).setObject(new Float[] { Float.MIN_VALUE, Float.MAX_VALUE });

  MaterializedField col12 = SchemaBuilder.columnSchema("l", MinorType.BIT, DataMode.REPEATED);
  rootWriter.addColumn(col12);
  assertEquals(12, rsLoader.schemaVersion());
  assertTrue(col12.isEquivalent(schema.column(11)));
  ColumnMetadata col12Metadata = schema.metadata(11);
  assertSame(col12Metadata, schema.metadata("l"));
  assertSame(col12Metadata, schema.metadata("L"));
  assertEquals(12, schema.size());
  assertEquals(11, schema.index("l"));
  assertEquals(11, schema.index("L"));
  rootWriter.array(11).setObject(new Boolean[] { Boolean.TRUE, Boolean.FALSE });
  rootWriter.save();
  // Verify. No reason to expect problems, but might as well check.
  RowSet result = fixture.wrap(rsLoader.harvest());
  assertEquals(12, rsLoader.schemaVersion());
  SingleRowSet expected = fixture.rowSetBuilder(result.batchSchema())
      .addRow("foo", "second", "", null, strArray(), longArray(), intArray(),
          shortArray(), byteArray(), doubleArray(), floatArray(), boolArray())
      .addRow("bar", "", "c.2", "d.2", strArray("e1", "e2", "e3"),
          longArray(Long.MIN_VALUE, Long.MAX_VALUE),
          intArray(Integer.MIN_VALUE, Integer.MAX_VALUE),
          shortArray(Short.MIN_VALUE, Short.MAX_VALUE),
          byteArray((int) Byte.MIN_VALUE, (int) Byte.MAX_VALUE),
          doubleArray(Double.MIN_VALUE, Double.MAX_VALUE),
          floatArray(Float.MIN_VALUE, Float.MAX_VALUE),
          boolArray(Boolean.TRUE, Boolean.FALSE))
      .build();
  RowSetUtilities.verify(expected, result);

  // Handy way to test that close works to abort an in-flight batch
  // and clean up.
  rsLoader.close();
}
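Stripped of its assertions, the dynamic-schema pattern the test exercises reduces to a few lines. A sketch under the same fixture assumptions, using only calls that appear in the test:

// Sketch: discover and add a column while a row is in flight.
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
RowSetLoader rootWriter = rsLoader.writer();
rootWriter.addColumn(SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.REQUIRED));
rsLoader.startBatch();
rootWriter.start();
rootWriter.scalar("a").setString("foo");
// A column added mid-row is immediately writable; rows already saved are
// back-filled: empty values for REQUIRED, null for OPTIONAL (per the
// expected row set in the test above).
rootWriter.addColumn(SchemaBuilder.columnSchema("b", MinorType.VARCHAR, DataMode.OPTIONAL));
rootWriter.scalar("b").setString("bar");
rootWriter.save();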
Use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
The class TestResultSetLoaderProtocol, method testOverwriteRow.
/**
 * The writer protocol allows a client to write to a row any number of times
 * before invoking {@code save()}. In this case, each new value simply
 * overwrites the previous value. Here, we test the most basic case: a simple,
 * flat tuple with no arrays. We use a very large Varchar that would, if
 * overwrite were not working, cause vector overflow.
 * <p>
 * The ability to overwrite rows is seldom needed except in one future use
 * case: writing a row, then applying a filter "in-place" to discard unwanted
 * rows, without having to send the row downstream.
 * <p>
 * Because of this use case, specific rules apply when discarding a row or
 * overwriting values.
 * <ul>
 * <li>Values should be written once per row. Fixed-width columns tolerate
 * multiple writes but, because of the way variable-width columns work,
 * multiple writes to them produce undefined results.</li>
 * <li>To overwrite a row, call <tt>start()</tt> without calling
 * <tt>save()</tt> on the previous row. Doing so discards the data for the
 * previous row and starts a new row in place of the old one.</li>
 * </ul>
 * Note that there is no explicit method to discard a row. Instead,
 * the rule is that a row is not saved until <tt>save()</tt> is called.
 */
@Test
public void testOverwriteRow() {
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .add("b", MinorType.VARCHAR)
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
      .readerSchema(schema)
      .rowCountLimit(ValueVector.MAX_ROW_COUNT)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();

  // Can't use the shortcut to populate rows when doing overwrites.
  ScalarWriter aWriter = rootWriter.scalar("a");
  ScalarWriter bWriter = rootWriter.scalar("b");

  // Write 100,000 rows, overwriting 99% of them. This will cause vector
  // overflow and data corruption if overwrite does not work; but will happily
  // produce the correct result if everything works as it should.
  byte[] value = new byte[512];
  Arrays.fill(value, (byte) 'X');
  int count = 0;
  rsLoader.startBatch();
  while (count < 100_000) {
    rootWriter.start();
    count++;
    aWriter.setInt(count);
    bWriter.setBytes(value, value.length);
    if (count % 100 == 0) {
      rootWriter.save();
    }
  }
  // Verify using a reader.
  RowSet result = fixture.wrap(rsLoader.harvest());
  assertEquals(count / 100, result.rowCount());
  RowSetReader reader = result.reader();
  int rowId = 1;
  while (reader.next()) {
    assertEquals(rowId * 100, reader.scalar("a").getInt());
    assertTrue(Arrays.equals(value, reader.scalar("b").getBytes()));
    rowId++;
  }
  result.clear();
  rsLoader.close();
}
Use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
The class TestScanOrchestratorImplicitColumns, method testMixture.
/**
 * Test SELECT dir0, b, suffix, c FROM table(a, b):
 * the full combination of metadata, table, and null columns.
 */
@Test
public void testMixture() {
  ScanOrchestratorBuilder builder = new MockScanBuilder();
  File file = dirTestWatcher.copyResourceToRoot(
      Paths.get("multilevel", "csv", "1994", "Q1", "orders_94_q1.csv"),
      Paths.get("x", "y", "z.csv"));
  Path filePath = new Path(file.toURI().getPath());
  ImplicitColumnManager metadataManager = new ImplicitColumnManager(
      fixture.getOptionManager(), standardOptions(filePath));
  builder.withImplicitColumns(metadataManager);

  // SELECT dir0, b, suffix, c ...
  builder.projection(RowSetTestUtils.projectList("dir0", "b", "suffix", "c"));
  ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);

  // ... FROM file
  metadataManager.startFile(filePath);
  ReaderSchemaOrchestrator reader = scanner.startReader();

  // file schema (a, b)
  TupleMetadata tableSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .add("b", MinorType.VARCHAR)
      .buildSchema();

  // Create the table loader
  ResultSetLoader loader = reader.makeTableLoader(tableSchema);
  TupleMetadata expectedSchema = new SchemaBuilder()
      .addNullable("dir0", MinorType.VARCHAR)
      .add("b", MinorType.VARCHAR)
      .add("suffix", MinorType.VARCHAR)
      .addNullable("c", MinorType.INT)
      .buildSchema();

  // Create a batch of data.
  reader.startBatch();
  loader.writer().addRow(1, "fred").addRow(2, "wilma");
  reader.endBatch();

  // Verify
  SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
      .addRow("x", "fred", "csv", null)
      .addRow("x", "wilma", "csv", null)
      .build();
  RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
  scanner.close();
}
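For reference, here is how each projected column in the expected rows resolves for the file registered as x/y/z.csv; all values come from the test itself:

// Resolution of the projection (dir0, b, suffix, c) for file x/y/z.csv:
//   dir0   -> "x"     implicit: first directory level below the scan root
//   b      -> table   from the reader's (a, b) schema
//   suffix -> "csv"   implicit: the file extension
//   c      -> null    projected but absent from the table; typed nullable INT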
Use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
The class TestSchemaSmoothing, method testWildcardSmoothing.
/**
* A SELECT * query uses the schema of the table as the output schema.
* This is trivial when the scanner has one table. But, if two or more
* tables occur, then things get interesting. The first table sets the
* schema. The second table then has:
* <ul>
* <li>The same schema, trivial case.</li>
* <li>A subset of the first table. The type of the "missing" column
* from the first table is used for a null column in the second table.</li>
* <li>A superset or disjoint set of the first schema. This triggers a hard schema
* change.</li>
* </ul>
* <p>
* It is an open question whether previous columns should be preserved on
* a hard reset. For now, the code implements, and this test verifies, that a
* hard reset clears the "memory" of prior schemas.
*/
@Test
public void testWildcardSmoothing() {
  ScanOrchestratorBuilder builder = new MockScanBuilder();
  builder.enableSchemaSmoothing(true);
  builder.projection(RowSetTestUtils.projectAll());
  final ScanSchemaOrchestrator projector = new ScanSchemaOrchestrator(fixture.allocator(), builder);
  final TupleMetadata firstSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addNullable("b", MinorType.VARCHAR, 10)
      .addNullable("c", MinorType.BIGINT)
      .buildSchema();
  final TupleMetadata subsetSchema = new SchemaBuilder()
      .addNullable("b", MinorType.VARCHAR, 10)
      .add("a", MinorType.INT)
      .buildSchema();
  final TupleMetadata disjointSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addNullable("b", MinorType.VARCHAR, 10)
      .add("d", MinorType.VARCHAR)
      .buildSchema();
  final SchemaTracker tracker = new SchemaTracker();
  int schemaVersion;
  {
    // First table, establishes the baseline
    // ... FROM table 1
    final ReaderSchemaOrchestrator reader = projector.startReader();
    final ResultSetLoader loader = reader.makeTableLoader(firstSchema);
    reader.startBatch();
    loader.writer().addRow(10, "fred", 110L).addRow(20, "wilma", 110L);
    reader.endBatch();
    tracker.trackSchema(projector.output());
    schemaVersion = tracker.schemaVersion();
    final SingleRowSet expected = fixture.rowSetBuilder(firstSchema)
        .addRow(10, "fred", 110L)
        .addRow(20, "wilma", 110L)
        .build();
    new RowSetComparison(expected).verifyAndClearAll(fixture.wrap(projector.output()));
  }
  {
    // Second table, same schema, the trivial case
    // ... FROM table 2
    final ReaderSchemaOrchestrator reader = projector.startReader();
    final ResultSetLoader loader = reader.makeTableLoader(firstSchema);
    reader.startBatch();
    loader.writer().addRow(70, "pebbles", 770L).addRow(80, "hoppy", 880L);
    reader.endBatch();
    tracker.trackSchema(projector.output());
    assertEquals(schemaVersion, tracker.schemaVersion());
    final SingleRowSet expected = fixture.rowSetBuilder(firstSchema)
        .addRow(70, "pebbles", 770L)
        .addRow(80, "hoppy", 880L)
        .build();
    new RowSetComparison(expected).verifyAndClearAll(fixture.wrap(projector.output()));
  }
  {
    // Third table: subset schema of the first two
    // ... FROM table 3
    final ReaderSchemaOrchestrator reader = projector.startReader();
    final ResultSetLoader loader = reader.makeTableLoader(subsetSchema);
    reader.startBatch();
    loader.writer().addRow("bambam", 30).addRow("betty", 40);
    reader.endBatch();
    tracker.trackSchema(projector.output());
    assertEquals(schemaVersion, tracker.schemaVersion());
    final SingleRowSet expected = fixture.rowSetBuilder(firstSchema)
        .addRow(30, "bambam", null)
        .addRow(40, "betty", null)
        .build();
    new RowSetComparison(expected).verifyAndClearAll(fixture.wrap(projector.output()));
  }
  {
    // Fourth table: disjoint schema, causes a schema reset
    // ... FROM table 4
    final ReaderSchemaOrchestrator reader = projector.startReader();
    final ResultSetLoader loader = reader.makeTableLoader(disjointSchema);
    reader.startBatch();
    loader.writer().addRow(50, "dino", "supporting").addRow(60, "barney", "main");
    reader.endBatch();
    tracker.trackSchema(projector.output());
    assertNotEquals(schemaVersion, tracker.schemaVersion());
    final SingleRowSet expected = fixture.rowSetBuilder(disjointSchema)
        .addRow(50, "dino", "supporting")
        .addRow(60, "barney", "main")
        .build();
    new RowSetComparison(expected).verifyAndClearAll(fixture.wrap(projector.output()));
  }
  projector.close();
}
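To summarize the smoothing behavior the four tables demonstrate:

// Schema-version transitions with smoothing enabled:
//   table 1 (a, b, c) -> establishes the baseline version
//   table 2 (a, b, c) -> identical schema: version unchanged
//   table 3 (b, a)    -> subset, reordered: smoothed to baseline, version unchanged
//   table 4 (a, b, d) -> disjoint: hard schema change, new version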