Use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
From the class TestResultSetLoaderRepeatedList, method test2DEarlySchema.
@Test
public void test2DEarlySchema() {
  final TupleMetadata schema = new SchemaBuilder()
      .add("id", MinorType.INT)
      .addRepeatedList("list2")
        .addArray(MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
      .readerSchema(schema)
      .build();
  final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  do2DTest(schema, rsLoader);
  rsLoader.close();
}
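The do2DTest helper is defined elsewhere in the test class and is not shown on this page. As a rough illustration, a write-and-verify pass of that shape could look like the sketch below, which uses only the writer and reader calls demonstrated in the other snippets here; the row counts, array sizes, and values are illustrative assumptions, not the actual helper body.

// Hypothetical sketch of a do2DTest-style helper; the real Drill helper
// may differ. Assumes the same test fixture and imports as the other
// snippets on this page.
private void do2DTest(TupleMetadata schema, ResultSetLoader rsLoader) {
  final RowSetLoader writer = rsLoader.writer();
  final ArrayWriter outerWriter = writer.array("list2");
  final ArrayWriter innerWriter = outerWriter.array();
  final ScalarWriter elementWriter = innerWriter.scalar();

  // Write a small batch of rows, each holding a 2D VARCHAR list.
  rsLoader.startBatch();
  for (int row = 0; row < 3; row++) {
    writer.start();
    writer.scalar("id").setInt(row);
    for (int outer = 0; outer < 2; outer++) {
      for (int inner = 0; inner < 2; inner++) {
        elementWriter.setString("r" + row + "." + outer + "." + inner);
      }
      outerWriter.save();
    }
    writer.save();
  }

  // Read the batch back and verify each element. (The real helper
  // presumably also validates the result against the given schema.)
  final RowSet result = fixture.wrap(rsLoader.harvest());
  final RowSetReader reader = result.reader();
  final ArrayReader outerReader = reader.array("list2");
  final ArrayReader innerReader = outerReader.array();
  final ScalarReader strReader = innerReader.scalar();
  int rowId = 0;
  while (reader.next()) {
    assertEquals(rowId, reader.scalar("id").getInt());
    for (int outer = 0; outer < 2; outer++) {
      assertTrue(outerReader.next());
      for (int inner = 0; inner < 2; inner++) {
        assertTrue(innerReader.next());
        assertEquals("r" + rowId + "." + outer + "." + inner, strReader.getString());
      }
      assertFalse(innerReader.next());
    }
    assertFalse(outerReader.next());
    rowId++;
  }
  result.clear();
}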
Use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
From the class TestResultSetLoaderRepeatedList, method test2DOverflow.
@Test
public void test2DOverflow() {
  final TupleMetadata schema = new SchemaBuilder()
      .add("id", MinorType.INT)
      .addRepeatedList("list2")
        .addArray(MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
      .rowCountLimit(ValueVector.MAX_ROW_COUNT)
      .readerSchema(schema)
      .build();
  final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  final RowSetLoader writer = rsLoader.writer();

  // Fill the batch with enough data to cause vector overflow before
  // reaching the 64K row limit. Make the strings a bit bigger than the
  // break-even size so that overflow occurs early.
  final int outerSize = 7;
  final int innerSize = 5;
  final int strLength = ValueVector.MAX_BUFFER_SIZE / ValueVector.MAX_ROW_COUNT / outerSize / innerSize + 20;
  // Reserve 6 bytes for the "%06d" element-number suffix appended below.
  final byte[] value = new byte[strLength - 6];
  Arrays.fill(value, (byte) 'X');
  final String strValue = new String(value, Charsets.UTF_8);
  int rowCount = 0;
  int elementCount = 0;
  final ArrayWriter outerWriter = writer.array(1);
  final ArrayWriter innerWriter = outerWriter.array();
  final ScalarWriter elementWriter = innerWriter.scalar();
  rsLoader.startBatch();
  while (!writer.isFull()) {
    writer.start();
    writer.scalar(0).setInt(rowCount);
    for (int j = 0; j < outerSize; j++) {
      for (int k = 0; k < innerSize; k++) {
        elementWriter.setString(String.format("%s%06d", strValue, elementCount));
        elementCount++;
      }
      outerWriter.save();
    }
    writer.save();
    rowCount++;
  }
  // The number of rows is driven by the vector size; our row count
  // should include the overflow row.
  final int expectedCount = ValueVector.MAX_BUFFER_SIZE / (strLength * innerSize * outerSize);
  assertEquals(expectedCount + 1, rowCount);

  // The loader's row count should include only "visible" rows.
  assertEquals(expectedCount, writer.rowCount());

  // The total count should include invisible and look-ahead rows.
  assertEquals(expectedCount + 1, rsLoader.totalRowCount());

  // The harvested result should exclude the overflow row.
  RowSet result = fixture.wrap(rsLoader.harvest());
  assertEquals(expectedCount, result.rowCount());
  // Verify the data.
  RowSetReader reader = result.reader();
  ArrayReader outerReader = reader.array(1);
  ArrayReader innerReader = outerReader.array();
  ScalarReader strReader = innerReader.scalar();
  int readRowCount = 0;
  int readElementCount = 0;
  while (reader.next()) {
    assertEquals(readRowCount, reader.scalar(0).getInt());
    for (int i = 0; i < outerSize; i++) {
      assertTrue(outerReader.next());
      for (int j = 0; j < innerSize; j++) {
        assertTrue(innerReader.next());
        assertEquals(String.format("%s%06d", strValue, readElementCount), strReader.getString());
        readElementCount++;
      }
      assertFalse(innerReader.next());
    }
    assertFalse(outerReader.next());
    readRowCount++;
  }
  assertEquals(readRowCount, result.rowCount());
  result.clear();
  // Write 1,000 more rows; the new batch should begin with the overflow
  // row carried over from the previous batch.
  rsLoader.startBatch();
  for (int i = 0; i < 1000; i++) {
    writer.start();
    writer.scalar(0).setInt(rowCount);
    for (int j = 0; j < outerSize; j++) {
      for (int k = 0; k < innerSize; k++) {
        elementWriter.setString(String.format("%s%06d", strValue, elementCount));
        elementCount++;
      }
      outerWriter.save();
    }
    writer.save();
    rowCount++;
  }
  result = fixture.wrap(rsLoader.harvest());
  assertEquals(1001, result.rowCount());

  // Verify the second batch, starting from the carried-over overflow row.
  final int startCount = readRowCount;
  reader = result.reader();
  outerReader = reader.array(1);
  innerReader = outerReader.array();
  strReader = innerReader.scalar();
  while (reader.next()) {
    assertEquals(readRowCount, reader.scalar(0).getInt());
    for (int i = 0; i < outerSize; i++) {
      assertTrue(outerReader.next());
      for (int j = 0; j < innerSize; j++) {
        assertTrue(innerReader.next());
        assertEquals(String.format("%s%06d", strValue, readElementCount), strReader.getString());
        readElementCount++;
      }
      assertFalse(innerReader.next());
    }
    assertFalse(outerReader.next());
    readRowCount++;
  }
  assertEquals(readRowCount - startCount, result.rowCount());
  result.clear();
  rsLoader.close();
}
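The overflow handling exercised above is not specific to repeated lists. Distilled to its essentials, the writer lifecycle follows a fixed pattern: start a batch, write until the writer reports full (either at the row limit or on vector overflow, in which case the loader silently shifts the in-flight row to a look-ahead batch), harvest, and repeat. Below is a minimal sketch of that loop, assuming the same test fixture and imports as the snippets above; sendDownstream() is a hypothetical stand-in for whatever consumes each harvested batch.

// Minimal sketch of the ResultSetLoader batch lifecycle, using a single
// required INT column. sendDownstream() is hypothetical.
final TupleMetadata schema = new SchemaBuilder()
    .add("id", MinorType.INT)
    .buildSchema();
final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
    .rowCountLimit(ValueVector.MAX_ROW_COUNT)
    .readerSchema(schema)
    .build();
final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
final RowSetLoader writer = rsLoader.writer();

int nextId = 0;
final int totalRows = 100_000;
while (nextId < totalRows) {
  rsLoader.startBatch();
  // isFull() becomes true at the row limit or on vector overflow; in the
  // overflow case the loader moves the in-flight row to a look-ahead
  // batch, which becomes the first row of the next batch.
  while (!writer.isFull() && nextId < totalRows) {
    writer.start();
    writer.scalar("id").setInt(nextId++);
    writer.save();
  }
  sendDownstream(fixture.wrap(rsLoader.harvest()));
}
// Note: if the very last row overflowed, it remains in the loader's
// look-ahead batch; a real reader would start one more batch and
// harvest it. Elided here for brevity.
rsLoader.close();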
Use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
From the class TestResultSetLoaderUnions, method testVariantListDynamic.
/**
* Test a variant list created dynamically at load time.
* The list starts with no type, at which time it can hold
* only null values. Then we add a Varchar, and finally an
* Int.
* <p>
* This test is superficial. There are many odd cases to consider.
* <ul>
* <li>Write nulls to a list with no type. (This test ensures that
* adding a (nullable) scalar "does the right thing.")</li>
* <li>Add a map to the list. Maps carry no "bits" vector, so null
* list entries to that point are lost. (For maps, we could go straight
* to a union, with just a map, to preserve the null states. This whole
* area is a huge mess...)</li>
* <li>Do the type transitions when writing to a row. (The tests here
* do the transition between rows.)</li>
* </ul>
*
* The reason for the sparse coverage is that Drill barely supports lists
* and unions; most code is just plain broken. Our goal here is not to fix
* all those problems, just to leave things no more broken than before.
*/
@Test
public void testVariantListDynamic() {
  final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
  final RowSetLoader writer = rsLoader.writer();

  // Can write a batch as if this were a repeated Varchar, except
  // that any value can also be null.
  rsLoader.startBatch();
  writer.addColumn(MaterializedField.create("id", Types.required(MinorType.INT)));
  writer.addColumn(MaterializedField.create("list", Types.optional(MinorType.LIST)));

  // Sanity check: should be an array of variants because we said the
  // types within the list are expandable (which is the default).
  final ArrayWriter arrWriter = writer.array("list");
  assertEquals(ObjectType.VARIANT, arrWriter.entryType());
  final VariantWriter variant = arrWriter.variant();

  // We need to verify that the internal state is what we expect, so
  // the next assertion peeks inside the private bits of the union
  // writer. No client code should ever need to do this, of course.
  assertTrue(((UnionWriterImpl) variant).shim() instanceof EmptyListShim);

  // No types, so all we can do is add a null list, or a list of nulls.
  writer
      .addRow(1, null)
      .addRow(2, variantArray())
      .addRow(3, variantArray(null, null));

  // Add a String. Now we can create a list of strings and/or nulls.
  variant.addMember(MinorType.VARCHAR);
  assertTrue(variant.hasType(MinorType.VARCHAR));

  // Sanity check: sniff inside to ensure that the list contains a
  // single type.
  assertTrue(((UnionWriterImpl) variant).shim() instanceof SimpleListShim);
  assertTrue(((ListWriterImpl) arrWriter).vector().getDataVector() instanceof NullableVarCharVector);
  writer.addRow(4, variantArray("fred", null, "barney"));

  // Add an integer. The list vector should be promoted to a union.
  // Now we can add both types.
  variant.addMember(MinorType.INT);

  // Sanity check: sniff inside to ensure promotion to a union occurred.
  assertTrue(((UnionWriterImpl) variant).shim() instanceof UnionVectorShim);
  assertTrue(((ListWriterImpl) arrWriter).vector().getDataVector() instanceof UnionVector);
  writer.addRow(5, variantArray("wilma", null, 30));

  // Verify.
  final RowSet result = fixture.wrap(rsLoader.harvest());
  final TupleMetadata schema = new SchemaBuilder()
      .add("id", MinorType.INT)
      .addList("list")
        .addType(MinorType.VARCHAR)
        .addType(MinorType.INT)
        .resumeSchema()
      .buildSchema();
  final SingleRowSet expected = fixture.rowSetBuilder(schema)
      .addRow(1, null)
      .addRow(2, variantArray())
      .addRow(3, variantArray(null, null))
      .addRow(4, variantArray("fred", null, "barney"))
      .addRow(5, variantArray("wilma", null, 30))
      .build();
  RowSetUtilities.verify(expected, result);
}
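Because member types are added dynamically, a caller that discovers types as data arrives can guard promotion with hasType() before writing. Below is a small hedged sketch of rows that could have been written before the harvest() call above; the guard and the extra row are illustrative, not part of the Drill test.

// Hedged sketch continuing from the writer state above, before harvest():
// check which member types the variant already declares before adding.
if (!variant.hasType(MinorType.INT)) {
  variant.addMember(MinorType.INT); // already present here, so skipped
}
writer.addRow(6, variantArray("pebbles", null, 40));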
Use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
From the class TestResultSetLoaderUnions, method testUnionAddTypes.
@Test
public void testUnionAddTypes() {
  final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
  final RowSetLoader writer = rsLoader.writer();
  rsLoader.startBatch();

  // First row, (1, "first"): create types as we go.
  writer.start();
  writer.addColumn(SchemaBuilder.columnSchema("id", MinorType.INT, DataMode.REQUIRED));
  writer.scalar("id").setInt(1);
  writer.addColumn(SchemaBuilder.columnSchema("u", MinorType.UNION, DataMode.OPTIONAL));
  final VariantWriter variant = writer.column("u").variant();
  variant.member(MinorType.VARCHAR).scalar().setString("first");
  writer.save();

  // Second row, (2, {20, "fred"}): create types as we go.
  writer.start();
  writer.scalar("id").setInt(2);
  final TupleWriter innerMap = variant.member(MinorType.MAP).tuple();
  innerMap.addColumn(SchemaBuilder.columnSchema("a", MinorType.INT, DataMode.OPTIONAL));
  innerMap.scalar("a").setInt(20);
  innerMap.addColumn(SchemaBuilder.columnSchema("b", MinorType.VARCHAR, DataMode.OPTIONAL));
  innerMap.scalar("b").setString("fred");
  writer.save();

  // Write the remaining rows using the convenience methods and the
  // schema defined above.
  writer
      .addRow(3, null)
      .addRow(4, mapValue(40, null))
      .addRow(5, "last");

  // Verify the values.
  // (Relies on the row-set-level union tests having passed.)
  final TupleMetadata schema = new SchemaBuilder()
      .add("id", MinorType.INT)
      .addUnion("u")
        .addType(MinorType.VARCHAR)
        .addMap()
          .addNullable("a", MinorType.INT)
          .addNullable("b", MinorType.VARCHAR)
          .resumeUnion()
        .resumeSchema()
      .buildSchema();
  final SingleRowSet expected = fixture.rowSetBuilder(schema)
      .addRow(1, "first")
      .addRow(2, mapValue(20, "fred"))
      .addRow(3, null)
      .addRow(4, mapValue(40, null))
      .addRow(5, "last")
      .build();
  final RowSet result = fixture.wrap(rsLoader.harvest());
  RowSetUtilities.verify(expected, result);
}
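Once both member types exist, each subsequent row can select whichever member it needs through member(). Below is a hedged sketch of additional rows that could have been written before the harvest() call above; the loop and values are illustrative assumptions, not part of the Drill test.

// Hedged sketch continuing from the writer state above, before harvest():
// pick a union member per row via member(), alternating VARCHAR and MAP.
for (int i = 6; i <= 9; i++) {
  writer.start();
  writer.scalar("id").setInt(i);
  if (i % 2 == 0) {
    variant.member(MinorType.VARCHAR).scalar().setString("row-" + i);
  } else {
    // Columns "a" and "b" were added above; "b" is left null here.
    variant.member(MinorType.MAP).tuple().scalar("a").setInt(i * 10);
  }
  writer.save();
}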
Use of org.apache.drill.exec.physical.resultSet.ResultSetLoader in project drill by apache.
From the class TestResultSetLoaderUnions, method testVariantListWithMap.
/**
* Dynamically add a map to a list that also contains scalars.
* Assumes that {@link #testVariantListDynamic()} passed.
*/
@Test
public void testVariantListWithMap() {
  final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
  final RowSetLoader writer = rsLoader.writer();
  rsLoader.startBatch();
  writer.addColumn(MaterializedField.create("id", Types.required(MinorType.INT)));
  writer.addColumn(MaterializedField.create("list", Types.optional(MinorType.LIST)));
  final ArrayWriter arrWriter = writer.array("list");
  final VariantWriter variant = arrWriter.variant();

  // Add a null list, or a list of nulls.
  writer
      .addRow(1, null)
      .addRow(2, variantArray())
      .addRow(3, variantArray(null, null));

  // Add a String. Now we can create a list of strings and/or nulls.
  variant.addMember(MinorType.VARCHAR);
  writer.addRow(4, variantArray("fred", null, "barney"));

  // Add a map.
  final TupleWriter mapWriter = variant.addMember(MinorType.MAP).tuple();
  mapWriter.addColumn(MetadataUtils.newScalar("first", Types.optional(MinorType.VARCHAR)));
  mapWriter.addColumn(MetadataUtils.newScalar("last", Types.optional(MinorType.VARCHAR)));

  // Add a map-based record.
  writer.addRow(5, variantArray(mapValue("wilma", "flintstone"), mapValue("betty", "rubble")));

  // Verify.
  final RowSet result = fixture.wrap(rsLoader.harvest());
  final TupleMetadata schema = new SchemaBuilder()
      .add("id", MinorType.INT)
      .addList("list")
        .addType(MinorType.VARCHAR)
        .addMap()
          .addNullable("first", MinorType.VARCHAR)
          .addNullable("last", MinorType.VARCHAR)
          .resumeUnion()
        .resumeSchema()
      .buildSchema();
  final SingleRowSet expected = fixture.rowSetBuilder(schema)
      .addRow(1, null)
      .addRow(2, variantArray())
      .addRow(3, variantArray(null, null))
      .addRow(4, variantArray("fred", null, "barney"))
      .addRow(5, variantArray(mapValue("wilma", "flintstone"), mapValue("betty", "rubble")))
      .build();
  RowSetUtilities.verify(expected, result);
}