Use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by axbaretto.
From class TestResultSetLoaderMaps, method testMapAddition.
/**
* Test adding a map to a loader after writing the first row.
*/
@Test
public void testMapAddition() {
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder()
      .setSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  assertEquals(1, rsLoader.schemaVersion());
  RowSetLoader rootWriter = rsLoader.writer();

  // Start without the map. Add a map after the first row.
  rsLoader.startBatch();
  rootWriter.addRow(10);
  int mapIndex = rootWriter.addColumn(
      SchemaBuilder.columnSchema("m", MinorType.MAP, DataMode.REQUIRED));
  TupleWriter mapWriter = rootWriter.tuple(mapIndex);

  // Add a column to the map with the same name as the top-level column.
  // Verifies that the name spaces are independent.
  mapWriter.addColumn(
      SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.REQUIRED));
  rootWriter
      .addRow(20, objArray("fred"))
      .addRow(30, objArray("barney"));
  RowSet actual = fixture.wrap(rsLoader.harvest());
  assertEquals(3, rsLoader.schemaVersion());
  assertEquals(3, actual.rowCount());

  // Validate the batch. The row written before the map existed is
  // back-filled with an empty string for the required VARCHAR member.
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .add("a", MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
      .addRow(10, objArray(""))
      .addRow(20, objArray("fred"))
      .addRow(30, objArray("barney"))
      .build();
  new RowSetComparison(expected).verifyAndClearAll(actual);
  rsLoader.close();
}
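As a side note, the harvested batch could also be inspected by hand with the reader API that the later examples on this page use. A minimal sketch, not part of the original test, which assumes the batch has not already been cleared by verifyAndClearAll() above:

RowSetReader reader = actual.reader();
while (reader.next()) {
  // The top-level "a" and the map's "a" live in independent name spaces.
  int topA = reader.scalar("a").getInt();
  String mapA = reader.tuple("m").scalar("a").getString();
}
actual.clear();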
Use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by axbaretto.
From class TestResultSetLoaderMaps, method testMapWithArray.
/**
 * Test a map that contains a scalar array. There is no reason to suspect
 * that this will cause problems, since the array writer is fully tested in
 * the accessor subsystem. Still, we need to test the cardinality methods
 * of the loader layer.
 */
@Test
public void testMapWithArray() {
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .addArray("c", MinorType.INT)
        .addArray("d", MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder()
      .setSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();

  // Write some rows
  rsLoader.startBatch();
  rootWriter
      .addRow(10, objArray(intArray(110, 120, 130), strArray("d1.1", "d1.2", "d1.3", "d1.4")))
      .addRow(20, objArray(intArray(210), strArray()))
      .addRow(30, objArray(intArray(), strArray("d3.1")));

  // Validate the first batch
  RowSet actual = fixture.wrap(rsLoader.harvest());
  SingleRowSet expected = fixture.rowSetBuilder(schema)
      .addRow(10, objArray(intArray(110, 120, 130), strArray("d1.1", "d1.2", "d1.3", "d1.4")))
      .addRow(20, objArray(intArray(210), strArray()))
      .addRow(30, objArray(intArray(), strArray("d3.1")))
      .build();
  new RowSetComparison(expected).verifyAndClearAll(actual);

  // Add another array after the first row in the second batch.
  rsLoader.startBatch();
  rootWriter
      .addRow(40, objArray(intArray(410, 420), strArray("d4.1", "d4.2")))
      .addRow(50, objArray(intArray(510), strArray("d5.1")));
  TupleWriter mapWriter = rootWriter.tuple("m");
  mapWriter.addColumn(
      SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.REPEATED));
  rootWriter
      .addRow(60, objArray(intArray(610, 620), strArray("d6.1", "d6.2"), strArray("e6.1", "e6.2")))
      .addRow(70, objArray(intArray(710), strArray(), strArray("e7.1", "e7.2")));

  // Validate the second batch. The new array should have been back-filled
  // with empty offsets for the missing rows.
  actual = fixture.wrap(rsLoader.harvest());
  // System.out.println(actual.schema().toString());
  expected = fixture.rowSetBuilder(actual.schema())
      .addRow(40, objArray(intArray(410, 420), strArray("d4.1", "d4.2"), strArray()))
      .addRow(50, objArray(intArray(510), strArray("d5.1"), strArray()))
      .addRow(60, objArray(intArray(610, 620), strArray("d6.1", "d6.2"), strArray("e6.1", "e6.2")))
      .addRow(70, objArray(intArray(710), strArray(), strArray("e7.1", "e7.2")))
      .build();
  // expected.print();
  new RowSetComparison(expected).verifyAndClearAll(actual);
  rsLoader.close();
}
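Note that the expected rowset for the second batch borrows actual.schema() rather than spelling out the post-addition schema. For reference, that schema could equivalently be built by hand; a sketch using the same SchemaBuilder calls seen throughout this page:

TupleMetadata secondBatchSchema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addMap("m")
      .addArray("c", MinorType.INT)
      .addArray("d", MinorType.VARCHAR)
      .addArray("e", MinorType.VARCHAR) // added mid-batch; back-filled with empty arrays
      .resumeSchema()
    .buildSchema();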
Use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by axbaretto.
From class TestResultSetLoaderMapArray, method testCloseWithoutHarvest.
/**
* Test that memory is released if the loader is closed with an active
* batch (that is, before the batch is harvested.)
*/
@Test
public void testCloseWithoutHarvest() {
  TupleMetadata schema = new SchemaBuilder()
      .addMapArray("m")
        .add("a", MinorType.INT)
        .add("b", MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder()
      .setSchema(schema)
      .setRowCountLimit(ValueVector.MAX_ROW_COUNT)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();
  ArrayWriter maWriter = rootWriter.array("m");
  TupleWriter mWriter = maWriter.tuple();
  rsLoader.startBatch();
  for (int i = 0; i < 40; i++) {
    rootWriter.start();
    // Write three (identical) map entries per row.
    for (int j = 0; j < 3; j++) {
      mWriter.scalar("a").setInt(i);
      mWriter.scalar("b").setString("b-" + i);
      maWriter.save();
    }
    rootWriter.save();
  }

  // Don't harvest the batch. The allocator will complain if the
  // loader does not release memory.
  rsLoader.close();
}
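Outside of tests, the same release guarantee is usually enforced with try/finally rather than relying on the caller to remember close(). A hedged sketch using only the loader API shown above:

ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
try {
  rsLoader.startBatch();
  // ... write rows, harvest batches ...
} finally {
  // Releases vector memory even if an active batch was never harvested.
  rsLoader.close();
}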
Use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by axbaretto.
From class TestResultSetLoaderMapArray, method testOmittedValues.
/**
* Check that the "fill-empties" logic descends down into
* a repeated map.
*/
@Test
public void testOmittedValues() {
  TupleMetadata schema = new SchemaBuilder()
      .add("id", MinorType.INT)
      .addMapArray("m")
        .addNullable("a", MinorType.INT)
        .addNullable("b", MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder()
      .setSchema(schema)
      .setRowCountLimit(ValueVector.MAX_ROW_COUNT)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();
  int mapSkip = 5;
  int entrySkip = 3;
  int rowCount = 1000;
  int entryCount = 10;
  rsLoader.startBatch();
  ArrayWriter maWriter = rootWriter.array("m");
  TupleWriter mWriter = maWriter.tuple();

  // Every mapSkip-th row gets an empty map array; within the other rows,
  // every entrySkip-th entry is saved with no values set.
  for (int i = 0; i < rowCount; i++) {
    rootWriter.start();
    rootWriter.scalar(0).setInt(i);
    if (i % mapSkip != 0) {
      for (int j = 0; j < entryCount; j++) {
        if (j % entrySkip != 0) {
          mWriter.scalar(0).setInt(i * entryCount + j);
          mWriter.scalar(1).setString("b-" + i + "." + j);
        }
        maWriter.save();
      }
    }
    rootWriter.save();
  }
  RowSet result = fixture.wrap(rsLoader.harvest());
  assertEquals(rowCount, result.rowCount());

  // Read back: skipped rows show an empty array; skipped entries show nulls.
  RowSetReader reader = result.reader();
  ArrayReader maReader = reader.array("m");
  TupleReader mReader = maReader.tuple();
  for (int i = 0; i < rowCount; i++) {
    assertTrue(reader.next());
    assertEquals(i, reader.scalar(0).getInt());
    if (i % mapSkip == 0) {
      assertEquals(0, maReader.size());
      continue;
    }
    assertEquals(entryCount, maReader.size());
    for (int j = 0; j < entryCount; j++) {
      maReader.setPosn(j);
      if (j % entrySkip == 0) {
        assertTrue(mReader.scalar(0).isNull());
        assertTrue(mReader.scalar(1).isNull());
      } else {
        assertFalse(mReader.scalar(0).isNull());
        assertFalse(mReader.scalar(1).isNull());
        assertEquals(i * entryCount + j, mReader.scalar(0).getInt());
        assertEquals("b-" + i + "." + j, mReader.scalar(1).getString());
      }
    }
  }
  result.clear();
  rsLoader.close();
}
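The write loop above mixes two distinct omission cases that read back differently. A minimal sketch isolating them, reusing the writers from the test (rowId is a hypothetical placeholder):

// Case 1: row saved with no map entries at all; reads back as an empty
// array (maReader.size() == 0).
rootWriter.start();
rootWriter.scalar(0).setInt(rowId);
rootWriter.save();

// Case 2: entry saved with no column values; the entry exists and both
// nullable columns read back as null.
rootWriter.start();
rootWriter.scalar(0).setInt(rowId + 1);
maWriter.save();
rootWriter.save();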
Use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by axbaretto.
From class TestResultSetLoaderMapArray, method testDoubleNestedArray.
/**
 * Test a doubly-nested array of maps.
 */
@Test
public void testDoubleNestedArray() {
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMapArray("m1")
        .add("b", MinorType.INT)
        .addMapArray("m2")
          .add("c", MinorType.INT)
          .addArray("d", MinorType.VARCHAR)
          .resumeMap()
        .resumeSchema()
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder()
      .setSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();
  rsLoader.startBatch();

  ScalarWriter aWriter = rootWriter.scalar("a");
  ArrayWriter a1Writer = rootWriter.array("m1");
  TupleWriter m1Writer = a1Writer.tuple();
  ScalarWriter bWriter = m1Writer.scalar("b");
  ArrayWriter a2Writer = m1Writer.array("m2");
  TupleWriter m2Writer = a2Writer.tuple();
  ScalarWriter cWriter = m2Writer.scalar("c");
  ScalarWriter dWriter = m2Writer.array("d").scalar();

  for (int i = 0; i < 5; i++) {
    rootWriter.start();
    aWriter.setInt(i);
    for (int j = 0; j < 4; j++) {
      int a1Key = i + 10 + j;
      bWriter.setInt(a1Key);
      for (int k = 0; k < 3; k++) {
        int a2Key = a1Key * 10 + k;
        cWriter.setInt(a2Key);
        for (int l = 0; l < 2; l++) {
          dWriter.setString("d-" + (a2Key * 10 + l));
        }
        a2Writer.save();
      }
      a1Writer.save();
    }
    rootWriter.save();
  }

  RowSet results = fixture.wrap(rsLoader.harvest());
  RowSetReader reader = results.reader();
  ScalarReader aReader = reader.scalar("a");
  ArrayReader a1Reader = reader.array("m1");
  TupleReader m1Reader = a1Reader.tuple();
  ScalarReader bReader = m1Reader.scalar("b");
  ArrayReader a2Reader = m1Reader.array("m2");
  TupleReader m2Reader = a2Reader.tuple();
  ScalarReader cReader = m2Reader.scalar("c");
  ScalarElementReader dReader = m2Reader.elements("d");
  for (int i = 0; i < 5; i++) {
    reader.next();
    assertEquals(i, aReader.getInt());
    for (int j = 0; j < 4; j++) {
      a1Reader.setPosn(j);
      int a1Key = i + 10 + j;
      assertEquals(a1Key, bReader.getInt());
      for (int k = 0; k < 3; k++) {
        a2Reader.setPosn(k);
        int a2Key = a1Key * 10 + k;
        assertEquals(a2Key, cReader.getInt());
        for (int l = 0; l < 2; l++) {
          assertEquals("d-" + (a2Key * 10 + l), dReader.getString(l));
        }
      }
    }
  }
  rsLoader.close();
}
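The essential discipline in the write loop is that each nesting level needs its own save() call, innermost first. A stripped-down sketch of one row with a single entry at each level, reusing the writers declared in the test:

rootWriter.start();      // begin the top-level row
aWriter.setInt(1);
bWriter.setInt(11);      // one m1 entry
cWriter.setInt(110);     // one m2 entry inside it
dWriter.setString("d-1100");
a2Writer.save();         // commit the inner map-array entry first
a1Writer.save();         // then the outer map-array entry
rootWriter.save();       // finally the row itself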