Example usage of org.apache.drill.exec.vector.complex.DictVector in the Apache Drill project: class ObjectDictWriter, method buildDictArray.
/**
 * Builds an array-of-dict object writer over the given repeated dict vector.
 * A null vector produces "dummy" writers for an unprojected column.
 */
public static ArrayObjectWriter buildDictArray(ColumnMetadata metadata, RepeatedDictVector vector, List<AbstractObjectWriter> keyValueWriters) {
  // Unwrap the per-entry dict vector when a backing vector exists.
  final DictVector entryVector = (vector == null) ? null : (DictVector) vector.getDataVector();
  final ObjectDictWriter.DictObjectWriter entryWriter = buildDict(metadata, entryVector, keyValueWriters);
  // With no vector, fall back to a dummy array writer (no offsets to track).
  final AbstractArrayWriter outerWriter = (vector == null)
      ? new DummyArrayWriter(metadata, entryWriter)
      : new ObjectArrayWriter(metadata, vector.getOffsetVector(), entryWriter);
  return new ArrayObjectWriter(outerWriter);
}
Example usage of org.apache.drill.exec.vector.complex.DictVector in the Apache Drill project: class TestResultSetLoaderDictArray, method testBasics.
@Test
public void testBasics() {
  // Row schema: an INT column plus a repeated DICT<INT, VARCHAR> column.
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addDictArray("d", MinorType.INT)
        .value(MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
      .readerSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();

  // The writer should expose the declared dict-array structure.
  TupleMetadata writerSchema = rootWriter.tupleSchema();
  assertEquals(2, writerSchema.size());
  assertTrue(writerSchema.metadata(1).isArray());
  assertTrue(writerSchema.metadata(1).isDict());
  assertEquals(2, writerSchema.metadata("d").tupleSchema().size());
  assertEquals(2, writerSchema.column("d").getChildren().size());
  DictWriter dictWriter = rootWriter.array("d").dict();
  assertSame(writerSchema.metadata("d").tupleSchema(), dictWriter.schema().tupleSchema());

  // Load a few rows, including one with an empty dict array.
  rsLoader.startBatch();
  rootWriter.addRow(10, objArray(map(110, "d1.1", 111, "d1.2", 112, "d1.3"), map(120, "d2.2")));
  rootWriter.addRow(20, objArray());
  rootWriter.addRow(30, objArray(map(310, "d3.1", 311, "d3.2", 313, "d3.4", 317, "d3.9"), map(320, "d4.2"), map(332, "d5.1", 339, "d5.5", 337, "d5.6")));

  // Harvest the batch and inspect the resulting vector structure.
  RowSet actual = fixture.wrap(rsLoader.harvest());
  RepeatedDictVector repeatedDictVector = (RepeatedDictVector) actual.container().getValueVector(1).getValueVector();
  // The repeated vector wraps exactly one child: the per-entry DictVector.
  MaterializedField dictArrayField = repeatedDictVector.getField();
  assertEquals(1, dictArrayField.getChildren().size());
  DictVector dictVector = (DictVector) repeatedDictVector.getDataVector();
  Iterator<MaterializedField> children = dictVector.getField().getChildren().iterator();
  assertTrue(dictWriter.keyWriter().schema().schema().isEquivalent(children.next()));
  assertTrue(dictWriter.valueWriter().scalar().schema().schema().isEquivalent(children.next()));

  // The written rows should round-trip unchanged.
  SingleRowSet expected = fixture.rowSetBuilder(schema)
      .addRow(10, objArray(map(110, "d1.1", 111, "d1.2", 112, "d1.3"), map(120, "d2.2")))
      .addRow(20, objArray())
      .addRow(30, objArray(map(310, "d3.1", 311, "d3.2", 313, "d3.4", 317, "d3.9"), map(320, "d4.2"), map(332, "d5.1", 339, "d5.5", 337, "d5.6")))
      .build();
  RowSetUtilities.verify(expected, actual);
  rsLoader.close();
}
Example usage of org.apache.drill.exec.vector.complex.DictVector in the Apache Drill project: class TestResultSetLoaderDicts, method testBasics.
@Test
public void testBasics() {
  // Row schema: an INT column plus a DICT<INT, VARCHAR> column.
  final TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addDict("d", MinorType.INT)
        .value(MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
      .readerSchema(schema)
      .build();
  final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  assertFalse(rsLoader.isProjectionEmpty());
  final RowSetLoader rootWriter = rsLoader.writer();

  // The writer should reflect the declared dict structure.
  assertEquals(4, rsLoader.schemaVersion());
  final TupleMetadata writerSchema = rootWriter.tupleSchema();
  assertEquals(2, writerSchema.size());
  assertTrue(writerSchema.metadata(1).isDict());
  assertEquals(2, writerSchema.metadata("d").tupleSchema().size());
  assertEquals(2, writerSchema.column("d").getChildren().size());

  rsLoader.startBatch();

  // First row: drive the column writers directly, one dict entry at a time.
  final ScalarWriter aWriter = rootWriter.scalar("a");
  final DictWriter dictWriter = rootWriter.dict("d");
  final ScalarWriter keyWriter = dictWriter.keyWriter();
  final ScalarWriter valueWriter = dictWriter.valueWriter().scalar();
  rootWriter.start();
  aWriter.setInt(10);
  keyWriter.setInt(110);
  valueWriter.setString("fred");
  dictWriter.save();
  keyWriter.setInt(111);
  valueWriter.setString("george");
  dictWriter.save();
  rootWriter.save();

  // Second row: use the test-time shorthand instead.
  rootWriter.addRow(20, map(210, "barney", 211, "bart", 212, "jerry"));

  // Harvest and check batch-level state.
  final RowSet actual = fixture.wrap(rsLoader.harvest());
  assertEquals(4, rsLoader.schemaVersion());
  assertEquals(2, actual.rowCount());
  final DictVector dictVector = (DictVector) actual.container().getValueVector(1).getValueVector();
  assertEquals(2, dictVector.getAccessor().getValueCount());

  // Both rows should round-trip intact.
  final SingleRowSet expected = fixture.rowSetBuilder(schema)
      .addRow(10, map(110, "fred", 111, "george"))
      .addRow(20, map(210, "barney", 211, "bart", 212, "jerry"))
      .build();
  RowSetUtilities.verify(expected, actual);
  rsLoader.close();
}
Example usage of org.apache.drill.exec.vector.complex.DictVector in the Apache Drill project: class TestResultSetLoaderDicts, method testDictAddition.
/**
 * Verifies that a dict column can be added to a loader after the first
 * row has been written: the pre-existing row receives an empty dict.
 */
@Test
public void testDictAddition() {
  final TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .buildSchema();
  final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
      .readerSchema(schema)
      .build();
  final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  assertEquals(1, rsLoader.schemaVersion());
  final RowSetLoader rootWriter = rsLoader.writer();

  // Write the first row before the dict column exists.
  rsLoader.startBatch();
  rootWriter.addRow(10);

  // Add a DICT<VARCHAR, VARCHAR> column on the fly.
  final MaterializedField dictField = SchemaBuilder.columnSchema("d", MinorType.DICT, DataMode.REQUIRED);
  dictField.addChild(SchemaBuilder.columnSchema(DictVector.FIELD_KEY_NAME, MinorType.VARCHAR, DataMode.REQUIRED));
  dictField.addChild(SchemaBuilder.columnSchema(DictVector.FIELD_VALUE_NAME, MinorType.VARCHAR, DataMode.REQUIRED));
  final DictColumnMetadata dictMetadata = MetadataUtils.newDict(dictField);
  final int dictIndex = rootWriter.addColumn(dictMetadata);
  final DictWriter dictWriter = rootWriter.dict(dictIndex);

  // The writer schema should now include the new column.
  final TupleMetadata writerSchema = rootWriter.tupleSchema();
  assertTrue(writerSchema.metadata(1).isDict());
  assertEquals(2, writerSchema.metadata("d").tupleSchema().size());
  assertEquals(2, writerSchema.column("d").getChildren().size());
  assertEquals(2, writerSchema.size());
  assertEquals(2, dictWriter.schema().tupleSchema().size());

  rootWriter.addRow(20, map("name", "fred", "lastname", "smith"));
  rootWriter.addRow(30, map("name", "barney", "lastname", "johnson"));

  final RowSet actual = fixture.wrap(rsLoader.harvest());
  assertEquals(4, rsLoader.schemaVersion());
  assertEquals(3, actual.rowCount());
  final DictVector dictVector = (DictVector) actual.container().getValueVector(1).getValueVector();
  assertEquals(2, dictVector.getField().getChildren().size());

  // The row written before the column existed should show an empty dict.
  final TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addDict("d", MinorType.VARCHAR)
        .value(MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  final SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
      .addRow(10, map())
      .addRow(20, map("name", "fred", "lastname", "smith"))
      .addRow(30, map("name", "barney", "lastname", "johnson"))
      .build();
  RowSetUtilities.verify(expected, actual);
  rsLoader.close();
}
Example usage of org.apache.drill.exec.vector.complex.DictVector in the Apache Drill project: class TestDictVector, method testGetByKey.
@Test
public void testGetByKey() {
  final MaterializedField field = MaterializedField.create("map", DictVector.TYPE);
  try (DictVector dictVector = new DictVector(field, allocator, null)) {
    dictVector.allocateNew();

    // Rows of FLOAT4 -> BIGINT entries, including one empty dict.
    final List<Map<Object, Object>> rows = Arrays.asList(
        TestBuilder.mapOfObject(4f, 1L, 5.3f, 2L, 0.3f, 3L, -0.2f, 4L, 102.07f, 5L),
        TestBuilder.mapOfObject(45f, 6L, 9.2f, 7L),
        TestBuilder.mapOfObject(4.01f, 8L, 4f, 9L, -2.3f, 10L),
        TestBuilder.mapOfObject(-2.5f, 11L),
        TestBuilder.mapOfObject(),
        TestBuilder.mapOfObject(11f, 12L, 9.73f, 13L, 4f, 14L));

    // Populate the vector, one dict row at a time.
    final BaseWriter.DictWriter writer = new SingleDictWriter(dictVector, null);
    int rowIndex = 0;
    for (Map<Object, Object> row : rows) {
      writer.setPosition(rowIndex++);
      writer.start();
      for (Map.Entry<Object, Object> entry : row.entrySet()) {
        writer.startKeyValuePair();
        writer.float4(DictVector.FIELD_KEY_NAME).writeFloat4((float) entry.getKey());
        writer.bigInt(DictVector.FIELD_VALUE_NAME).writeBigInt((long) entry.getValue());
        writer.endKeyValuePair();
      }
      writer.end();
    }

    // Read values back by key. Calcite limits path segments to String
    // (names) and int (array indices), so the float key is passed as a
    // String; DictReader derives the actual key type internally.
    final BaseReader.DictReader reader = dictVector.getReader();
    final float key = 4.0f;
    final String stringKey = String.valueOf(key);
    final NullableBigIntHolder valueHolder = new NullableBigIntHolder();
    rowIndex = 0;
    for (Map<Object, Object> row : rows) {
      reader.setPosition(rowIndex++);
      reader.next();
      reader.read(stringKey, valueHolder);
      assertEquals(row.get(key), valueHolder.isSet == 1 ? valueHolder.value : null);
      // Reset the holder so it can be reused for the next row.
      valueHolder.isSet = 0;
    }
  }
}
Aggregations