Use of org.apache.drill.exec.vector.accessor.DictWriter in project drill by apache.
The class TestRowSet, method testDictStructure.
@Test
public void testDictStructure() {
final String dictName = "d";
final TupleMetadata schema = new SchemaBuilder()
    .add("id", MinorType.INT)
    .addDict(dictName, MinorType.INT) // required int
      .value(MinorType.VARCHAR)
      .resumeSchema()
    .buildSchema();
final ExtendableRowSet rowSet = fixture.rowSet(schema);
final RowSetWriter writer = rowSet.writer();
// Dict
// Pick out components and lightly test. (Assumes structure
// tested earlier is still valid, so no need to exhaustively
// test again.)
assertEquals(ObjectType.ARRAY, writer.column(dictName).type());
assertTrue(writer.column(dictName).schema().isDict());
final ScalarWriter idWriter = writer.column(0).scalar();
final DictWriter dictWriter = writer.column(1).dict();
assertEquals(ValueType.INTEGER, dictWriter.keyType());
assertEquals(ObjectType.SCALAR, dictWriter.valueType());
final ScalarWriter keyWriter = dictWriter.keyWriter();
final ScalarWriter valueWriter = dictWriter.valueWriter().scalar();
assertEquals(ValueType.INTEGER, keyWriter.valueType());
assertEquals(ValueType.STRING, valueWriter.valueType());
// Write data
idWriter.setInt(1);
keyWriter.setInt(11);
valueWriter.setString("a");
// Advance to next entry position
dictWriter.save();
keyWriter.setInt(12);
valueWriter.setString("b");
dictWriter.save();
writer.save();
idWriter.setInt(2);
keyWriter.setInt(21);
valueWriter.setString("c");
dictWriter.save();
writer.save();
idWriter.setInt(3);
keyWriter.setInt(31);
valueWriter.setString("d");
dictWriter.save();
keyWriter.setInt(32);
valueWriter.setString("e");
dictWriter.save();
writer.save();
// Finish the row set and get a reader.
final SingleRowSet actual = writer.done();
final RowSetReader reader = actual.reader();
// Verify reader structure
assertEquals(ObjectType.ARRAY, reader.column(dictName).type());
final DictReader dictReader = reader.dict(1);
assertEquals(ObjectType.ARRAY, dictReader.type());
assertEquals(ValueType.INTEGER, dictReader.keyColumnType());
assertEquals(ObjectType.SCALAR, dictReader.valueColumnType());
// Row 1: position the value reader at the entry corresponding to a given key
assertTrue(reader.next());
// dict itself is not null
assertFalse(dictReader.isNull());
dictReader.getAsString(); // exercises the dict's string rendering; the result is not asserted here
final KeyAccessor keyAccessor = dictReader.keyAccessor();
final ScalarReader valueReader = dictReader.valueReader().scalar();
assertTrue(keyAccessor.find(12));
assertEquals("b", valueReader.getString());
assertTrue(keyAccessor.find(11));
assertEquals("a", valueReader.getString());
// compare entire dict
Map<Object, Object> map = map(11, "a", 12, "b");
assertEquals(map, dictReader.getObject());
// Row 2
assertTrue(reader.next());
// the dict does not contain an entry with the key
assertFalse(keyAccessor.find(22));
assertTrue(keyAccessor.find(21));
assertEquals("c", valueReader.getString());
map = map(21, "c");
assertEquals(map, dictReader.getObject());
// Row 3
assertTrue(reader.next());
assertTrue(keyAccessor.find(31));
assertEquals("d", valueReader.getString());
assertFalse(keyAccessor.find(33));
assertTrue(keyAccessor.find(32));
assertEquals("e", valueReader.getString());
map = map(31, "d", 32, "e");
assertEquals(map, dictReader.getObject());
assertFalse(reader.next());
// Verify that the dict accessor's value count was set.
final DictVector dictVector = (DictVector) actual.container().getValueVector(1).getValueVector();
assertEquals(3, dictVector.getAccessor().getValueCount());
final SingleRowSet expected = fixture.rowSetBuilder(schema)
    .addRow(1, map(11, "a", 12, "b"))
    .addRow(2, map(21, "c"))
    .addRow(3, map(31, "d", 32, "e"))
    .build();
RowSetUtilities.verify(expected, actual);
}
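For quick reference, the write/read protocol exercised above boils down to a handful of calls: write key and value, close the entry with DictWriter.save(), close the row with RowSetWriter.save(), then read back by positioning the value reader through KeyAccessor.find(). The following is a hypothetical condensed sketch, not part of the Drill sources; it reuses the same test fixture and a single-column schema invented here for illustration.
@Test
public void testDictStructureCondensedSketch() {
  // Hypothetical condensed walk-through of the same write/read protocol.
  final TupleMetadata dictOnlySchema = new SchemaBuilder()
      .addDict("d", MinorType.INT)
        .value(MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  final RowSetWriter writer = fixture.rowSet(dictOnlySchema).writer();
  final DictWriter dictWriter = writer.column(0).dict();
  final ScalarWriter keyWriter = dictWriter.keyWriter();
  final ScalarWriter valueWriter = dictWriter.valueWriter().scalar();
  keyWriter.setInt(11);        // write one entry: key ...
  valueWriter.setString("a");  // ... and its value
  dictWriter.save();           // close the dict entry
  writer.save();               // close the row
  final SingleRowSet rowSet = writer.done();
  final RowSetReader reader = rowSet.reader();
  final DictReader dictReader = reader.dict(0);
  assertTrue(reader.next());
  assertTrue(dictReader.keyAccessor().find(11)); // position the value reader by key
  assertEquals("a", dictReader.valueReader().scalar().getString());
  rowSet.clear();
}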
Use of org.apache.drill.exec.vector.accessor.DictWriter in project drill by apache.
The class TestRowSet, method testDictStructureMapValue.
@Test
public void testDictStructureMapValue() {
final String dictName = "d";
final int bScale = 1;
final TupleMetadata schema = new SchemaBuilder()
    .add("id", MinorType.INT)
    .addDict(dictName, MinorType.INT)
      .mapValue()
        .add("a", MinorType.INT)
        .add("b", MinorType.VARDECIMAL, 8, bScale)
        .resumeDict()
      .resumeSchema()
    .buildSchema();
final ExtendableRowSet rowSet = fixture.rowSet(schema);
final RowSetWriter writer = rowSet.writer();
// Dict with Map value
assertEquals(ObjectType.ARRAY, writer.column(dictName).type());
final ScalarWriter idWriter = writer.scalar(0);
final DictWriter dictWriter = writer.column(1).dict();
assertEquals(ValueType.INTEGER, dictWriter.keyType());
assertEquals(ObjectType.TUPLE, dictWriter.valueType());
final ScalarWriter keyWriter = dictWriter.keyWriter();
final TupleWriter valueWriter = dictWriter.valueWriter().tuple();
assertEquals(ValueType.INTEGER, keyWriter.valueType());
ScalarWriter aWriter = valueWriter.scalar("a");
ScalarWriter bWriter = valueWriter.scalar("b");
assertEquals(ValueType.INTEGER, aWriter.valueType());
assertEquals(ValueType.DECIMAL, bWriter.valueType());
// Write data
idWriter.setInt(1);
keyWriter.setInt(11);
aWriter.setInt(10);
bWriter.setDecimal(BigDecimal.valueOf(1));
// advance to next entry position
dictWriter.save();
keyWriter.setInt(12);
aWriter.setInt(11);
bWriter.setDecimal(BigDecimal.valueOf(2));
dictWriter.save();
writer.save();
idWriter.setInt(2);
keyWriter.setInt(21);
aWriter.setInt(20);
bWriter.setDecimal(BigDecimal.valueOf(3));
dictWriter.save();
writer.save();
idWriter.setInt(3);
keyWriter.setInt(31);
aWriter.setInt(30);
bWriter.setDecimal(BigDecimal.valueOf(4));
dictWriter.save();
keyWriter.setInt(32);
aWriter.setInt(31);
bWriter.setDecimal(BigDecimal.valueOf(5));
dictWriter.save();
keyWriter.setInt(33);
aWriter.setInt(32);
bWriter.setDecimal(BigDecimal.valueOf(6));
dictWriter.save();
writer.save();
// Finish the row set and get a reader.
final SingleRowSet actual = writer.done();
final RowSetReader reader = actual.reader();
// Verify reader structure
assertEquals(ObjectType.ARRAY, reader.column(dictName).type());
final DictReader dictReader = reader.dict(1);
assertEquals(ObjectType.ARRAY, dictReader.type());
assertEquals(ValueType.INTEGER, dictReader.keyColumnType());
assertEquals(ObjectType.TUPLE, dictReader.valueColumnType());
final KeyAccessor keyAccessor = dictReader.keyAccessor();
final TupleReader valueReader = dictReader.valueReader().tuple();
// Row 1: position the value reader at the entry corresponding to a given key
assertTrue(reader.next());
// dict itself is not null
assertFalse(dictReader.isNull());
assertTrue(keyAccessor.find(12));
assertEquals(11, valueReader.scalar("a").getInt());
assertEquals(BigDecimal.valueOf(2.0), valueReader.scalar("b").getDecimal());
// MapReader#getObject() returns a List containing the values of each column
// rather than a mapping of column name to its value, hence a List is expected for the dict's values.
Map<Object, Object> map = map(11, Arrays.asList(10, BigDecimal.valueOf(1.0)), 12, Arrays.asList(11, BigDecimal.valueOf(2.0)));
assertEquals(map, dictReader.getObject());
// Row 2
assertTrue(reader.next());
assertFalse(keyAccessor.find(222));
assertTrue(keyAccessor.find(21));
assertEquals(Arrays.asList(20, BigDecimal.valueOf(3.0)), valueReader.getObject());
map = map(21, Arrays.asList(20, BigDecimal.valueOf(3.0)));
assertEquals(map, dictReader.getObject());
// Row 3
assertTrue(reader.next());
assertTrue(keyAccessor.find(32));
assertFalse(valueReader.isNull());
assertEquals(31, valueReader.scalar("a").getInt());
assertEquals(BigDecimal.valueOf(5.0), valueReader.scalar("b").getDecimal());
assertTrue(keyAccessor.find(31));
assertEquals(30, valueReader.scalar("a").getInt());
assertEquals(BigDecimal.valueOf(4.0), valueReader.scalar("b").getDecimal());
assertFalse(keyAccessor.find(404));
map = map(31, Arrays.asList(30, BigDecimal.valueOf(4.0)), 32, Arrays.asList(31, BigDecimal.valueOf(5.0)), 33, Arrays.asList(32, BigDecimal.valueOf(6.0)));
assertEquals(map, dictReader.getObject());
assertFalse(reader.next());
// Verify that the dict accessor's value count was set.
final DictVector dictVector = (DictVector) actual.container().getValueVector(1).getValueVector();
assertEquals(3, dictVector.getAccessor().getValueCount());
final SingleRowSet expected = fixture.rowSetBuilder(schema)
    .addRow(1, map(
        11, objArray(10, BigDecimal.valueOf(1.0)),
        12, objArray(11, BigDecimal.valueOf(2.0))))
    .addRow(2, map(21, objArray(20, BigDecimal.valueOf(3.0))))
    .addRow(3, map(
        31, objArray(30, BigDecimal.valueOf(4.0)),
        32, objArray(31, BigDecimal.valueOf(5.0)),
        33, objArray(32, BigDecimal.valueOf(6.0))))
    .build();
RowSetUtilities.verify(expected, actual);
}
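To make the note about MapReader#getObject() concrete: within row 1 of the data above, the map value for a given key reads back as a positional list of column values, not as a name-to-value map. A hypothetical extra check, reusing keyAccessor and valueReader from the test:
assertTrue(keyAccessor.find(11));
// the map value {a: 10, b: 1.0} comes back as [10, 1.0], in column order
assertEquals(Arrays.asList(10, BigDecimal.valueOf(1.0)), valueReader.getObject());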
Use of org.apache.drill.exec.vector.accessor.DictWriter in project drill by apache.
The class TestResultSetLoaderDictArray, method testKeyOverflow.
@Test
public void testKeyOverflow() {
TupleMetadata schema = new SchemaBuilder()
    .addDictArray("d", MinorType.VARCHAR)
      .value(MinorType.INT)
      .resumeSchema()
    .buildSchema();
ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
    .rowCountLimit(ValueVector.MAX_ROW_COUNT)
    .readerSchema(schema)
    .build();
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
RowSetLoader rootWriter = rsLoader.writer();
rsLoader.startBatch();
byte[] key = new byte[523];
Arrays.fill(key, (byte) 'X');
// number of dicts in each row
int arraySize = 3;
// number of entries in each dict
int dictSize = 1;
// Number of rows should be driven by vector size.
// Our row count should include the overflow row
ArrayWriter arrayDictWriter = rootWriter.array(0);
DictWriter dictWriter = arrayDictWriter.dict();
ScalarWriter keyWriter = dictWriter.keyWriter();
ScalarWriter valueWriter = dictWriter.valueWriter().scalar();
int expectedCount = ValueVector.MAX_BUFFER_SIZE / (key.length * dictSize * arraySize);
{
int count = 0;
while (!rootWriter.isFull()) {
rootWriter.start();
for (int i = 0; i < arraySize; i++) {
for (int j = 0; j < dictSize; j++) {
keyWriter.setBytes(key, key.length);
// acts as a placeholder, the actual value is not important
valueWriter.setInt(0);
// not necessary for scalars, just for completeness
dictWriter.save();
}
arrayDictWriter.save();
}
rootWriter.save();
count++;
}
assertEquals(expectedCount + 1, count);
// Loader's row count should include only "visible" rows
assertEquals(expectedCount, rootWriter.rowCount());
// Total count should include invisible and look-ahead rows.
assertEquals(expectedCount + 1, rsLoader.totalRowCount());
// Result should exclude the overflow row
VectorContainer container = rsLoader.harvest();
BatchValidator.validate(container);
RowSet result = fixture.wrap(container);
assertEquals(expectedCount, result.rowCount());
result.clear();
}
// Next batch should start with the overflow row
{
rsLoader.startBatch();
assertEquals(1, rootWriter.rowCount());
assertEquals(expectedCount + 1, rsLoader.totalRowCount());
VectorContainer container = rsLoader.harvest();
BatchValidator.validate(container);
RowSet result = fixture.wrap(container);
assertEquals(1, result.rowCount());
result.clear();
}
rsLoader.close();
}
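The expected row count falls out of simple buffer arithmetic: every row adds arraySize * dictSize keys of key.length bytes to the dict's key vector, and the writer overflows once that vector would exceed ValueVector.MAX_BUFFER_SIZE. A rough, hypothetical check of the numbers, assuming a 16 MiB buffer limit (verify the actual constant in your Drill version):
// Hypothetical back-of-the-envelope check of expectedCount; the 16 MiB
// figure is an assumption, not read from ValueVector.
int keyBytesPerRow = 523 * 1 * 3;                          // key.length * dictSize * arraySize
int approxExpected = (16 * 1024 * 1024) / keyBytesPerRow;  // roughly 10,692 rows before overflow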
Use of org.apache.drill.exec.vector.accessor.DictWriter in project drill by apache.
The class TestResultSetLoaderDictArray, method testBasics.
@Test
public void testBasics() {
TupleMetadata schema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addDictArray("d", MinorType.INT)
      .value(MinorType.VARCHAR)
      .resumeSchema()
    .buildSchema();
ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
    .readerSchema(schema)
    .build();
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
RowSetLoader rootWriter = rsLoader.writer();
// Verify structure and schema
TupleMetadata actualSchema = rootWriter.tupleSchema();
assertEquals(2, actualSchema.size());
assertTrue(actualSchema.metadata(1).isArray());
assertTrue(actualSchema.metadata(1).isDict());
assertEquals(2, actualSchema.metadata("d").tupleSchema().size());
assertEquals(2, actualSchema.column("d").getChildren().size());
DictWriter dictWriter = rootWriter.array("d").dict();
assertSame(actualSchema.metadata("d").tupleSchema(), dictWriter.schema().tupleSchema());
// Write a couple of rows with arrays.
rsLoader.startBatch();
rootWriter
    .addRow(10, objArray(
        map(110, "d1.1", 111, "d1.2", 112, "d1.3"),
        map(120, "d2.2")))
    .addRow(20, objArray())
    .addRow(30, objArray(
        map(310, "d3.1", 311, "d3.2", 313, "d3.4", 317, "d3.9"),
        map(320, "d4.2"),
        map(332, "d5.1", 339, "d5.5", 337, "d5.6")));
// Verify the batch
RowSet actual = fixture.wrap(rsLoader.harvest());
RepeatedDictVector repeatedDictVector = (RepeatedDictVector) actual.container().getValueVector(1).getValueVector();
// RepeatedDictVector contains one child - DictVector
MaterializedField dictArrayField = repeatedDictVector.getField();
assertEquals(1, dictArrayField.getChildren().size());
DictVector dictVector = (DictVector) repeatedDictVector.getDataVector();
Iterator<MaterializedField> iter = dictVector.getField().getChildren().iterator();
assertTrue(dictWriter.keyWriter().schema().schema().isEquivalent(iter.next()));
assertTrue(dictWriter.valueWriter().scalar().schema().schema().isEquivalent(iter.next()));
SingleRowSet expected = fixture.rowSetBuilder(schema)
    .addRow(10, objArray(
        map(110, "d1.1", 111, "d1.2", 112, "d1.3"),
        map(120, "d2.2")))
    .addRow(20, objArray())
    .addRow(30, objArray(
        map(310, "d3.1", 311, "d3.2", 313, "d3.4", 317, "d3.9"),
        map(320, "d4.2"),
        map(332, "d5.1", 339, "d5.5", 337, "d5.6")))
    .build();
RowSetUtilities.verify(expected, actual);
rsLoader.close();
}
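The addRow shortcut above hides the per-entry bookkeeping. As a hypothetical equivalent (not part of the test), the first row could also be produced with the explicit writer calls used in the overflow tests, reusing rootWriter and dictWriter from this example; only row one is shown:
ScalarWriter aWriter = rootWriter.scalar("a");
ArrayWriter dictArrayWriter = rootWriter.array("d");
ScalarWriter keyWriter = dictWriter.keyWriter();
ScalarWriter valueWriter = dictWriter.valueWriter().scalar();
rootWriter.start();
aWriter.setInt(10);
keyWriter.setInt(110); valueWriter.setString("d1.1"); dictWriter.save();
keyWriter.setInt(111); valueWriter.setString("d1.2"); dictWriter.save();
keyWriter.setInt(112); valueWriter.setString("d1.3"); dictWriter.save();
dictArrayWriter.save();   // end of the first dict in the row's array
keyWriter.setInt(120); valueWriter.setString("d2.2"); dictWriter.save();
dictArrayWriter.save();   // end of the second dict
rootWriter.save();        // end of the row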
Use of org.apache.drill.exec.vector.accessor.DictWriter in project drill by apache.
The class TestResultSetLoaderDictArray, method testValueOverflow.
@Test
public void testValueOverflow() {
TupleMetadata schema = new SchemaBuilder()
    .addDictArray("d", MinorType.INT)
      .value(MinorType.VARCHAR)
      .resumeSchema()
    .buildSchema();
ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
    .rowCountLimit(ValueVector.MAX_ROW_COUNT)
    .readerSchema(schema)
    .build();
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
RowSetLoader rootWriter = rsLoader.writer();
rsLoader.startBatch();
byte[] value = new byte[523];
Arrays.fill(value, (byte) 'X');
// number of dicts in each row; the array size is the same for every row so the expected row count is easy to compute
int arraySize = 2;
// number of entries in each dict
int dictSize = 4;
// Number of rows should be driven by vector size.
// Our row count should include the overflow row
ArrayWriter arrayDictWriter = rootWriter.array(0);
DictWriter dictWriter = arrayDictWriter.dict();
ScalarWriter keyWriter = dictWriter.keyWriter();
ScalarWriter valueWriter = dictWriter.valueWriter().scalar();
int expectedCount = ValueVector.MAX_BUFFER_SIZE / (value.length * dictSize * arraySize);
{
int count = 0;
while (!rootWriter.isFull()) {
rootWriter.start();
for (int i = 0; i < arraySize; i++) {
for (int j = 0; j < dictSize; j++) {
// acts as a placeholder, the actual value is not important
keyWriter.setInt(0);
valueWriter.setBytes(value, value.length);
// not necessary for scalars, just for completeness
dictWriter.save();
}
arrayDictWriter.save();
}
rootWriter.save();
count++;
}
assertEquals(expectedCount + 1, count);
// Loader's row count should include only "visible" rows
assertEquals(expectedCount, rootWriter.rowCount());
// Total count should include invisible and look-ahead rows.
assertEquals(expectedCount + 1, rsLoader.totalRowCount());
// Result should exclude the overflow row
VectorContainer container = rsLoader.harvest();
BatchValidator.validate(container);
RowSet result = fixture.wrap(container);
assertEquals(expectedCount, result.rowCount());
result.clear();
}
// Next batch should start with the overflow row
{
rsLoader.startBatch();
assertEquals(1, rootWriter.rowCount());
assertEquals(expectedCount + 1, rsLoader.totalRowCount());
VectorContainer container = rsLoader.harvest();
BatchValidator.validate(container);
RowSet result = fixture.wrap(container);
assertEquals(1, result.rowCount());
result.clear();
}
rsLoader.close();
}