Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by axbaretto.
From class TestVariableWidthWriter, method testSizeLimit.
/**
 * Test resize monitoring. Add a listener to a VarChar writer,
 * capture each resize, and refuse a resize when the size
 * of the vector exceeds 1 MB. This will trigger an overflow,
 * which will throw an exception which we then check for.
 */
@Test
public void testSizeLimit() {
  try (VarCharVector vector = allocVector(1000)) {
    TestIndex index = new TestIndex();
    VarCharColumnWriter writer = makeWriter(vector, index);
    writer.bindListener(new ColumnWriterListener() {

      // Because assumed array size is 10, so 10 * 1000 = 10,000
      // rounded to 16K
      int totalAlloc = 16384;

      @Override
      public void overflowed(ScalarWriter writer) {
        // Signal overflow by throwing; the catch block below checks
        // for this exact message.
        throw new IllegalStateException("overflow called");
      }

      @Override
      public boolean canExpand(ScalarWriter writer, int delta) {
        // Track cumulative allocation and veto any expansion that
        // would push the vector past the 1 MB cap.
        totalAlloc += delta;
        return totalAlloc < 1024 * 1024;
      }
    });
    writer.startWrite();

    // Write fixed-size values in an unbounded loop; the loop can only
    // exit via the overflow exception thrown by the listener above.
    byte[] value = new byte[423];
    Arrays.fill(value, (byte) 'X');
    try {
      for (int i = 0; ; i++) {
        index.index = i;
        writer.startRow();
        writer.setBytes(value, value.length);
        writer.saveRow();
      }
    } catch (IllegalStateException e) {
      assertTrue(e.getMessage().contains("overflow called"));
    }
  }
}
Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by axbaretto.
From class RowSetTest, method testMapStructure.
/**
 * Test a simple map structure at the top level of a row.
 *
 * @throws VectorOverflowException should never occur
 */
@Test
public void testMapStructure() {
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .addArray("b", MinorType.INT)
        .resumeSchema()
      .buildSchema();
  ExtendableRowSet rs = fixture.rowSet(schema);
  RowSetWriter rootWriter = rs.writer();

  // Writer-side invariants: "a" is a scalar, "m" is a tuple (map),
  // and index-based access agrees with name-based access.
  assertEquals(ObjectType.SCALAR, rootWriter.column("a").type());
  assertEquals(ObjectType.SCALAR, rootWriter.column(0).type());
  assertEquals(ObjectType.TUPLE, rootWriter.column("m").type());
  assertEquals(ObjectType.TUPLE, rootWriter.column(1).type());
  assertSame(rootWriter.column(1).tuple(), rootWriter.tuple(1));
  TupleWriter mWriter = rootWriter.column(1).tuple();
  assertEquals(ObjectType.SCALAR, mWriter.column("b").array().entry().type());
  assertEquals(ObjectType.SCALAR, mWriter.column("b").array().entryType());
  ScalarWriter aColWriter = rootWriter.column("a").scalar();
  ScalarWriter bColWriter =
      rootWriter.column("m").tuple().column("b").array().entry().scalar();
  assertSame(bColWriter, rootWriter.tuple(1).array(0).scalar());
  assertEquals(ValueType.INTEGER, bColWriter.valueType());

  // A map column can be accessed neither as a scalar nor as an array.
  try {
    rootWriter.column(1).scalar();
    fail();
  } catch (UnsupportedOperationException e) {
    // Expected
  }
  try {
    rootWriter.column(1).array();
    fail();
  } catch (UnsupportedOperationException e) {
    // Expected
  }

  // Write three rows; each map holds a two-element int array.
  aColWriter.setInt(10);
  bColWriter.setInt(11);
  bColWriter.setInt(12);
  rootWriter.save();
  aColWriter.setInt(20);
  bColWriter.setInt(21);
  bColWriter.setInt(22);
  rootWriter.save();
  aColWriter.setInt(30);
  bColWriter.setInt(31);
  bColWriter.setInt(32);
  rootWriter.save();

  // Finish the row set and get a reader.
  SingleRowSet result = rootWriter.done();
  RowSetReader rootReader = result.reader();

  // Reader-side invariants mirror the writer-side checks above.
  assertEquals(ObjectType.SCALAR, rootReader.column("a").type());
  assertEquals(ObjectType.SCALAR, rootReader.column(0).type());
  assertEquals(ObjectType.TUPLE, rootReader.column("m").type());
  assertEquals(ObjectType.TUPLE, rootReader.column(1).type());
  assertSame(rootReader.column(1).tuple(), rootReader.tuple(1));
  ScalarReader aColReader = rootReader.column(0).scalar();
  TupleReader mapReader = rootReader.column(1).tuple();
  assertEquals(ObjectType.SCALAR, mapReader.column("b").array().entryType());
  ScalarElementReader bColReader = mapReader.column(0).elements();
  assertEquals(ValueType.INTEGER, bColReader.valueType());

  // Read back the three rows and their array elements.
  assertTrue(rootReader.next());
  assertEquals(10, aColReader.getInt());
  assertEquals(11, bColReader.getInt(0));
  assertEquals(12, bColReader.getInt(1));
  assertTrue(rootReader.next());
  assertEquals(20, aColReader.getInt());
  assertEquals(21, bColReader.getInt(0));
  assertEquals(22, bColReader.getInt(1));
  assertTrue(rootReader.next());
  assertEquals(30, aColReader.getInt());
  assertEquals(31, bColReader.getInt(0));
  assertEquals(32, bColReader.getInt(1));
  assertFalse(rootReader.next());

  // Verify that the map accessor's value count was set.
  @SuppressWarnings("resource")
  MapVector mapVector =
      (MapVector) result.container().getValueVector(1).getValueVector();
  assertEquals(result.rowCount(), mapVector.getAccessor().getValueCount());

  // Cross-check against a row set built via the test utilities.
  SingleRowSet expected = fixture.rowSetBuilder(schema)
      .addRow(10, objArray(intArray(11, 12)))
      .addRow(20, objArray(intArray(21, 22)))
      .addRow(30, objArray(intArray(31, 32)))
      .build();
  new RowSetComparison(expected).verifyAndClearAll(result);
}
Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by axbaretto.
From class RowSetTest, method testRepeatedMapStructure.
/**
 * Test a repeated map (array of maps) at the top level of a row:
 * schema (a INT, m ARRAY<MAP<b INT, c INT>>). Writes three rows of
 * two map entries each, then reads them back three different ways.
 */
@Test
public void testRepeatedMapStructure() {
  TupleMetadata schema = new SchemaBuilder().add("a", MinorType.INT).addMapArray("m").add("b", MinorType.INT).add("c", MinorType.INT).resumeSchema().buildSchema();
  ExtendableRowSet rowSet = fixture.rowSet(schema);
  RowSetWriter writer = rowSet.writer();

  // Pick out components and lightly test. (Assumes structure
  // tested earlier is still valid, so no need to exhaustively
  // test again.)
  assertEquals(ObjectType.SCALAR, writer.column("a").type());
  assertEquals(ObjectType.ARRAY, writer.column("m").type());
  ArrayWriter maWriter = writer.column(1).array();
  assertEquals(ObjectType.TUPLE, maWriter.entryType());
  TupleWriter mapWriter = maWriter.tuple();
  assertEquals(ObjectType.SCALAR, mapWriter.column("b").type());
  assertEquals(ObjectType.SCALAR, mapWriter.column("c").type());
  ScalarWriter aWriter = writer.column("a").scalar();
  ScalarWriter bWriter = mapWriter.scalar("b");
  ScalarWriter cWriter = mapWriter.scalar("c");
  assertEquals(ValueType.INTEGER, aWriter.valueType());
  assertEquals(ValueType.INTEGER, bWriter.valueType());
  assertEquals(ValueType.INTEGER, cWriter.valueType());

  // Write data. maWriter.save() advances to the next array position;
  // writer.save() finishes the row.
  aWriter.setInt(10);
  bWriter.setInt(101);
  cWriter.setInt(102);
  // Advance to next array position
  maWriter.save();
  bWriter.setInt(111);
  cWriter.setInt(112);
  maWriter.save();
  writer.save();
  aWriter.setInt(20);
  bWriter.setInt(201);
  cWriter.setInt(202);
  maWriter.save();
  bWriter.setInt(211);
  cWriter.setInt(212);
  maWriter.save();
  writer.save();
  aWriter.setInt(30);
  bWriter.setInt(301);
  cWriter.setInt(302);
  maWriter.save();
  bWriter.setInt(311);
  cWriter.setInt(312);
  maWriter.save();
  writer.save();

  // Finish the row set and get a reader.
  SingleRowSet actual = writer.done();
  RowSetReader reader = actual.reader();

  // Verify reader structure
  assertEquals(ObjectType.SCALAR, reader.column("a").type());
  assertEquals(ObjectType.ARRAY, reader.column("m").type());
  ArrayReader maReader = reader.column(1).array();
  assertEquals(ObjectType.TUPLE, maReader.entryType());
  TupleReader mapReader = maReader.tuple();
  assertEquals(ObjectType.SCALAR, mapReader.column("b").type());
  assertEquals(ObjectType.SCALAR, mapReader.column("c").type());
  ScalarReader aReader = reader.column("a").scalar();
  ScalarReader bReader = mapReader.scalar("b");
  ScalarReader cReader = mapReader.scalar("c");
  assertEquals(ValueType.INTEGER, aReader.valueType());
  assertEquals(ValueType.INTEGER, bReader.valueType());
  assertEquals(ValueType.INTEGER, cReader.valueType());

  // Row 1: use index accessors (tuple(i) positions the reader).
  assertTrue(reader.next());
  assertEquals(10, aReader.getInt());
  TupleReader ixReader = maReader.tuple(0);
  assertEquals(101, ixReader.scalar(0).getInt());
  assertEquals(102, ixReader.scalar(1).getInt());
  ixReader = maReader.tuple(1);
  assertEquals(111, ixReader.scalar(0).getInt());
  assertEquals(112, ixReader.scalar(1).getInt());

  // Row 2: use common accessor with explicit positioning,
  // but access scalars through the map reader.
  assertTrue(reader.next());
  assertEquals(20, aReader.getInt());
  maReader.setPosn(0);
  assertEquals(201, mapReader.scalar(0).getInt());
  assertEquals(202, mapReader.scalar(1).getInt());
  maReader.setPosn(1);
  assertEquals(211, mapReader.scalar(0).getInt());
  assertEquals(212, mapReader.scalar(1).getInt());

  // Row 3: use common accessor for scalars
  assertTrue(reader.next());
  assertEquals(30, aReader.getInt());
  maReader.setPosn(0);
  assertEquals(301, bReader.getInt());
  assertEquals(302, cReader.getInt());
  maReader.setPosn(1);
  assertEquals(311, bReader.getInt());
  assertEquals(312, cReader.getInt());
  assertFalse(reader.next());

  // Verify that the map accessor's value count was set.
  @SuppressWarnings("resource") RepeatedMapVector mapVector = (RepeatedMapVector) actual.container().getValueVector(1).getValueVector();
  assertEquals(3, mapVector.getAccessor().getValueCount());

  // Verify the readers and writers again using the testing tools.
  SingleRowSet expected = fixture.rowSetBuilder(schema).addRow(10, objArray(objArray(101, 102), objArray(111, 112))).addRow(20, objArray(objArray(201, 202), objArray(211, 212))).addRow(30, objArray(objArray(301, 302), objArray(311, 312))).build();
  new RowSetComparison(expected).verifyAndClearAll(actual);
}
Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by axbaretto.
From class TestFillEmpties, method doFillEmptiesScalar.
/**
 * Core of the fill-empties tests for a single scalar column. Writes a
 * real value only on every fifth row, then reads the batch back and
 * verifies how the skipped rows were filled: null for OPTIONAL mode,
 * a zero-length value for variable-width types, and zero-filled bytes
 * (interpreted as some form of zero) for everything else.
 */
private void doFillEmptiesScalar(MajorType majorType) {
  final TupleMetadata schema = new SchemaBuilder().add("a", majorType).buildSchema();
  final ExtendableRowSet rowSet = fixture.rowSet(schema);
  final RowSetWriter rowWriter = rowSet.writer();
  final ScalarWriter colWriter = rowWriter.scalar(0);
  final ValueType valueType = colWriter.valueType();
  final boolean isNullable = majorType.getMode() == DataMode.OPTIONAL;

  // Populate only every fifth row; the rest are left unset so the
  // writer must fill them.
  for (int row = 0; row < ROW_COUNT; row++) {
    if (row % 5 == 0) {
      colWriter.setObject(RowSetUtilities.testDataFromInt(valueType, majorType, row));
    }
    rowWriter.save();
  }

  final SingleRowSet result = rowWriter.done();
  final RowSetReader rowReader = result.reader();
  final ScalarReader colReader = rowReader.scalar(0);
  final MinorType minorType = majorType.getMinorType();
  final boolean isVariableWidth =
      minorType == MinorType.VARCHAR ||
      minorType == MinorType.VAR16CHAR ||
      minorType == MinorType.VARBINARY;

  for (int row = 0; row < ROW_COUNT; row++) {
    assertTrue(rowReader.next());
    final boolean wasWritten = row % 5 == 0;
    if (!wasWritten && isNullable) {
      // Nullable types fill with nulls.
      assertTrue(colReader.isNull());
    } else if (!wasWritten && isVariableWidth) {
      // Variable width types fill with a zero-length value.
      assertEquals(0, colReader.getBytes().length);
    } else {
      // All other types fill with zero-bytes, interpreted as some form
      // of zero for each type.
      final Object actual = colReader.getObject();
      final Object expected =
          RowSetUtilities.testDataFromInt(valueType, majorType, wasWritten ? row : 0);
      RowSetUtilities.assertEqualValues(
          majorType.toString().replace('\n', ' ') + "[" + row + "]",
          valueType, expected, actual);
    }
  }
  result.clear();
}
Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.
From class TestResultSetLoaderRepeatedList, method test2DOverflow.
/**
 * Test vector-overflow handling for a repeated list of VARCHAR (a 2D
 * array). Fills a batch until overflow occurs, verifies the harvested
 * batch excludes the overflow row, then writes a second batch (which
 * begins with the carried-over overflow row) and verifies it too.
 *
 * Fix: the second verification loop erroneously called
 * {@code elementWriter.setString(...)} while READING the harvested
 * batch — a copy-paste of the write loop. The batch had already been
 * harvested, so the stray write is removed.
 */
@Test
public void test2DOverflow() {
  final TupleMetadata schema = new SchemaBuilder().add("id", MinorType.INT).addRepeatedList("list2").addArray(MinorType.VARCHAR).resumeSchema().buildSchema();
  final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder().rowCountLimit(ValueVector.MAX_ROW_COUNT).readerSchema(schema).build();
  final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  final RowSetLoader writer = rsLoader.writer();

  // Fill the batch with enough data to cause overflow.
  // Data must be large enough to cause overflow before 64K rows
  // Make a bit bigger to overflow early.
  final int outerSize = 7;
  final int innerSize = 5;
  final int strLength = ValueVector.MAX_BUFFER_SIZE / ValueVector.MAX_ROW_COUNT / outerSize / innerSize + 20;
  // Reserve 6 chars for the %06d suffix appended per element.
  final byte[] value = new byte[strLength - 6];
  Arrays.fill(value, (byte) 'X');
  final String strValue = new String(value, Charsets.UTF_8);
  int rowCount = 0;
  int elementCount = 0;
  final ArrayWriter outerWriter = writer.array(1);
  final ArrayWriter innerWriter = outerWriter.array();
  final ScalarWriter elementWriter = innerWriter.scalar();
  rsLoader.startBatch();
  while (!writer.isFull()) {
    writer.start();
    writer.scalar(0).setInt(rowCount);
    for (int j = 0; j < outerSize; j++) {
      for (int k = 0; k < innerSize; k++) {
        elementWriter.setString(String.format("%s%06d", strValue, elementCount));
        elementCount++;
      }
      outerWriter.save();
    }
    writer.save();
    rowCount++;
  }

  // Number of rows should be driven by vector size.
  // Our row count should include the overflow row
  final int expectedCount = ValueVector.MAX_BUFFER_SIZE / (strLength * innerSize * outerSize);
  assertEquals(expectedCount + 1, rowCount);

  // Loader's row count should include only "visible" rows
  assertEquals(expectedCount, writer.rowCount());

  // Total count should include invisible and look-ahead rows.
  assertEquals(expectedCount + 1, rsLoader.totalRowCount());

  // Result should exclude the overflow row
  RowSet result = fixture.wrap(rsLoader.harvest());
  assertEquals(expectedCount, result.rowCount());

  // Verify the data.
  RowSetReader reader = result.reader();
  ArrayReader outerReader = reader.array(1);
  ArrayReader innerReader = outerReader.array();
  ScalarReader strReader = innerReader.scalar();
  int readRowCount = 0;
  int readElementCount = 0;
  while (reader.next()) {
    assertEquals(readRowCount, reader.scalar(0).getInt());
    for (int i = 0; i < outerSize; i++) {
      assertTrue(outerReader.next());
      for (int j = 0; j < innerSize; j++) {
        assertTrue(innerReader.next());
        assertEquals(String.format("%s%06d", strValue, readElementCount), strReader.getString());
        readElementCount++;
      }
      assertFalse(innerReader.next());
    }
    assertFalse(outerReader.next());
    readRowCount++;
  }
  assertEquals(readRowCount, result.rowCount());
  result.clear();

  // Write a few more rows to verify the overflow row.
  rsLoader.startBatch();
  for (int i = 0; i < 1000; i++) {
    writer.start();
    writer.scalar(0).setInt(rowCount);
    for (int j = 0; j < outerSize; j++) {
      for (int k = 0; k < innerSize; k++) {
        elementWriter.setString(String.format("%s%06d", strValue, elementCount));
        elementCount++;
      }
      outerWriter.save();
    }
    writer.save();
    rowCount++;
  }

  // Second batch: 1000 new rows plus the carried-over overflow row.
  result = fixture.wrap(rsLoader.harvest());
  assertEquals(1001, result.rowCount());
  final int startCount = readRowCount;
  reader = result.reader();
  outerReader = reader.array(1);
  innerReader = outerReader.array();
  strReader = innerReader.scalar();
  while (reader.next()) {
    assertEquals(readRowCount, reader.scalar(0).getInt());
    for (int i = 0; i < outerSize; i++) {
      assertTrue(outerReader.next());
      for (int j = 0; j < innerSize; j++) {
        assertTrue(innerReader.next());
        // BUG FIX: removed stray elementWriter.setString(...) that
        // wrote into the (already harvested) writer during read-back.
        assertEquals(String.format("%s%06d", strValue, readElementCount), strReader.getString());
        readElementCount++;
      }
      assertFalse(innerReader.next());
    }
    assertFalse(outerReader.next());
    readRowCount++;
  }
  assertEquals(readRowCount - startCount, result.rowCount());
  result.clear();
  rsLoader.close();
}
Aggregations