Use of org.apache.drill.exec.record.BatchSchema in project drill by apache.
From the class RowSetTest, method testTopScalarArray.
@Test
public void testTopScalarArray() {
  BatchSchema batchSchema = new SchemaBuilder()
      .add("c", MinorType.INT)
      .addArray("a", MinorType.INT)
      .build();
  ExtendableRowSet rs1 = fixture.rowSet(batchSchema);
  RowSetWriter writer = rs1.writer();

  // Row 1: scalar 10, array {100, 110}
  writer.column(0).setInt(10);
  ArrayWriter array = writer.column(1).array();
  array.setInt(100);
  array.setInt(110);
  writer.save();

  // Row 2: scalar 20, array {200, 120, 220}
  writer.column(0).setInt(20);
  array = writer.column(1).array();
  array.setInt(200);
  array.setInt(120);
  array.setInt(220);
  writer.save();

  // Row 3: scalar 30, empty array
  writer.column(0).setInt(30);
  writer.save();
  writer.done();

  // Read the rows back and verify each scalar and array value.
  RowSetReader reader = rs1.reader();
  assertTrue(reader.next());
  assertEquals(10, reader.column(0).getInt());
  ArrayReader arrayReader = reader.column(1).array();
  assertEquals(2, arrayReader.size());
  assertEquals(100, arrayReader.getInt(0));
  assertEquals(110, arrayReader.getInt(1));
  assertTrue(reader.next());
  assertEquals(20, reader.column(0).getInt());
  arrayReader = reader.column(1).array();
  assertEquals(3, arrayReader.size());
  assertEquals(200, arrayReader.getInt(0));
  assertEquals(120, arrayReader.getInt(1));
  assertEquals(220, arrayReader.getInt(2));
  assertTrue(reader.next());
  assertEquals(30, reader.column(0).getInt());
  arrayReader = reader.column(1).array();
  assertEquals(0, arrayReader.size());
  assertFalse(reader.next());

  // Verify against an equivalent row set built declaratively.
  SingleRowSet rs2 = fixture.rowSetBuilder(batchSchema)
      .add(10, new int[] { 100, 110 })
      .add(20, new int[] { 200, 120, 220 })
      .add(30, null)
      .build();
  new RowSetComparison(rs1).verifyAndClear(rs2);
}
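The write-then-read protocol above generalizes: values are staged through per-column writers, each row is committed with save(), and the row set is sealed with done(). A minimal sketch of the same pattern driven by a loop, assuming the same fixture and writer API as the test above:

  // Sketch only: fills ten rows of the (INT, INT array) schema shown above.
  ExtendableRowSet rs = fixture.rowSet(batchSchema);
  RowSetWriter w = rs.writer();
  for (int row = 0; row < 10; row++) {
    w.column(0).setInt(row);                // scalar column "c"
    ArrayWriter arr = w.column(1).array();  // repeated column "a"
    for (int j = 0; j <= row % 3; j++) {
      arr.setInt(row * 10 + j);             // arrays may differ in length per row
    }
    w.save();                               // commit this row
  }
  w.done();                                 // seal the row set for reading
  rs.clear();                               // release buffers when finished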
Use of org.apache.drill.exec.record.BatchSchema in project drill by apache.
From the class RowSetTest, method testDoubleRW.
private void testDoubleRW() {
  BatchSchema batchSchema = new SchemaBuilder()
      .add("col", MinorType.FLOAT8)
      .build();
  SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
      .add(0D)
      .add(Double.MAX_VALUE)
      .add(Double.MIN_VALUE)
      .build();
  RowSetReader reader = rs.reader();
  assertTrue(reader.next());
  assertEquals(0, reader.column(0).getDouble(), 0.000001);
  assertTrue(reader.next());
  assertEquals(Double.MAX_VALUE, reader.column(0).getDouble(), 0.000001);
  assertTrue(reader.next());
  assertEquals(Double.MIN_VALUE, reader.column(0).getDouble(), 0.000001);
  assertFalse(reader.next());
  rs.clear();
}
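Unlike the other snippets, testDoubleRW is private and carries no @Test annotation, so it is presumably invoked from a public test method that exercises several scalar types in turn. A minimal sketch of such a wrapper (the wrapper name and grouping are assumptions, not taken from the Drill source):

  @Test
  public void testScalarTypes() {
    // Hypothetical grouping; the real Drill test may organize its helpers differently.
    testDoubleRW();
  }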
Use of org.apache.drill.exec.record.BatchSchema in project drill by apache.
From the class RowSetTest, method testMap.
@Test
public void testMap() {
  BatchSchema batchSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("b")
        .add("c", MinorType.INT)
        .add("d", MinorType.INT)
        .buildMap()
      .build();
  SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
      .add(10, 20, 30)
      .add(40, 50, 60)
      .build();
  RowSetReader reader = rs.reader();
  assertTrue(reader.next());
  assertEquals(10, reader.column(0).getInt());
  assertEquals(20, reader.column(1).getInt());
  assertEquals(30, reader.column(2).getInt());
  assertEquals(10, reader.column("a").getInt());
  assertEquals(30, reader.column("b.d").getInt());
  assertTrue(reader.next());
  assertEquals(40, reader.column(0).getInt());
  assertEquals(50, reader.column(1).getInt());
  assertEquals(60, reader.column(2).getInt());
  assertFalse(reader.next());
  rs.clear();
}
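Note the dual addressing in the test above: the map member d is reachable both through the flattened column index 2 and through the dotted name "b.d". A minimal sketch making that equivalence explicit, assuming the same reader API (the "b.c" lookup is assumed to resolve the same way "b.d" does):

  RowSetReader r = rs.reader();
  assertTrue(r.next());
  // Flattened index and dotted name should reach the same underlying value.
  assertEquals(r.column(2).getInt(), r.column("b.d").getInt());
  assertEquals(r.column(1).getInt(), r.column("b.c").getInt());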
Use of org.apache.drill.exec.record.BatchSchema in project drill by apache.
From the class SortRecordBatchBuilder, method build.
public void build(FragmentContext context, VectorContainer outputContainer) throws SchemaChangeException {
  outputContainer.clear();
  if (batches.keySet().size() > 1) {
    throw new SchemaChangeException("Sort currently only supports a single schema.");
  }
  if (batches.size() > Character.MAX_VALUE) {
    throw new SchemaChangeException("Sort cannot work on more than %d batches at a time.", (int) Character.MAX_VALUE);
  }
  if (batches.keys().size() < 1) {
    assert false : "Invalid to have an empty set of batches with no schemas.";
  }
  final DrillBuf svBuffer = reservation.allocateBuffer();
  if (svBuffer == null) {
    throw new OutOfMemoryError("Failed to allocate direct memory for SV4 vector in SortRecordBatchBuilder.");
  }
  sv4 = new SelectionVector4(svBuffer, recordCount, Character.MAX_VALUE);
  BatchSchema schema = batches.keySet().iterator().next();
  List<RecordBatchData> data = batches.get(schema);

  // now we're going to generate the sv4 pointers
  switch (schema.getSelectionVectorMode()) {
    case NONE: {
      int index = 0;
      int recordBatchId = 0;
      for (RecordBatchData d : data) {
        for (int i = 0; i < d.getRecordCount(); i++, index++) {
          sv4.set(index, recordBatchId, i);
        }
        recordBatchId++;
      }
      break;
    }
    case TWO_BYTE: {
      int index = 0;
      int recordBatchId = 0;
      for (RecordBatchData d : data) {
        for (int i = 0; i < d.getRecordCount(); i++, index++) {
          sv4.set(index, recordBatchId, (int) d.getSv2().getIndex(i));
        }
        // might as well drop the selection vector since we'll stop using it now.
        d.getSv2().clear();
        recordBatchId++;
      }
      break;
    }
    default:
      throw new UnsupportedOperationException();
  }

  // next, we'll create lists of each of the vector types.
  ArrayListMultimap<MaterializedField, ValueVector> vectors = ArrayListMultimap.create();
  for (RecordBatchData rbd : batches.values()) {
    for (ValueVector v : rbd.getVectors()) {
      vectors.put(v.getField(), v);
    }
  }
  for (MaterializedField f : schema) {
    List<ValueVector> v = vectors.get(f);
    outputContainer.addHyperList(v, false);
  }
  outputContainer.buildSchema(SelectionVectorMode.FOUR_BYTE);
}
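Each SV4 entry addresses a record across batches: sv4.set(index, recordBatchId, recordIndex) records which batch a row lives in and its offset within that batch, which is why the batch count is capped at Character.MAX_VALUE. A plausible encoding is the batch id in the upper two bytes and the record offset in the lower two; a minimal sketch of that packing (the exact bit layout inside SelectionVector4 is an assumption, not confirmed by this snippet):

  // Sketch of the presumed four-byte entry layout:
  // upper 16 bits = batch id, lower 16 bits = record index within that batch.
  static int pack(int recordBatchId, int recordIndex) {
    return (recordBatchId << 16) | (recordIndex & 0xFFFF);
  }
  static int batchOf(int entry)  { return entry >>> 16; }
  static int recordOf(int entry) { return entry & 0xFFFF; }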
Use of org.apache.drill.exec.record.BatchSchema in project drill by apache.
From the class ProducerConsumerBatch, method load.
private boolean load(final RecordBatchData batch) {
  final VectorContainer newContainer = batch.getContainer();
  if (schema != null && newContainer.getSchema().equals(schema)) {
    container.zeroVectors();
    final BatchSchema schema = container.getSchema();
    for (int i = 0; i < container.getNumberOfColumns(); i++) {
      final MaterializedField field = schema.getColumn(i);
      final MajorType type = field.getType();
      final ValueVector vOut = container.getValueAccessorById(
          TypeHelper.getValueVectorClass(type.getMinorType(), type.getMode()),
          container.getValueVectorId(SchemaPath.getSimplePath(field.getPath())).getFieldIds())
          .getValueVector();
      final ValueVector vIn = newContainer.getValueAccessorById(
          TypeHelper.getValueVectorClass(type.getMinorType(), type.getMode()),
          newContainer.getValueVectorId(SchemaPath.getSimplePath(field.getPath())).getFieldIds())
          .getValueVector();
      final TransferPair tp = vIn.makeTransferPair(vOut);
      tp.transfer();
    }
    return false;
  } else {
    container.clear();
    for (final VectorWrapper<?> w : newContainer) {
      container.add(w.getValueVector());
    }
    container.buildSchema(SelectionVectorMode.NONE);
    schema = container.getSchema();
    return true;
  }
}
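load returns true exactly when the incoming batch carries a new schema (the first batch, or one whose schema differs from the cached one) and false when the existing vectors were reused via transfer pairs. A caller can map that flag onto its iterator outcome; a minimal sketch of such a caller (hypothetical usage, not taken from the Drill source):

  // Hypothetical use of the return flag in the consuming operator.
  IterOutcome outcome = load(batchData)
      ? IterOutcome.OK_NEW_SCHEMA  // schema changed: downstream must rebuild its state
      : IterOutcome.OK;            // same schema: data was transferred into existing vectors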