Use of io.netty.buffer.DrillBuf in project drill by axbaretto: class VectorAccessibleSerializable, method readSv2.
private void readSv2(InputStream input) throws IOException {
    if (sv2 != null) {
        sv2.clear();
    }
    final int dataLength = recordCount * SelectionVector2.RECORD_SIZE;
    svMode = BatchSchema.SelectionVectorMode.TWO_BYTE;
    @SuppressWarnings("resource")
    DrillBuf buf = allocator.read(dataLength, input);
    sv2 = new SelectionVector2(allocator, buf, recordCount);
    // SV2 now owns the buffer
    buf.release();
}
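The release() at the end is safe because DrillBuf is reference counted: allocator.read hands back a buffer the caller owns, the SelectionVector2 constructor is expected to take its own reference, and the caller then drops the local one. A minimal, self-contained sketch of that handoff (RefCountedBuf and Sv2Like are hypothetical stand-ins, not Drill's API):

final class RefCountedBuf {
    private int refCnt = 1;                        // starts owned by its creator
    void retain() { refCnt++; }
    void release() { if (--refCnt == 0) free(); }
    private void free() { System.out.println("buffer freed"); }
}

final class Sv2Like {
    private final RefCountedBuf buffer;
    Sv2Like(RefCountedBuf buf) {
        buf.retain();                              // consumer takes its own reference
        this.buffer = buf;
    }
    void clear() { buffer.release(); }
}

class OwnershipDemo {
    public static void main(String[] args) {
        RefCountedBuf buf = new RefCountedBuf();   // refCnt == 1
        Sv2Like sv2 = new Sv2Like(buf);            // refCnt == 2
        buf.release();                             // caller drops its reference, refCnt == 1
        sv2.clear();                               // last reference gone, buffer freed
    }
}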
Use of io.netty.buffer.DrillBuf in project drill by axbaretto: class PriorityQueueTemplate, method generate.
@Override
public void generate() throws SchemaChangeException {
    Stopwatch watch = Stopwatch.createStarted();
    @SuppressWarnings("resource")
    final DrillBuf drillBuf = allocator.buffer(4 * queueSize);
    finalSv4 = new SelectionVector4(drillBuf, queueSize, 4000);
    for (int i = queueSize - 1; i >= 0; i--) {
        finalSv4.set(i, pop());
    }
    logger.debug("Took {} us to generate output of {}", watch.elapsed(TimeUnit.MICROSECONDS), finalSv4.getTotalCount());
}
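The buffer is sized at 4 * queueSize because each SelectionVector4 entry is a single 4-byte int, and the vector is filled from the last slot backwards while pop() drains the heap, so the retained records come out in final order without a separate sort pass. A self-contained sketch of that drain-backwards idea using java.util.PriorityQueue on plain ints (an illustration only; the generated PriorityQueueTemplate operates on SV4 indices, not raw values):

import java.util.PriorityQueue;

class TopNDrainDemo {
    public static void main(String[] args) {
        final int limit = 3;
        // Min-heap whose root is the worst (smallest) of the values retained so far.
        PriorityQueue<Integer> heap = new PriorityQueue<>(limit);
        for (int v : new int[] {7, 1, 9, 4, 8, 2}) {
            heap.offer(v);
            if (heap.size() > limit) {
                heap.poll();                       // evict the current worst
            }
        }
        int[] out = new int[heap.size()];
        for (int i = out.length - 1; i >= 0; i--) {
            out[i] = heap.poll();                  // pop worst-first, fill back to front
        }
        // The best value ends up at index 0: prints [9, 8, 7]
        System.out.println(java.util.Arrays.toString(out));
    }
}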
Use of io.netty.buffer.DrillBuf in project drill by axbaretto: class PriorityQueueTemplate, method resetQueue.
@Override
public void resetQueue(VectorContainer container, SelectionVector4 v4) throws SchemaChangeException {
    assert container.getSchema().getSelectionVectorMode() == BatchSchema.SelectionVectorMode.FOUR_BYTE;
    BatchSchema schema = container.getSchema();
    VectorContainer newContainer = new VectorContainer();
    for (MaterializedField field : schema) {
        int[] ids = container.getValueVectorId(SchemaPath.getSimplePath(field.getName())).getFieldIds();
        newContainer.add(container.getValueAccessorById(field.getValueClass(), ids).getValueVectors());
    }
    newContainer.buildSchema(BatchSchema.SelectionVectorMode.FOUR_BYTE);
    // Cleanup before recreating hyperbatch and sv4.
    cleanup();
    hyperBatch = new ExpandableHyperContainer(newContainer);
    batchCount = hyperBatch.iterator().next().getValueVectors().length;
    @SuppressWarnings("resource")
    final DrillBuf drillBuf = allocator.buffer(4 * (limit + 1));
    heapSv4 = new SelectionVector4(drillBuf, limit, Character.MAX_VALUE);
    // Reset queue size (most likely to be set to limit).
    queueSize = 0;
    for (int i = 0; i < v4.getTotalCount(); i++) {
        heapSv4.set(i, v4.get(i));
        ++queueSize;
    }
    v4.clear();
    doSetup(hyperBatch, null);
}
Use of io.netty.buffer.DrillBuf in project drill by axbaretto: class PriorityQueueTemplate, method init.
@Override
public void init(int limit, BufferAllocator allocator, boolean hasSv2) throws SchemaChangeException {
    this.limit = limit;
    this.allocator = allocator;
    @SuppressWarnings("resource")
    final DrillBuf drillBuf = allocator.buffer(4 * (limit + 1));
    heapSv4 = new SelectionVector4(drillBuf, limit, Character.MAX_VALUE);
    this.hasSv2 = hasSv2;
}
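Both init and resetQueue allocate 4 * (limit + 1) bytes because every SelectionVector4 slot is one 4-byte int, and they pass Character.MAX_VALUE (65535) as the per-batch record limit since an entry's lower 16 bits hold the in-batch offset. A small, self-contained sketch of that arithmetic (ByteBuffer stands in for DrillBuf, and packEntry is a hypothetical helper, not Drill's API):

import java.nio.ByteBuffer;

class Sv4SizingDemo {
    // Hypothetical packing: batch index in the upper 16 bits, record offset in the lower 16.
    static int packEntry(int batch, int record) {
        return (batch << 16) | (record & 0xFFFF);
    }

    public static void main(String[] args) {
        final int limit = 1000;
        // One 4-byte int per entry, mirroring allocator.buffer(4 * (limit + 1)).
        ByteBuffer backing = ByteBuffer.allocate(4 * (limit + 1));
        backing.putInt(0, packEntry(2, 65535));    // largest offset that still fits in 16 bits
        int entry = backing.getInt(0);
        System.out.println("batch=" + (entry >>> 16) + " record=" + (entry & 0xFFFF));
    }
}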
Use of io.netty.buffer.DrillBuf in project drill by axbaretto: class MSortTemplate, method setup.
@Override
public void setup(final FragmentContext context, final BufferAllocator allocator, final SelectionVector4 vector4, final VectorContainer hyperBatch) throws SchemaChangeException {
    // We pass in the local hyperBatch since that is where we'll be reading data.
    Preconditions.checkNotNull(vector4);
    this.vector4 = vector4.createNewWrapperCurrent();
    this.context = context;
    vector4.clear();
    doSetup(context, hyperBatch, null);
    runStarts.add(0);
    int batch = 0;
    final int totalCount = this.vector4.getTotalCount();
    for (int i = 0; i < totalCount; i++) {
        final int newBatch = this.vector4.get(i) >>> 16;
        if (newBatch == batch) {
            continue;
        } else if (newBatch == batch + 1) {
            runStarts.add(i);
            batch = newBatch;
        } else {
            throw new UnsupportedOperationException(String.format("Missing batch. batch: %d newBatch: %d", batch, newBatch));
        }
    }
    @SuppressWarnings("resource")
    final DrillBuf drillBuf = allocator.buffer(4 * totalCount);
    try {
        desiredRecordBatchCount = context.getConfig().getInt(ExecConstants.EXTERNAL_SORT_MSORT_MAX_BATCHSIZE);
    } catch (ConfigException.Missing e) {
        // Value not found, use the default instead.
        desiredRecordBatchCount = ValueVector.MAX_ROW_COUNT;
    }
    aux = new SelectionVector4(drillBuf, totalCount, desiredRecordBatchCount);
}
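The loop above recovers where each sorted run begins: entries are appended batch by batch, and value >>> 16 extracts the batch index from the upper 16 bits of an SV4 entry, so a run starts wherever that index steps up by exactly one. A self-contained sketch of the same scan over plain ints (entry() is a hypothetical helper mirroring the SV4 packing; this is not the Drill class itself):

import java.util.ArrayList;
import java.util.List;

class RunStartDemo {
    static int entry(int batch, int record) {
        return (batch << 16) | (record & 0xFFFF);  // batch in the upper 16 bits, offset in the lower 16
    }

    public static void main(String[] args) {
        // Three batches of 3, 2 and 4 records respectively.
        int[] sv4 = {
            entry(0, 0), entry(0, 1), entry(0, 2),
            entry(1, 0), entry(1, 1),
            entry(2, 0), entry(2, 1), entry(2, 2), entry(2, 3)
        };
        List<Integer> runStarts = new ArrayList<>();
        runStarts.add(0);
        int batch = 0;
        for (int i = 0; i < sv4.length; i++) {
            int newBatch = sv4[i] >>> 16;
            if (newBatch == batch + 1) {           // first record of the next batch
                runStarts.add(i);
                batch = newBatch;
            } else if (newBatch != batch) {
                throw new IllegalStateException("missing batch " + (batch + 1));
            }
        }
        System.out.println(runStarts);             // prints [0, 3, 5]
    }
}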