Use of io.netty.buffer.DrillBuf in project drill by apache.
The class PriorityQueueTemplate, method init.
@Override
public void init(int limit, FragmentContext context, BufferAllocator allocator, boolean hasSv2) throws SchemaChangeException {
  this.limit = limit;
  this.context = context;
  this.allocator = allocator;
  @SuppressWarnings("resource")
  final DrillBuf drillBuf = allocator.buffer(4 * (limit + 1));
  heapSv4 = new SelectionVector4(drillBuf, limit, Character.MAX_VALUE);
  this.hasSv2 = hasSv2;
}
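The buffer above is sized at four bytes per entry because each SelectionVector4 slot is a 32-bit value. Below is a minimal, hedged sketch of the allocate/release lifecycle this relies on, using Drill's RootAllocator and an illustrative limit (both assumptions, not taken from the snippet). In the snippet itself the SelectionVector4 takes ownership of the buffer, so the vector, not the caller, is responsible for releasing it.

import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.memory.RootAllocator;

import io.netty.buffer.DrillBuf;

public class DrillBufLifecycleSketch {
  public static void main(String[] args) throws Exception {
    final int limit = 1024;  // hypothetical queue limit
    try (BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE)) {
      // 4 bytes per selection entry, plus one spare slot, mirroring init() above.
      final DrillBuf drillBuf = allocator.buffer(4 * (limit + 1));
      try {
        drillBuf.setInt(0, 42);             // absolute write of one 4-byte entry
        System.out.println(drillBuf.getInt(0));
      } finally {
        drillBuf.release();                 // DrillBuf is reference-counted; release to avoid a leak
      }
    }
  }
}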
Use of io.netty.buffer.DrillBuf in project drill by apache.
The class WebUserConnection, method sendData.
@Override
public void sendData(RpcOutcomeListener<Ack> listener, QueryWritableBatch result) {
  // Check whether there is any data at all. The byte count can overflow an int here, but DrillBuf doesn't
  // support allocating with a long byte count, so we just preserve the earlier behavior and log a debug
  // message for that case.
  final int dataByteCount = (int) result.getByteCount();
  if (dataByteCount <= 0) {
    if (logger.isDebugEnabled()) {
      logger.debug("Either no data received in this batch or there is BufferOverflow in dataByteCount: {}", dataByteCount);
    }
    listener.success(Acks.OK, null);
    return;
  }
  // If we get here, there is definitely some data. Create a ByteBuf with all the data in it.
  final int rows = result.getHeader().getRowCount();
  final BufferAllocator allocator = webSessionResources.getAllocator();
  final DrillBuf bufferWithData = allocator.buffer(dataByteCount);
  try {
    final ByteBuf[] resultDataBuffers = result.getBuffers();
    for (final ByteBuf buffer : resultDataBuffers) {
      bufferWithData.writeBytes(buffer);
      buffer.release();
    }
    final RecordBatchLoader loader = new RecordBatchLoader(allocator);
    try {
      loader.load(result.getHeader().getDef(), bufferWithData);
      // Note: load() no longer throws SchemaChangeException, so check/clean the catch clause below.
      for (int i = 0; i < loader.getSchema().getFieldCount(); ++i) {
        columns.add(loader.getSchema().getColumn(i).getPath());
      }
      for (int i = 0; i < rows; ++i) {
        final Map<String, String> record = Maps.newHashMap();
        for (VectorWrapper<?> vw : loader) {
          final String field = vw.getValueVector().getMetadata().getNamePart().getName();
          final Accessor accessor = vw.getValueVector().getAccessor();
          final Object value = i < accessor.getValueCount() ? accessor.getObject(i) : null;
          final String display = value == null ? null : value.toString();
          record.put(field, display);
        }
        results.add(record);
      }
    } finally {
      loader.clear();
    }
  } catch (Exception e) {
    exception = UserException.systemError(e).build(logger);
  } finally {
    // Notify the listener with Acks.OK in both the error and success cases, because the data was sent
    // successfully from the Drillbit.
    bufferWithData.release();
    listener.success(Acks.OK, null);
  }
}
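The copy loop above is the core pattern: the incoming Netty ByteBufs are coalesced into a single DrillBuf and each source is released as soon as its bytes are copied. A hedged, standalone sketch of that pattern follows; coalesceIntoDrillBuf is a hypothetical helper, not a method of WebUserConnection.

import org.apache.drill.exec.memory.BufferAllocator;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.DrillBuf;

public class CoalesceSketch {
  // Copy every source buffer into one freshly allocated DrillBuf and release the sources.
  // The caller owns the returned buffer and must release() it, as sendData() does in its finally block.
  static DrillBuf coalesceIntoDrillBuf(BufferAllocator allocator, ByteBuf[] sources, int totalBytes) {
    final DrillBuf target = allocator.buffer(totalBytes);
    for (final ByteBuf source : sources) {
      target.writeBytes(source);  // copies source.readableBytes() bytes and advances target's writerIndex
      source.release();           // drop our reference to the source once its bytes are copied
    }
    return target;
  }
}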
Use of io.netty.buffer.DrillBuf in project drill by apache.
The class RepeatedVarCharOutput, method loadVarCharDataAddress.
private void loadVarCharDataAddress() {
  @SuppressWarnings("resource")
  DrillBuf buf = vector.getDataVector().getBuffer();
  checkBuf(buf);
  this.characterData = buf.memoryAddress();
  this.characterDataOriginal = buf.memoryAddress();
  this.characterDataMax = buf.memoryAddress() + buf.capacity();
}
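Caching buf.memoryAddress() lets the writer address the vector's backing memory directly and bound its writes with characterDataMax. A hedged sketch of that idea is below; putByteAt is a hypothetical helper, and it uses Netty's internal PlatformDependent purely for illustration.

import io.netty.buffer.DrillBuf;
import io.netty.util.internal.PlatformDependent;

public class DirectAddressSketch {
  // Write a single byte at an offset into the buffer's off-heap memory, with a manual
  // bounds check mirroring the characterData / characterDataMax fields above.
  static void putByteAt(DrillBuf buf, long offset, byte value) {
    final long start = buf.memoryAddress();
    final long end = start + buf.capacity();
    final long address = start + offset;
    if (offset < 0 || address >= end) {
      throw new IndexOutOfBoundsException("offset " + offset + " outside capacity " + buf.capacity());
    }
    PlatformDependent.putByte(address, value);
  }
}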
Use of io.netty.buffer.DrillBuf in project drill by apache.
The class SortRecordBatchBuilder, method build.
public void build(FragmentContext context, VectorContainer outputContainer) throws SchemaChangeException {
  outputContainer.clear();
  if (batches.keySet().size() > 1) {
    throw new SchemaChangeException("Sort currently only supports a single schema.");
  }
  if (batches.size() > Character.MAX_VALUE) {
    throw new SchemaChangeException("Sort cannot work on more than %d batches at a time.", (int) Character.MAX_VALUE);
  }
  if (batches.keys().size() < 1) {
    assert false : "Invalid to have an empty set of batches with no schemas.";
  }
  final DrillBuf svBuffer = reservation.allocateBuffer();
  if (svBuffer == null) {
    throw new OutOfMemoryError("Failed to allocate direct memory for SV4 vector in SortRecordBatchBuilder.");
  }
  sv4 = new SelectionVector4(svBuffer, recordCount, Character.MAX_VALUE);
  BatchSchema schema = batches.keySet().iterator().next();
  List<RecordBatchData> data = batches.get(schema);
  // now we're going to generate the sv4 pointers
  switch (schema.getSelectionVectorMode()) {
    case NONE: {
      int index = 0;
      int recordBatchId = 0;
      for (RecordBatchData d : data) {
        for (int i = 0; i < d.getRecordCount(); i++, index++) {
          sv4.set(index, recordBatchId, i);
        }
        recordBatchId++;
      }
      break;
    }
    case TWO_BYTE: {
      int index = 0;
      int recordBatchId = 0;
      for (RecordBatchData d : data) {
        for (int i = 0; i < d.getRecordCount(); i++, index++) {
          sv4.set(index, recordBatchId, (int) d.getSv2().getIndex(i));
        }
        // might as well drop the selection vector since we'll stop using it now.
        d.getSv2().clear();
        recordBatchId++;
      }
      break;
    }
    default:
      throw new UnsupportedOperationException();
  }
  // next, we'll create lists of each of the vector types.
  ArrayListMultimap<MaterializedField, ValueVector> vectors = ArrayListMultimap.create();
  for (RecordBatchData rbd : batches.values()) {
    for (ValueVector v : rbd.getVectors()) {
      vectors.put(v.getField(), v);
    }
  }
  for (MaterializedField f : schema) {
    List<ValueVector> v = vectors.get(f);
    outputContainer.addHyperList(v, false);
  }
  outputContainer.buildSchema(SelectionVectorMode.FOUR_BYTE);
}
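Each SV4 entry above packs a batch index and an in-batch record index into a single 4-byte value, which is why build() rejects more than Character.MAX_VALUE batches. A hedged sketch of one plausible 16/16-bit packing follows; the exact layout inside SelectionVector4 is an assumption here, not taken from the class itself.

public class Sv4PackingSketch {
  // Pack a batch index into the high 16 bits and a record index into the low 16 bits.
  static int encode(int batchIndex, int recordIndex) {
    return (batchIndex << 16) | (recordIndex & 0xFFFF);
  }

  static int batchIndexOf(int entry) {
    return entry >>> 16;
  }

  static int recordIndexOf(int entry) {
    return entry & 0xFFFF;
  }

  public static void main(String[] args) {
    int entry = encode(3, 1234);
    System.out.println(batchIndexOf(entry) + " " + recordIndexOf(entry));  // prints: 3 1234
  }
}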
Use of io.netty.buffer.DrillBuf in project drill by apache.
The class AsyncPageReader, method readDictionaryPageData.
// Read and decode the dictionary data
private void readDictionaryPageData(final ReadStatus readStatus, final ColumnReader<?> parentStatus) throws UserException {
  try {
    pageHeader = readStatus.getPageHeader();
    int uncompressedSize = pageHeader.getUncompressed_page_size();
    final DrillBuf dictionaryData = getDecompressedPageData(readStatus);
    Stopwatch timer = Stopwatch.createStarted();
    allocatedDictionaryBuffers.add(dictionaryData);
    DictionaryPage page = new DictionaryPage(
        asBytesInput(dictionaryData, 0, uncompressedSize),
        pageHeader.uncompressed_page_size,
        pageHeader.dictionary_page_header.num_values,
        valueOf(pageHeader.dictionary_page_header.encoding.name()));
    this.dictionary = page.getEncoding().initDictionary(parentStatus.columnDescriptor, page);
    long timeToDecode = timer.elapsed(TimeUnit.NANOSECONDS);
    stats.timeDictPageDecode.addAndGet(timeToDecode);
  } catch (Exception e) {
    handleAndThrowException(e, "Error decoding dictionary page.");
  }
}
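getDecompressedPageData returns the dictionary bytes in an off-heap DrillBuf, which then has to be handed to Parquet's DictionaryPage. A hedged sketch of one way to expose a DrillBuf region to an API that needs heap bytes is below; toHeapBytes is a hypothetical helper and is not Drill's asBytesInput.

import io.netty.buffer.DrillBuf;

public class HeapCopySketch {
  // Copy [offset, offset + length) from the off-heap buffer into a heap byte[],
  // using an absolute read that leaves the buffer's reader/writer indexes untouched.
  static byte[] toHeapBytes(DrillBuf buf, int offset, int length) {
    final byte[] out = new byte[length];
    buf.getBytes(offset, out);
    return out;
  }
}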