Use of io.netty.buffer.DrillBuf in project drill by apache.
Class PriorityQueueTemplate, method resetQueue:
@Override
public void resetQueue(VectorContainer container, SelectionVector4 v4) throws SchemaChangeException {
  assert container.getSchema().getSelectionVectorMode() == BatchSchema.SelectionVectorMode.FOUR_BYTE;
  BatchSchema schema = container.getSchema();
  VectorContainer newContainer = new VectorContainer();
  for (MaterializedField field : schema) {
    int[] ids = container.getValueVectorId(SchemaPath.getSimplePath(field.getPath())).getFieldIds();
    newContainer.add(container.getValueAccessorById(field.getValueClass(), ids).getValueVectors());
  }
  newContainer.buildSchema(BatchSchema.SelectionVectorMode.FOUR_BYTE);
  // Cleanup before recreating hyperbatch and sv4.
  cleanup();
  hyperBatch = new ExpandableHyperContainer(newContainer);
  batchCount = hyperBatch.iterator().next().getValueVectors().length;
  @SuppressWarnings("resource")
  final DrillBuf drillBuf = allocator.buffer(4 * (limit + 1));
  heapSv4 = new SelectionVector4(drillBuf, limit, Character.MAX_VALUE);
  // Reset queue size (most likely to be set to limit).
  queueSize = 0;
  for (int i = 0; i < v4.getTotalCount(); i++) {
    heapSv4.set(i, v4.get(i));
    ++queueSize;
  }
  v4.clear();
  doSetup(context, hyperBatch, null);
}
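The DrillBuf allocation above sizes the new selection vector at four bytes per slot: each SelectionVector4 entry packs a batch index and an in-batch record offset into one int, with one spare slot reserved beyond the limit. A minimal sketch of that allocate/wrap/copy/release cycle, assuming a BufferAllocator named allocator, an int limit, and an existing SelectionVector4 named source (and, as in resetQueue, an enclosing method that declares SchemaChangeException):

final DrillBuf buf = allocator.buffer(4 * (limit + 1)); // 4 bytes per SV4 slot, plus one spare
final SelectionVector4 sv4 = new SelectionVector4(buf, limit, Character.MAX_VALUE);
for (int i = 0; i < source.getTotalCount(); i++) {
  sv4.set(i, source.get(i)); // copy the packed (batch index, record offset) entries
}
source.clear(); // release the source's backing DrillBuf once it has been copied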
Use of io.netty.buffer.DrillBuf in project drill by apache.
Class PageReader, method readPage:
private DrillBuf readPage(PageHeader pageHeader, int compressedSize, int uncompressedSize) throws IOException {
  DrillBuf pageDataBuf = null;
  Stopwatch timer = Stopwatch.createUnstarted();
  long timeToRead;
  long start = dataReader.getPos();
  if (parentColumnReader.columnChunkMetaData.getCodec() == CompressionCodecName.UNCOMPRESSED) {
    timer.start();
    pageDataBuf = dataReader.getNext(compressedSize);
    if (logger.isTraceEnabled()) {
      logger.trace("PageReaderTask==> Col: {} readPos: {} Uncompressed_size: {} pageData: {}",
          parentColumnReader.columnChunkMetaData.toString(), dataReader.getPos(),
          pageHeader.getUncompressed_page_size(), ByteBufUtil.hexDump(pageDataBuf));
    }
    timeToRead = timer.elapsed(TimeUnit.NANOSECONDS);
    this.updateStats(pageHeader, "Page Read", start, timeToRead, compressedSize, uncompressedSize);
  } else {
    DrillBuf compressedData = null;
    pageDataBuf = allocateTemporaryBuffer(uncompressedSize);
    try {
      timer.start();
      compressedData = dataReader.getNext(compressedSize);
      timeToRead = timer.elapsed(TimeUnit.NANOSECONDS);
      timer.reset();
      this.updateStats(pageHeader, "Page Read", start, timeToRead, compressedSize, compressedSize);
      start = dataReader.getPos();
      timer.start();
      codecFactory.getDecompressor(parentColumnReader.columnChunkMetaData.getCodec())
          .decompress(compressedData.nioBuffer(0, compressedSize), compressedSize,
              pageDataBuf.nioBuffer(0, uncompressedSize), uncompressedSize);
      pageDataBuf.writerIndex(uncompressedSize);
      timeToRead = timer.elapsed(TimeUnit.NANOSECONDS);
      this.updateStats(pageHeader, "Decompress", start, timeToRead, compressedSize, uncompressedSize);
    } finally {
      if (compressedData != null) {
        compressedData.release();
      }
    }
  }
  return pageDataBuf;
}
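The decompression branch uses a pattern worth noting: DrillBuf.nioBuffer(offset, length) exposes the buffer's memory as a java.nio.ByteBuffer view without copying, so the decompressor writes straight into Drill-managed memory. Because that view bypasses the Netty read/write indices, the code must advance writerIndex by hand afterwards. A reduced sketch of just that hand-off, where decompressor stands in for the codecFactory.getDecompressor(...) call above:

java.nio.ByteBuffer src = compressedData.nioBuffer(0, compressedSize);  // zero-copy view of the input
java.nio.ByteBuffer dst = pageDataBuf.nioBuffer(0, uncompressedSize);   // zero-copy view of the output
decompressor.decompress(src, compressedSize, dst, uncompressedSize);
pageDataBuf.writerIndex(uncompressedSize); // the view does not move the index; set it explicitly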
Use of io.netty.buffer.DrillBuf in project drill by apache.
Class AsyncPageReader, method getDecompressedPageData:
private DrillBuf getDecompressedPageData(ReadStatus readStatus) {
  DrillBuf data;
  boolean isDictionary = false;
  synchronized (this) {
    data = readStatus.getPageData();
    readStatus.setPageData(null);
    isDictionary = readStatus.isDictionaryPage;
  }
  if (parentColumnReader.columnChunkMetaData.getCodec() != CompressionCodecName.UNCOMPRESSED) {
    DrillBuf compressedData = data;
    data = decompress(readStatus.getPageHeader(), compressedData);
    synchronized (this) {
      readStatus.setPageData(null);
    }
    compressedData.release();
  } else {
    if (isDictionary) {
      stats.totalDictPageReadBytes.addAndGet(readStatus.bytesRead);
    } else {
      stats.totalDataPageReadBytes.addAndGet(readStatus.bytesRead);
    }
  }
  return data;
}
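The synchronized take-and-detach at the top is the reference-counting discipline that keeps this safe: the method claims ownership of the compressed DrillBuf by nulling it out of the shared ReadStatus, so no other thread can release it, and then releases it exactly once after decompress has produced a fresh buffer. The core of that hand-off, reduced to a sketch using only the calls shown above:

DrillBuf compressed;
synchronized (this) {
  compressed = readStatus.getPageData(); // take ownership of the buffer
  readStatus.setPageData(null);          // detach it so no one else releases it
}
DrillBuf uncompressed = decompress(readStatus.getPageHeader(), compressed);
compressed.release(); // exactly one release, by the single owner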
Use of io.netty.buffer.DrillBuf in project drill by apache.
Class VectorAccessibleSerializable, method writeToStream:
/**
 * Serializes the VectorAccessible batch and writes it to an output stream.
 * @param output the OutputStream to write to
 * @throws IOException if the batch cannot be written to the stream
 */
@SuppressWarnings("resource")
@Override
public void writeToStream(OutputStream output) throws IOException {
  Preconditions.checkNotNull(output);
  final Timer.Context timerContext = metrics.timer(WRITER_TIMER).time();
  final DrillBuf[] incomingBuffers = batch.getBuffers();
  final UserBitShared.RecordBatchDef batchDef = batch.getDef();
  try {
    /* Write the metadata to the stream */
    batchDef.writeDelimitedTo(output);
    /* If we have a selection vector, dump it to the stream first */
    if (svMode == BatchSchema.SelectionVectorMode.TWO_BYTE) {
      recordCount = sv2.getCount();
      final int dataLength = recordCount * SelectionVector2.RECORD_SIZE;
      allocator.write(sv2.getBuffer(false), dataLength, output);
    }
    /* Dump the array of ByteBufs associated with the value vectors */
    for (DrillBuf buf : incomingBuffers) {
      /* dump the buffer into the OutputStream */
      allocator.write(buf, output);
    }
    timeNs += timerContext.stop();
  } catch (IOException e) {
    throw new RuntimeException(e);
  } finally {
    clear();
  }
}
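This works as a wire format because the delimited RecordBatchDef written first records the length of every buffer that follows, so a reader can reconstruct the batch symmetrically. The class's actual readFromStream does this; the following is only an illustrative sketch of the idea, assuming an InputStream named input, a BufferAllocator named allocator, and the generated protobuf accessors getFieldList() and getBufferLength() on the metadata:

UserBitShared.RecordBatchDef def = UserBitShared.RecordBatchDef.parseDelimitedFrom(input);
for (UserBitShared.SerializedField field : def.getFieldList()) {
  int len = field.getBufferLength();
  DrillBuf buf = allocator.buffer(len); // the reader now owns this buffer and must release it
  while (buf.writerIndex() < len) {
    // Standard ByteBuf API: may transfer fewer bytes per call, returns -1 at EOF.
    if (buf.writeBytes(input, len - buf.writerIndex()) < 0) {
      throw new IOException("Unexpected end of stream");
    }
  }
}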
Use of io.netty.buffer.DrillBuf in project drill by apache.
Class PrintingResultsListener, method dataArrived:
@Override
public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
  final QueryData header = result.getHeader();
  final DrillBuf data = result.getData();
  if (data != null) {
    count.addAndGet(header.getRowCount());
    try {
      loader.load(header.getDef(), data);
      // TODO: Clean: DRILL-2933: That load(...) no longer throws
      // SchemaChangeException, so check/clean catch clause below.
    } catch (SchemaChangeException e) {
      submissionFailed(UserException.systemError(e).build(logger));
    }
    switch (format) {
      case TABLE:
        VectorUtil.showVectorAccessibleContent(loader, columnWidth);
        break;
      case TSV:
        VectorUtil.showVectorAccessibleContent(loader, "\t");
        break;
      case CSV:
        VectorUtil.showVectorAccessibleContent(loader, ",");
        break;
    }
    loader.clear();
  }
  result.release();
}
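The unconditional result.release() on the last line is the DrillBuf detail that matters here: a QueryDataBatch wraps a reference-counted DrillBuf, and the client leaks direct memory if any batch goes unreleased, including batches that arrive with no data. A stripped-down listener sketch that only counts rows, assuming count is the same atomic counter used above:

@Override
public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
  try {
    if (result.getData() != null) {
      count.addAndGet(result.getHeader().getRowCount());
    }
  } finally {
    result.release(); // always release, or the underlying DrillBuf leaks
  }
}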