Use of io.netty.buffer.DrillBuf in project drill by apache.
The class AsyncPageReader, method decompress.
private DrillBuf decompress(PageHeader pageHeader, DrillBuf compressedData) {
  DrillBuf pageDataBuf = null;
  Stopwatch timer = Stopwatch.createUnstarted();
  long timeToRead;
  int compressedSize = pageHeader.getCompressed_page_size();
  int uncompressedSize = pageHeader.getUncompressed_page_size();
  pageDataBuf = allocateTemporaryBuffer(uncompressedSize);
  try {
    timer.start();
    CompressionCodecName codecName = parentColumnReader.columnChunkMetaData.getCodec();
    ByteBuffer input = compressedData.nioBuffer(0, compressedSize);
    ByteBuffer output = pageDataBuf.nioBuffer(0, uncompressedSize);
    DecompressionHelper decompressionHelper = new DecompressionHelper(codecName);
    decompressionHelper.decompress(input, compressedSize, output, uncompressedSize);
    pageDataBuf.writerIndex(uncompressedSize);
    timeToRead = timer.elapsed(TimeUnit.NANOSECONDS);
    this.updateStats(pageHeader, "Decompress", 0, timeToRead, compressedSize, uncompressedSize);
  } catch (IOException e) {
    handleAndThrowException(e, "Error decompressing data.");
  }
  return pageDataBuf;
}
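The nioBuffer(...) calls above expose the DrillBuf contents as java.nio.ByteBuffer views so the codec can decompress without copying. Because writes through an NIO view do not move the Netty write index, decompress must call writerIndex(uncompressedSize) by hand afterwards. A minimal sketch of that index behavior, using a plain Netty ByteBuf (DrillBuf builds on the same abstraction and follows the same index semantics):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.ByteBuffer;

public class NioViewSketch {
  public static void main(String[] args) {
    ByteBuf buf = Unpooled.buffer(16);
    ByteBuffer view = buf.nioBuffer(0, 16);  // view over the same memory
    view.putInt(42);                         // bypasses the ByteBuf indices
    System.out.println(buf.writerIndex());   // prints 0: nothing "written" yet
    buf.writerIndex(4);                      // advance manually, as decompress() does
    System.out.println(buf.readInt());       // prints 42
    buf.release();
  }
}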
Use of io.netty.buffer.DrillBuf in project drill by apache.
The class MSortTemplate, method setup.
@Override
public void setup(final FragmentContext context, final BufferAllocator allocator, final SelectionVector4 vector4, final VectorContainer hyperBatch, int outputBatchSize) throws SchemaChangeException {
  // We pass in the local hyperBatch since that is where we'll be reading data.
  Preconditions.checkNotNull(vector4);
  this.vector4 = vector4.createNewWrapperCurrent();
  this.context = context;
  vector4.clear();
  doSetup(context, hyperBatch, null);
  // Populate the queue with the offset in the SV4 of each batch. Note that
  // this is expensive as it requires a scan of all items to be sorted:
  // potentially millions.
  runStarts.add(0);
  int batch = 0;
  final int totalCount = this.vector4.getTotalCount();
  for (int i = 0; i < totalCount; i++) {
    final int newBatch = this.vector4.get(i) >>> 16;
    if (newBatch == batch) {
      continue;
    } else if (newBatch == batch + 1) {
      runStarts.add(i);
      batch = newBatch;
    } else {
      throw new UnsupportedOperationException(String.format("Missing batch. batch: %d newBatch: %d", batch, newBatch));
    }
  }
  // Create a temporary SV4 to hold the merged results.
  @SuppressWarnings("resource")
  final DrillBuf drillBuf = allocator.buffer(4 * totalCount);
  desiredRecordBatchCount = Math.min(outputBatchSize, Character.MAX_VALUE);
  desiredRecordBatchCount = Math.min(desiredRecordBatchCount, totalCount);
  aux = new SelectionVector4(drillBuf, totalCount, desiredRecordBatchCount);
}
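The >>> 16 in the loop relies on the SV4 layout: each four-byte entry packs the batch index into the upper 16 bits and the in-batch record offset into the lower 16, which is also why desiredRecordBatchCount is capped at Character.MAX_VALUE (65535). A minimal sketch of that packing (the encode helper is hypothetical, mirroring the layout setup() decodes):

public class Sv4PackingSketch {
  // Hypothetical helper: packs a batch index and in-batch offset into one entry.
  static int encode(int batchIndex, int recordOffset) {
    return (batchIndex << 16) | (recordOffset & 0xFFFF);
  }

  public static void main(String[] args) {
    int entry = encode(3, 42);
    int batch = entry >>> 16;     // 3: same shift as in the loop above
    int offset = entry & 0xFFFF;  // 42: offset within that batch
    System.out.println(batch + ", " + offset);
  }
}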
Use of io.netty.buffer.DrillBuf in project drill by apache.
The class PageReader, method readDictionaryPage.
private void readDictionaryPage(final PageHeader pageHeader, final ColumnReader<?> parentStatus) throws IOException {
  int compressedSize = pageHeader.getCompressed_page_size();
  int uncompressedSize = pageHeader.getUncompressed_page_size();
  final DrillBuf dictionaryData = readPage(pageHeader, compressedSize, uncompressedSize);
  allocatedDictionaryBuffers.add(dictionaryData);
  DictionaryPage page = new DictionaryPage(asBytesInput(dictionaryData, 0, uncompressedSize),
      pageHeader.uncompressed_page_size,
      pageHeader.dictionary_page_header.num_values,
      valueOf(pageHeader.dictionary_page_header.encoding.name()));
  this.dictionary = page.getEncoding().initDictionary(parentStatus.columnDescriptor, page);
}
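asBytesInput is a PageReader helper not shown in this snippet. A hypothetical equivalent could copy the requested DrillBuf range into a heap array and wrap it with Parquet's BytesInput; this is only a sketch of the contract, and the real helper may avoid the copy:

import io.netty.buffer.DrillBuf;
import org.apache.parquet.bytes.BytesInput;

// Hypothetical stand-in for PageReader.asBytesInput(buf, offset, length).
static BytesInput asBytesInputSketch(DrillBuf buf, int offset, int length) {
  byte[] bytes = new byte[length];
  buf.getBytes(offset, bytes);   // absolute read; leaves reader/writer indices alone
  return BytesInput.from(bytes); // Parquet-side wrapper over the copied bytes
}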
Use of io.netty.buffer.DrillBuf in project drill by axbaretto.
The class PrintingResultsListener, method dataArrived.
@Override
@SuppressWarnings("resource")
public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
  final QueryData header = result.getHeader();
  final DrillBuf data = result.getData();
  try {
    if (data != null) {
      count.addAndGet(header.getRowCount());
      try {
        loader.load(header.getDef(), data);
        // TODO: Clean: DRILL-2933: That load(...) no longer throws
        // SchemaChangeException, so check/clean the catch clause below.
      } catch (SchemaChangeException e) {
        submissionFailed(UserException.systemError(e).build(logger));
      }
      try {
        switch (format) {
          case TABLE:
            VectorUtil.showVectorAccessibleContent(loader, columnWidth);
            break;
          case TSV:
            VectorUtil.showVectorAccessibleContent(loader, "\t");
            break;
          case CSV:
            VectorUtil.showVectorAccessibleContent(loader, ",");
            break;
          default:
            throw new IllegalStateException(format.toString());
        }
      } finally {
        loader.clear();
      }
    }
  } finally {
    result.release();
  }
}
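For context, a listener like this is handed to the Drill client when a query is submitted; dataArrived then fires once per incoming batch, and the outer finally guarantees the batch's DrillBuf is released even if rendering fails. A hedged wiring sketch, where the PrintingResultsListener constructor arguments and the runQuery overload are assumptions rather than code from this project:

import org.apache.drill.exec.client.DrillClient;
import org.apache.drill.exec.client.QuerySubmitter.Format;
import org.apache.drill.exec.proto.UserBitShared.QueryType;

public class SubmitSketch {
  public static void main(String[] args) throws Exception {
    try (DrillClient client = new DrillClient()) {  // assumes default config
      client.connect();
      // Assumed overload: results are pushed to the listener batch by batch.
      // A real caller would also block until the listener signals completion.
      client.runQuery(QueryType.SQL,
          "SELECT * FROM cp.`employee.json` LIMIT 5",
          new PrintingResultsListener(client.getConfig(), Format.TABLE, 20));
    }
  }
}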
Use of io.netty.buffer.DrillBuf in project drill by axbaretto.
The class VectorAccessibleSerializable, method writeToStream.
/**
 * Serializes the VectorAccessible and writes it to an output stream.
 * @param output the OutputStream to write to
 * @throws IOException if writing to the stream fails
 */
@SuppressWarnings("resource")
@Override
public void writeToStream(OutputStream output) throws IOException {
  Preconditions.checkNotNull(output);
  final Timer.Context timerContext = metrics.timer(WRITER_TIMER).time();
  final DrillBuf[] incomingBuffers = batch.getBuffers();
  final UserBitShared.RecordBatchDef batchDef = batch.getDef();
  try {
    /* Write the metadata to the file */
    batchDef.writeDelimitedTo(output);
    /* If we have a selection vector, dump it to the file first */
    if (svMode == BatchSchema.SelectionVectorMode.TWO_BYTE) {
      recordCount = sv2.getCount();
      final int dataLength = recordCount * SelectionVector2.RECORD_SIZE;
      allocator.write(sv2.getBuffer(false), dataLength, output);
    }
    /* Dump the array of ByteBufs associated with the value vectors */
    for (DrillBuf buf : incomingBuffers) {
      /* dump the buffer into the OutputStream */
      allocator.write(buf, output);
    }
    timeNs += timerContext.stop();
  } catch (IOException e) {
    throw new RuntimeException(e);
  } finally {
    clear();
  }
}
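The stream layout written here is: the RecordBatchDef as a delimited protobuf, then the SV2 bytes when one is present, then each vector's backing DrillBuf in order; readFromStream reverses the sequence. A hedged usage sketch for spilling a batch to a local file, where the VectorAccessibleSerializable constructor shown is an assumption:

import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.drill.exec.cache.VectorAccessibleSerializable;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.record.WritableBatch;

// Hypothetical helper: spills a batch to a local file via writeToStream.
static void spill(WritableBatch batch, BufferAllocator allocator, String path) throws IOException {
  try (OutputStream out = new BufferedOutputStream(new FileOutputStream(path))) {
    new VectorAccessibleSerializable(batch, allocator)
        .writeToStream(out);  // metadata, optional SV2, then the raw buffers
  }
}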