Use of org.apache.orc.CompressionCodec in project hive by apache.

Example: the getStripeFooterFromCacheOrDisk method of the OrcEncodedDataReader class. It returns a stripe footer from the LLAP metadata cache when one is present; otherwise it reads the footer from the file, caches it, and parses it with a CompressionCodec that is either borrowed from the codec pool or created on the spot.
private OrcProto.StripeFooter getStripeFooterFromCacheOrDisk(
    StripeInformation si, OrcBatchKey stripeKey) throws IOException {
  boolean hasCache = fileKey != null && metadataCache != null;
  if (hasCache) {
    LlapBufferOrBuffers footerBuffers = metadataCache.getStripeTail(stripeKey);
    if (footerBuffers != null) {
      try {
        counters.incrCounter(LlapIOCounters.METADATA_CACHE_HIT);
        ensureCodecFromFileMetadata();
        MemoryBuffer footerBuffer = footerBuffers.getSingleBuffer();
        if (footerBuffer != null) {
          ByteBuffer bb = footerBuffer.getByteBufferDup();
          return buildStripeFooter(Lists.<DiskRange>newArrayList(new BufferChunk(bb, 0)),
              bb.remaining(), codec, fileMetadata.getCompressionBufferSize());
        } else {
          MemoryBuffer[] footerBufferArray = footerBuffers.getMultipleBuffers();
          int pos = 0;
          List<DiskRange> bcs = new ArrayList<>(footerBufferArray.length);
          for (MemoryBuffer buf : footerBufferArray) {
            ByteBuffer bb = buf.getByteBufferDup();
            bcs.add(new BufferChunk(bb, pos));
            pos += bb.remaining();
          }
          return buildStripeFooter(bcs, pos, codec, fileMetadata.getCompressionBufferSize());
        }
      } finally {
        metadataCache.decRefBuffer(footerBuffers);
      }
    }
    counters.incrCounter(LlapIOCounters.METADATA_CACHE_MISS);
  }
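  // Cache miss (or no cache): read the stripe footer from the end of the stripe.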
  long offset = si.getOffset() + si.getIndexLength() + si.getDataLength();
  long startTime = counters.startTimeCounter();
  ensureRawDataReader(true);
  // TODO: add this to metadatareader in ORC - SI => metadata buffer, not just metadata.
  if (LOG.isTraceEnabled()) {
    LOG.trace("Reading [" + offset + ", " + (offset + si.getFooterLength()) + ") based on " + si);
  }
  DiskRangeList footerRange = rawDataReader.readFileData(
      new DiskRangeList(offset, offset + si.getFooterLength()), 0, false);
  // LOG.error("Got " + RecordReaderUtils.stringifyDiskRanges(footerRange));
  counters.incrTimeCounter(LlapIOCounters.HDFS_TIME_NS, startTime);
  // Can only happen w/ ZCR for a single input buffer.
  assert footerRange.next == null;
  if (hasCache) {
    LlapBufferOrBuffers cacheBuf = metadataCache.putStripeTail(
        stripeKey, footerRange.getData().duplicate(), cacheTag);
    metadataCache.decRefBuffer(cacheBuf); // We don't use this one.
  }
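  // Parse the footer from the buffer just read; borrow a codec from the pool
  // when pooling is enabled, otherwise create a one-off codec.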
  ByteBuffer bb = footerRange.getData().duplicate();
  CompressionKind kind = orcReader.getCompressionKind();
  boolean isPool = useCodecPool;
  CompressionCodec codec = isPool ? OrcCodecPool.getCodec(kind) : WriterImpl.createCodec(kind);
  boolean isCodecError = true;
  try {
    OrcProto.StripeFooter result = buildStripeFooter(
        Lists.<DiskRange>newArrayList(new BufferChunk(bb, 0)),
        bb.remaining(), codec, orcReader.getCompressionSize());
    isCodecError = false;
    return result;
  } finally {
    try {
      if (isPool && !isCodecError) {
        OrcCodecPool.returnCodec(kind, codec);
      } else {
        codec.close();
      }
    } catch (Exception ex) {
      LOG.error("Ignoring codec cleanup error", ex);
    }
  }
}
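The CompressionCodec-specific part of this method is the acquire/use/release discipline at the end: a codec is borrowed from OrcCodecPool (or created via WriterImpl.createCodec), and the finally block returns it to the pool only when it was used without error, closing it otherwise so a codec left in an unknown state is never recycled. Below is a minimal standalone sketch of that discipline, assuming only the OrcCodecPool and CompressionCodec calls already referenced above; the withCodec helper and CodecUser interface are hypothetical names introduced for illustration.

import org.apache.orc.CompressionCodec;
import org.apache.orc.CompressionKind;
import org.apache.orc.impl.OrcCodecPool;

public class CodecPoolSketch {
  // Hypothetical callback type: some unit of work that uses a codec.
  interface CodecUser<T> {
    T apply(CompressionCodec codec) throws Exception;
  }

  // Run the work with a pooled codec; return the codec to the pool only on
  // success, close it on failure (mirroring getStripeFooterFromCacheOrDisk).
  static <T> T withCodec(CompressionKind kind, CodecUser<T> user) throws Exception {
    CompressionCodec codec = OrcCodecPool.getCodec(kind);
    boolean isCodecError = true;
    try {
      T result = user.apply(codec);
      isCodecError = false;
      return result;
    } finally {
      try {
        if (!isCodecError) {
          OrcCodecPool.returnCodec(kind, codec);
        } else {
          codec.close();
        }
      } catch (Exception ex) {
        // As in the reader, cleanup failures are logged and swallowed.
        System.err.println("Ignoring codec cleanup error: " + ex);
      }
    }
  }
}

The success flag, rather than a catch block, is the key design choice: it distinguishes "work completed" from "work threw" without re-throwing or wrapping, so a codec that may have been left mid-stream by an exception is closed instead of being returned to the shared pool.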