Use of org.apache.hadoop.hive.llap.cache.SerDeLowLevelCacheImpl.FileData in the Apache Hive project.
The cacheFileData method of the class SerDeEncodedDataReader.
/**
 * Caches the data of one stripe slice under this reader's file key; when there is no file key,
 * only locks all of the slice's buffers instead of caching.
 *
 * <p>We assume that if put/lock throws in the middle, it's ok to treat buffers as not being
 * locked and to blindly deallocate them, since they are not going to be used. Therefore
 * we don't remove them from the cleanup list - we will do it after sending to consumer.
 * This relies on sequence of calls to cacheFileData and sendEcb.
 *
 * @param sd the stripe slice to cache; a null slice, or one without encodings, is a no-op
 */
public void cacheFileData(StripeData sd) {
  // Nothing to cache for a null slice or one that carries no encodings.
  if (sd == null || sd.getEncodings() == null) return;
  if (fileKey == null) {
    // No key to cache under - just lock the buffers so the consumer can use them.
    lockAllBuffers(sd);
    return;
  }
  // Note that we cache each slice separately. We could cache them together at the end, but
  // then we won't be able to pass them to users without inc-refing explicitly.
  ColumnEncoding[] encodings = sd.getEncodings();
  for (int colIx = 0; colIx < encodings.length; ++colIx) {
    // Make data consistent with encodings, don't store useless information.
    if (sd.getData()[colIx] == null) {
      encodings[colIx] = null;
    } else if (encodings[colIx] == null) {
      throw new AssertionError("Caching data without an encoding at " + colIx + ": " + sd);
    }
  }
  FileData fileData = new FileData(fileKey, encodings.length);
  fileData.addStripe(sd);
  cache.putFileData(fileData, Priority.NORMAL, counters);
}
Aggregations