Usage example of org.apache.hadoop.hive.llap.IllegalCacheConfigurationException in the Apache Hive project: class OrcEncodedDataReader, method getOrcTailForPath.
/**
 * Looks up the ORC file tail (serialized footer metadata) for the given path in the LLAP
 * metadata cache, reading it from the file and caching it in case of a cache miss.
 * @param path the ORC file path whose tail is requested
 * @param jobConf job configuration used for filesystem access and the cache-only-read check
 * @param tag cache tag under which the footer is inserted on a cache miss
 * @param daemonConf LLAP daemon configuration, used when a file id has to be determined
 * @param metadataCache the LLAP metadata cache; must be non-null
 * @param fileKey file identifier of the ORC file; if null, it is derived from the filesystem
 * @return the OrcTail reconstructed from the cached (or freshly read) footer buffers
 * @throws IOException if reading the file footer fails
 * @throws IllegalCacheConfigurationException (unchecked) if no file id could be determined
 *         or no metadata cache is available
 */
public static OrcTail getOrcTailForPath(Path path, Configuration jobConf, CacheTag tag, Configuration daemonConf, MetadataCache metadataCache, Object fileKey) throws IOException {
Supplier<FileSystem> fsSupplier = getFsSupplier(path, jobConf);
// Derive a file id from the filesystem if the caller did not supply one.
if (fileKey == null) {
fileKey = determineFileId(fsSupplier, path, daemonConf);
}
// Without a file id or a cache there is nothing to look up / insert into.
if (fileKey == null || metadataCache == null) {
throw new IllegalCacheConfigurationException("LLAP metadata cache not available for path " + path.toString());
}
// NOTE: getFileMetadata returns the buffers in a locked (ref-counted) state on a hit;
// the matching decRefBuffer happens in the finally block below.
LlapBufferOrBuffers tailBuffers = metadataCache.getFileMetadata(fileKey);
try {
// Cache hit
if (tailBuffers != null) {
return getOrcTailFromLlapBuffers(tailBuffers);
}
// Cache miss
// Fail fast if the config demands cache-only reads — we would have to touch the file.
throwIfCacheOnlyRead(HiveConf.getBoolVar(jobConf, ConfVars.LLAP_IO_CACHE_ONLY));
ReaderOptions opts = EncodedOrcFile.readerOptions(jobConf).filesystem(fsSupplier);
Reader reader = EncodedOrcFile.createReader(path, opts);
ByteBuffer tailBufferBb = reader.getSerializedFileFooter();
// putFileMetadata also returns the buffers locked; reassigning tailBuffers ensures the
// finally block releases exactly the buffers we hold.
tailBuffers = metadataCache.putFileMetadata(fileKey, tailBufferBb, tag, new AtomicBoolean(false));
return getOrcTailFromLlapBuffers(tailBuffers);
} finally {
// By this time buffers got locked at either cache look up or cache insert times.
if (tailBuffers != null) {
metadataCache.decRefBuffer(tailBuffers);
}
}
}
Usage example of org.apache.hadoop.hive.llap.IllegalCacheConfigurationException in the Apache Hive project: class VectorizedOrcAcidRowBatchReader, method getOrcReaderData.
/**
 * Gets the OrcTail from the LLAP IO cache if delete-delta caching is enabled, otherwise creates
 * a Reader to obtain the tail directly. When a Reader is created it is stored alongside the tail
 * in the returned ReaderData so callers can reuse it.
 * @param path the ORC file path we want to get the OrcTail for
 * @param conf the Configuration used to access LLAP and to create readers
 * @param cacheTag the cacheTag needed to get the OrcTail from the LLAP IO cache
 * @param fileKey fileId of the ORC file (either the Long fileId of HDFS or the SyntheticFileId).
 *                Optional; if it is not provided, it will be generated, see:
 *                {@link org.apache.hadoop.hive.ql.io.HdfsUtils#getFileId}
 * @return ReaderData object where the orcTail is not null. Reader can be null, but if we had to
 *         create one we return that as well for further reuse.
 * @throws IOException if reading the file fails, or if delete-delta caching is turned on but the
 *         LLAP cache is not configured properly
 */
private static ReaderData getOrcReaderData(Path path, Configuration conf, CacheTag cacheTag, Object fileKey) throws IOException {
  ReaderData readerData = new ReaderData();
  if (shouldReadDeleteDeltasWithLlap(conf, true)) {
    try {
      readerData.orcTail = LlapProxy.getIo().getOrcTailFromCache(path, conf, cacheTag, fileKey);
      readerData.reader = OrcFile.createReader(path, OrcFile.readerOptions(conf).orcTail(readerData.orcTail));
    } catch (IllegalCacheConfigurationException icce) {
      // Surface a misconfigured cache as a checked IOException, preserving the cause.
      throw new IOException("LLAP cache is not configured properly while delete delta caching is turned on", icce);
    }
    // BUG FIX: must return here. The original fell through and unconditionally re-created the
    // reader below, discarding the cached tail and the reader built from it.
    return readerData;
  }
  // Non-LLAP path: open the file directly and build the tail from the reader's footer.
  readerData.reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
  readerData.orcTail = new OrcTail(readerData.reader.getFileTail(), readerData.reader.getSerializedFileFooter());
  return readerData;
}
Aggregations