
Example 1 with DiskRange

Use of org.apache.hadoop.hive.common.io.DiskRange in project hive by apache.

The class TestLowLevelCacheImpl, method testMTTWithCleanup:

@Test
public void testMTTWithCleanup() {
    final LowLevelCacheImpl cache = new LowLevelCacheImpl(LlapDaemonCacheMetrics.create("test", "1"), new DummyCachePolicy(), new DummyAllocator(), true, 1);
    final long fn1 = 1, fn2 = 2;
    final int offsetsToUse = 8;
    final CountDownLatch cdlIn = new CountDownLatch(4), cdlOut = new CountDownLatch(1);
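    // Each worker counts down cdlIn once it is ready; the test thread then opens cdlOut
    // (see the try block at the end of the test) so all four tasks hit the cache at once.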
    final AtomicInteger rdmsDone = new AtomicInteger(0);
    Callable<Long> rdmCall = new Callable<Long>() {

        public Long call() {
            int gets = 0, puts = 0;
            try {
                Random rdm = new Random(1234 + Thread.currentThread().getId());
                syncThreadStart(cdlIn, cdlOut);
                for (int i = 0; i < 20000; ++i) {
                    boolean isGet = rdm.nextBoolean(), isFn1 = rdm.nextBoolean();
                    long fileName = isFn1 ? fn1 : fn2;
                    int fileIndex = isFn1 ? 1 : 2;
                    int count = rdm.nextInt(offsetsToUse);
                    if (isGet) {
                        int[] offsets = new int[count];
                        count = generateOffsets(offsetsToUse, rdm, offsets);
                        CreateHelper list = new CreateHelper();
                        for (int j = 0; j < count; ++j) {
                            list.addOrMerge(offsets[j], offsets[j] + 1, true, false);
                        }
                        DiskRangeList iter = cache.getFileData(fileName, list.get(), 0, testFactory, null, null);
                        int j = -1;
                        while (iter != null) {
                            ++j;
                            if (!(iter instanceof CacheChunk)) {
                                iter = iter.next;
                                continue;
                            }
                            ++gets;
                            LlapDataBuffer result = (LlapDataBuffer) ((CacheChunk) iter).getBuffer();
                            assertEquals(makeFakeArenaIndex(fileIndex, offsets[j]), result.arenaIndex);
                            cache.decRefBuffer(result);
                            iter = iter.next;
                        }
                    } else {
                        DiskRange[] ranges = new DiskRange[count];
                        int[] offsets = new int[count];
                        for (int j = 0; j < count; ++j) {
                            int next = rdm.nextInt(offsetsToUse);
                            ranges[j] = dr(next, next + 1);
                            offsets[j] = next;
                        }
                        MemoryBuffer[] buffers = new MemoryBuffer[count];
                        for (int j = 0; j < offsets.length; ++j) {
                            LlapDataBuffer buf = LowLevelCacheImpl.allocateFake();
                            buf.arenaIndex = makeFakeArenaIndex(fileIndex, offsets[j]);
                            buffers[j] = buf;
                        }
                        long[] mask = cache.putFileData(fileName, ranges, buffers, 0, Priority.NORMAL, null);
                        puts += buffers.length;
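                        // A non-null mask has one bit per buffer just offered to the cache;
                        // for every set bit, the loop below checks that the buffer still
                        // carries the fake arena index encoding its file and offset.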
                        long maskVal = 0;
                        if (mask != null) {
                            assertEquals(1, mask.length);
                            maskVal = mask[0];
                        }
                        for (int j = 0; j < offsets.length; ++j) {
                            LlapDataBuffer buf = (LlapDataBuffer) (buffers[j]);
                            if ((maskVal & 1) == 1) {
                                assertEquals(makeFakeArenaIndex(fileIndex, offsets[j]), buf.arenaIndex);
                            }
                            maskVal >>= 1;
                            cache.decRefBuffer(buf);
                        }
                    }
                }
            } finally {
                rdmsDone.incrementAndGet();
            }
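            // Pack both counters into one long: the get count in the high 32 bits and the
            // put count in the low 32 (descRdmTask unpacks them for logging).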
            return (((long) gets) << 32) | puts;
        }

        private int makeFakeArenaIndex(int fileIndex, long offset) {
            return (int) ((fileIndex << 16) + offset);
        }
    };
    FutureTask<Integer> evictionTask = new FutureTask<Integer>(new Callable<Integer>() {

        public Integer call() {
            boolean isFirstFile = false;
            Random rdm = new Random(1234 + Thread.currentThread().getId());
            int evictions = 0;
            syncThreadStart(cdlIn, cdlOut);
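            // Keep evicting until all three reader/writer tasks are done, alternating
            // between the two files on every pass.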
            while (rdmsDone.get() < 3) {
                DiskRangeList head = new DiskRangeList(0, offsetsToUse + 1);
                isFirstFile = !isFirstFile;
                long fileId = isFirstFile ? fn1 : fn2;
                head = cache.getFileData(fileId, head, 0, testFactory, null, null);
                DiskRange[] results = head.listToArray();
                int startIndex = rdm.nextInt(results.length), index = startIndex;
                LlapDataBuffer victim = null;
                do {
                    DiskRange r = results[index];
                    if (r instanceof CacheChunk) {
                        LlapDataBuffer result = (LlapDataBuffer) ((CacheChunk) r).getBuffer();
                        cache.decRefBuffer(result);
                        if (victim == null && result.invalidate()) {
                            ++evictions;
                            victim = result;
                        }
                    }
                    ++index;
                    if (index == results.length)
                        index = 0;
                } while (index != startIndex);
                if (victim == null)
                    continue;
                cache.notifyEvicted(victim);
            }
            return evictions;
        }
    });
    FutureTask<Long> rdmTask1 = new FutureTask<Long>(rdmCall), rdmTask2 = new FutureTask<Long>(rdmCall), rdmTask3 = new FutureTask<Long>(rdmCall);
    Executor threadPool = Executors.newFixedThreadPool(4);
    threadPool.execute(rdmTask1);
    threadPool.execute(rdmTask2);
    threadPool.execute(rdmTask3);
    threadPool.execute(evictionTask);
    try {
        cdlIn.await();
        cdlOut.countDown();
        long result1 = rdmTask1.get(), result2 = rdmTask2.get(), result3 = rdmTask3.get();
        int evictions = evictionTask.get();
        LOG.info("MTT test: task 1: " + descRdmTask(result1) + ", task 2: " + descRdmTask(result2) + ", task 3: " + descRdmTask(result3) + "; " + evictions + " evictions");
    } catch (Throwable t) {
        throw new RuntimeException(t);
    }
}
Also used : DiskRangeList(org.apache.hadoop.hive.common.io.DiskRangeList) Callable(java.util.concurrent.Callable) CreateHelper(org.apache.hadoop.hive.common.io.DiskRangeList.CreateHelper) Executor(java.util.concurrent.Executor) Random(java.util.Random) FutureTask(java.util.concurrent.FutureTask) CacheChunk(org.apache.hadoop.hive.ql.io.orc.encoded.CacheChunk) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MemoryBuffer(org.apache.hadoop.hive.common.io.encoded.MemoryBuffer) DiskRange(org.apache.hadoop.hive.common.io.DiskRange) Test(org.junit.Test)
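The lookup requests in the get path above are built with DiskRangeList.CreateHelper, which chains DiskRange nodes and merges ranges that touch. A minimal standalone sketch of that pattern, assuming only the merge-adjacent behavior that the test's own addOrMerge calls rely on:

import org.apache.hadoop.hive.common.io.DiskRangeList;
import org.apache.hadoop.hive.common.io.DiskRangeList.CreateHelper;

public class DiskRangeListSketch {
    public static void main(String[] args) {
        CreateHelper list = new CreateHelper();
        // Single-byte ranges, same call shape as the test; the boolean flags mirror its usage.
        list.addOrMerge(0, 1, true, false);
        // Contiguous with [0, 1), so it should merge into [0, 2).
        list.addOrMerge(1, 2, true, false);
        // Leaves a gap, so it stays a separate range.
        list.addOrMerge(5, 6, true, false);
        // Walk the resulting list via the public next field, as the test does.
        for (DiskRangeList r = list.get(); r != null; r = r.next) {
            System.out.println("[" + r.getOffset() + ", " + r.getEnd() + ")");
        }
    }
}

With the merge behavior assumed above, this should print the merged range [0, 2) followed by [5, 6).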

Example 2 with DiskRange

Use of org.apache.hadoop.hive.common.io.DiskRange in project hive by apache.

The class OrcMetadataCache, method putIncompleteCbs:

public void putIncompleteCbs(Object fileKey, DiskRange[] ranges, long baseOffset) {
    if (estimateErrors == null)
        return;
    OrcFileEstimateErrors errorData = estimateErrors.get(fileKey);
    boolean isNew = false;
    // For now there is no mechanism to properly notify the cache policy, etc. w.r.t. parallel evicts.
    if (errorData == null) {
        errorData = new OrcFileEstimateErrors(fileKey);
        for (DiskRange range : ranges) {
            errorData.addError(range.getOffset(), range.getLength(), baseOffset);
        }
        long memUsage = errorData.estimateMemoryUsage();
        memoryManager.reserveMemory(memUsage, false);
        OrcFileEstimateErrors old = estimateErrors.putIfAbsent(fileKey, errorData);
        if (old != null) {
            errorData = old;
            memoryManager.releaseMemory(memUsage);
            policy.notifyLock(errorData);
        } else {
            isNew = true;
            policy.cache(errorData, Priority.NORMAL);
        }
    }
    if (!isNew) {
        for (DiskRange range : ranges) {
            errorData.addError(range.getOffset(), range.getLength(), baseOffset);
        }
    }
    policy.notifyUnlock(errorData);
}
Also used : DiskRange(org.apache.hadoop.hive.common.io.DiskRange)
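A hypothetical caller would hand this method the ranges whose compressed-block size estimates turned out to be wrong, keyed by the file. A rough sketch of such a call; the metadataCache instance, fileKey, and stripeBaseOffset are assumptions for illustration, not taken from the source:

// Hypothetical usage; assumes an OrcMetadataCache `metadataCache` and a file key
// obtained from the reader.
DiskRange[] badEstimates = new DiskRange[] {
    new DiskRange(100, 200),   // [offset, end) relative to the base offset
    new DiskRange(300, 450)
};
// baseOffset shifts the range offsets to absolute positions within the file.
metadataCache.putIncompleteCbs(fileKey, badEstimates, stripeBaseOffset);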

Example 3 with DiskRange

Use of org.apache.hadoop.hive.common.io.DiskRange in project hive by apache.

The class EncodedReaderImpl, method preReadUncompressedStream:

/**
   * To achieve consistent cache boundaries, we cache streams deterministically: in segments
   * starting at the stream start and running for either the stream size or some fixed size.
   * If we are not reading an entire segment's worth of data, we do not cache the partial
   * RGs; the breakage of cache assumptions (no interleaving blocks, etc.) is far too painful
   * to handle just for this case.
   * We could avoid the copy in the non-zcr case and manage a buffer that was not allocated by
   * our allocator, but the uncompressed case is not mainline, so let's not complicate it.
   */
private DiskRangeList preReadUncompressedStream(long baseOffset, DiskRangeList start, long streamOffset, long streamEnd) throws IOException {
    if (streamOffset == streamEnd)
        return null;
    List<UncompressedCacheChunk> toCache = null;
    List<ByteBuffer> toRelease = null;
    // 1. Find our bearings in the stream.
    DiskRangeList current = findIntersectingPosition(start, streamOffset, streamEnd);
    if (isTracingEnabled) {
        LOG.trace("Starting pre-read for [" + streamOffset + "," + streamEnd + ") at " + current);
    }
    if (streamOffset > current.getOffset()) {
        // Target compression block is in the middle of the range; slice the range in two.
        current = current.split(streamOffset).next;
    }
    // Account for maximum cache buffer size.
    long streamLen = streamEnd - streamOffset;
    int partSize = determineUncompressedPartSize(), partCount = (int) (streamLen / partSize) + (((streamLen % partSize) != 0) ? 1 : 0);
    CacheChunk lastUncompressed = null;
    MemoryBuffer[] singleAlloc = new MemoryBuffer[1];
    for (int i = 0; i < partCount; ++i) {
        long partOffset = streamOffset + (i * partSize), partEnd = Math.min(partOffset + partSize, streamEnd);
        // We have 0 bytes of data for this part, for now.
        long hasEntirePartTo = partOffset;
        if (current == null) {
            // We have no data from this point on (could be unneeded), skip.
            break;
        }
        assert partOffset <= current.getOffset();
        if (partOffset == current.getOffset() && current instanceof CacheChunk) {
            // We assume cache chunks would always match the way we read, so check and skip it.
            assert current.getOffset() == partOffset && current.getEnd() == partEnd;
            lastUncompressed = (CacheChunk) current;
            current = current.next;
            continue;
        }
        if (current.getOffset() >= partEnd) {
            // We have no data at all for this part of the stream (could be unneeded), skip.
            continue;
        }
        if (toRelease == null && dataReader.isTrackingDiskRanges()) {
            toRelease = new ArrayList<ByteBuffer>();
        }
        // We have some disk buffers... see if we have entire part, etc.
        // We will cache if we have the entire part.
        UncompressedCacheChunk candidateCached = null;
        DiskRangeList next = current;
        while (true) {
            boolean noMoreDataForPart = (next == null || next.getOffset() >= partEnd);
            if (noMoreDataForPart && hasEntirePartTo < partEnd && candidateCached != null) {
                // We are missing a section at the end of the part... copy the start to non-cached.
                lastUncompressed = copyAndReplaceCandidateToNonCached(candidateCached, partOffset, hasEntirePartTo, cacheWrapper, singleAlloc);
                candidateCached = null;
            }
            current = next;
            // Done with this part.
            if (noMoreDataForPart)
                break;
            boolean wasSplit = false;
            if (current.getEnd() > partEnd) {
                // If the current buffer contains multiple parts, split it.
                current = current.split(partEnd);
                wasSplit = true;
            }
            if (isTracingEnabled) {
                LOG.trace("Processing uncompressed file data at [" + current.getOffset() + ", " + current.getEnd() + ")");
            }
            BufferChunk curBc = (BufferChunk) current;
            if (!wasSplit && toRelease != null) {
                // TODO: is it valid to give zcr the modified 2nd part?
                toRelease.add(curBc.getChunk());
            }
            // Track if we still have the entire part.
            long hadEntirePartTo = hasEntirePartTo;
            // We have data until the end of current block if we had it until the beginning.
            hasEntirePartTo = (hasEntirePartTo == current.getOffset()) ? current.getEnd() : -1;
            if (hasEntirePartTo == -1) {
                // There is a gap, so this part will not be cached; we could still handle caching
                // with gaps, but it's probably not needed.
                if (candidateCached != null) {
                    assert hadEntirePartTo != -1;
                    copyAndReplaceCandidateToNonCached(candidateCached, partOffset, hadEntirePartTo, cacheWrapper, singleAlloc);
                    candidateCached = null;
                }
                lastUncompressed = copyAndReplaceUncompressedToNonCached(curBc, cacheWrapper, singleAlloc);
                // There may be more data after the gap.
                next = lastUncompressed.next;
            } else {
                // So far we have all the data from the beginning of the part.
                if (candidateCached == null) {
                    candidateCached = new UncompressedCacheChunk(curBc);
                } else {
                    candidateCached.addChunk(curBc);
                }
                next = current.next;
            }
        }
        if (candidateCached != null) {
            if (toCache == null) {
                toCache = new ArrayList<>(partCount - i);
            }
            toCache.add(candidateCached);
        }
    }
    // Nothing to copy and cache.
    if (toCache == null)
        return lastUncompressed;
    MemoryBuffer[] targetBuffers = toCache.size() == 1 ? singleAlloc : new MemoryBuffer[toCache.size()];
    targetBuffers[0] = null;
    DiskRange[] cacheKeys = new DiskRange[toCache.size()];
    int ix = 0;
    for (UncompressedCacheChunk chunk : toCache) {
        // Relies on the fact that cache does not actually store these.
        cacheKeys[ix] = chunk;
        ++ix;
    }
    cacheWrapper.getAllocator().allocateMultiple(targetBuffers, (int) (partCount == 1 ? streamLen : partSize));
    // 4. Now copy the data into cache buffers.
    ix = 0;
    for (UncompressedCacheChunk candidateCached : toCache) {
        candidateCached.setBuffer(targetBuffers[ix]);
        ByteBuffer dest = candidateCached.getBuffer().getByteBufferRaw();
        copyAndReplaceUncompressedChunks(candidateCached, dest, candidateCached);
        candidateCached.clear();
        lastUncompressed = candidateCached;
        ++ix;
    }
    // 5. Release original compressed buffers to zero-copy reader if needed.
    if (toRelease != null) {
        assert dataReader.isTrackingDiskRanges();
        for (ByteBuffer buf : toRelease) {
            dataReader.releaseBuffer(buf);
        }
    }
    // 6. Finally, put uncompressed data to cache.
    if (fileKey != null) {
        long[] collisionMask = cacheWrapper.putFileData(fileKey, cacheKeys, targetBuffers, baseOffset);
        processCacheCollisions(collisionMask, toCache, targetBuffers, null);
    }
    return lastUncompressed;
}
Also used : DiskRangeList(org.apache.hadoop.hive.common.io.DiskRangeList) BufferChunk(org.apache.orc.impl.BufferChunk) ByteBuffer(java.nio.ByteBuffer) MemoryBuffer(org.apache.hadoop.hive.common.io.encoded.MemoryBuffer) DiskRange(org.apache.hadoop.hive.common.io.DiskRange)
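The segmenting described in the javadoc reduces to a ceiling division of the stream length by the part size, exactly as computed near the top of the method. A tiny illustration of that arithmetic; the concrete sizes are made up, since the real part size comes from determineUncompressedPartSize():

long streamLen = 10_000_000L;        // hypothetical uncompressed stream length
int partSize = 4 * 1024 * 1024;      // hypothetical fixed segment size
int partCount = (int) (streamLen / partSize) + ((streamLen % partSize) != 0 ? 1 : 0);
// partCount == 3: two full 4 MB parts plus a shorter tail part; the loop in the method
// then decides per part whether the data is already cached, cacheable, or partial.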

Example 4 with DiskRange

Use of org.apache.hadoop.hive.common.io.DiskRange in project hive by apache.

The class EncodedReaderImpl, method readEncodedStream:

/**
   * Uncompresses part of the stream. RGs can overlap, so we cannot just go and decompress
   * and remove what we have already returned. We keep the iterator as a "hint" point.
   * @param baseOffset Absolute offset of boundaries and ranges relative to file, for cache keys.
   * @param start Ordered ranges containing file data. Helpful if they point close to cOffset.
   * @param cOffset Start offset to decompress.
   * @param endCOffset End offset to decompress; an estimate, since partial CBs will be ignored.
   * @param csd Stream data, to which the results are added.
   * @param unlockUntilCOffset The offset until which the buffers can be unlocked in cache, as
   *                           they will not be used in future calls (see the class comment in
   *                           EncodedReaderImpl about refcounts).
   * @return Last buffer cached during decompression. Cache buffers are never removed from
   *         the master list, so they are safe to keep as iterators for various streams.
   */
public DiskRangeList readEncodedStream(long baseOffset, DiskRangeList start, long cOffset, long endCOffset, ColumnStreamData csd, long unlockUntilCOffset, long streamOffset) throws IOException {
    if (csd.getCacheBuffers() == null) {
        csd.setCacheBuffers(new ArrayList<MemoryBuffer>());
    } else {
        csd.getCacheBuffers().clear();
    }
    if (cOffset == endCOffset)
        return null;
    boolean isCompressed = codec != null;
    List<ProcCacheChunk> toDecompress = null;
    List<ByteBuffer> toRelease = null;
    List<IncompleteCb> badEstimates = null;
    if (isCompressed) {
        toRelease = !dataReader.isTrackingDiskRanges() ? null : new ArrayList<ByteBuffer>();
        toDecompress = new ArrayList<>();
        badEstimates = new ArrayList<>();
    }
    // 1. Find our bearings in the stream. Normally, iter will already point either to where we
    // want to be, or just before. However, RGs can overlap due to encoding, so we may have
    // to return to a previous block.
    DiskRangeList current = findExactPosition(start, cOffset);
    if (isTracingEnabled) {
        LOG.trace("Starting read for [" + cOffset + "," + endCOffset + ") at " + current);
    }
    CacheChunk lastUncompressed = null;
    // 2. Go thru the blocks; add stuff to results and prepare the decompression work (see below).
    try {
        lastUncompressed = isCompressed ? prepareRangesForCompressedRead(cOffset, endCOffset, streamOffset, unlockUntilCOffset, current, csd, toRelease, toDecompress, badEstimates) : prepareRangesForUncompressedRead(cOffset, endCOffset, streamOffset, unlockUntilCOffset, current, csd);
    } catch (Exception ex) {
        // Deliberately not logging the exception itself here; it is rethrown below.
        LOG.error("Failed " + (isCompressed ? "" : "un") + "compressed read; cOffset " + cOffset + ", endCOffset " + endCOffset + ", streamOffset " + streamOffset + ", unlockUntilCOffset " + unlockUntilCOffset + "; ranges passed in " + RecordReaderUtils.stringifyDiskRanges(start) + "; ranges passed to prepare " + RecordReaderUtils.stringifyDiskRanges(current));
        throw (ex instanceof IOException) ? (IOException) ex : new IOException(ex);
    }
    // 2.5. Remember the bad estimates for future reference.
    if (badEstimates != null && !badEstimates.isEmpty()) {
        // Relies on the fact that cache does not actually store these.
        DiskRange[] cacheKeys = badEstimates.toArray(new DiskRange[badEstimates.size()]);
        long[] result = cacheWrapper.putFileData(fileKey, cacheKeys, null, baseOffset);
        // We don't expect conflicts from bad estimates.
        assert result == null;
    }
    // Nothing to do.
    if (toDecompress == null || toDecompress.isEmpty())
        return lastUncompressed;
    // 3. Allocate the buffers, prepare cache keys.
    // At this point, we have read all the CBs we need to read. cacheBuffers contains some cache
    // data and some unallocated membufs for decompression. toDecompress contains all the work we
    // need to do, and each item points to one of the membufs in cacheBuffers as target. The iter
    // has also been adjusted to point to these buffers instead of compressed data for the ranges.
    MemoryBuffer[] targetBuffers = new MemoryBuffer[toDecompress.size()];
    DiskRange[] cacheKeys = new DiskRange[toDecompress.size()];
    int ix = 0;
    for (ProcCacheChunk chunk : toDecompress) {
        // Relies on the fact that cache does not actually store these.
        cacheKeys[ix] = chunk;
        targetBuffers[ix] = chunk.getBuffer();
        ++ix;
    }
    cacheWrapper.getAllocator().allocateMultiple(targetBuffers, bufferSize);
    // 4. Now decompress (or copy) the data into cache buffers.
    for (ProcCacheChunk chunk : toDecompress) {
        ByteBuffer dest = chunk.getBuffer().getByteBufferRaw();
        if (chunk.isOriginalDataCompressed) {
            decompressChunk(chunk.originalData, codec, dest);
        } else {
            copyUncompressedChunk(chunk.originalData, dest);
        }
        chunk.originalData = null;
        if (isTracingEnabled) {
            LOG.trace("Locking " + chunk.getBuffer() + " due to reuse (after decompression)");
        }
        cacheWrapper.reuseBuffer(chunk.getBuffer());
    }
    // 5. Release original compressed buffers to zero-copy reader if needed.
    if (toRelease != null) {
        assert dataReader.isTrackingDiskRanges();
        for (ByteBuffer buffer : toRelease) {
            dataReader.releaseBuffer(buffer);
        }
    }
    // 6. Finally, put uncompressed data to cache.
    if (fileKey != null) {
        long[] collisionMask = cacheWrapper.putFileData(fileKey, cacheKeys, targetBuffers, baseOffset);
        processCacheCollisions(collisionMask, toDecompress, targetBuffers, csd.getCacheBuffers());
    }
    // Release initial refcounts.
    for (ProcCacheChunk chunk : toDecompress) {
        ponderReleaseInitialRefcount(unlockUntilCOffset, streamOffset, chunk);
    }
    return lastUncompressed;
}
Also used : DiskRangeList(org.apache.hadoop.hive.common.io.DiskRangeList) ArrayList(java.util.ArrayList) IOException(java.io.IOException) ByteBuffer(java.nio.ByteBuffer) MemoryBuffer(org.apache.hadoop.hive.common.io.encoded.MemoryBuffer) DiskRange(org.apache.hadoop.hive.common.io.DiskRange)
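A call site supplies a ColumnStreamData whose cache-buffer list this method fills. The setup might look roughly like the following; the reader, ranges, and offsets are assumptions for illustration, and real LLAP code may obtain the ColumnStreamData differently (for example from an object pool) rather than constructing it directly:

// Hypothetical invocation sketch; `reader`, `ranges`, and the offsets are assumed.
ColumnStreamData csd = new ColumnStreamData();
csd.setCacheBuffers(new ArrayList<MemoryBuffer>());
DiskRangeList lastCached = reader.readEncodedStream(
    stripeOffset,              // baseOffset: absolute stripe position, used for cache keys
    ranges,                    // ordered ranges containing the stream's file data
    streamOffset,              // cOffset: where decompression starts
    streamOffset + streamLen,  // endCOffset: estimated end; partial CBs are ignored
    csd,                       // receives the resulting MemoryBuffers
    streamOffset,              // unlockUntilCOffset: buffers before this can be unlocked
    streamOffset);             // streamOffset: start of this stream within the stripe
// csd.getCacheBuffers() now lists the cache buffers backing the stream.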

Aggregations

DiskRange (org.apache.hadoop.hive.common.io.DiskRange): 4 uses
DiskRangeList (org.apache.hadoop.hive.common.io.DiskRangeList): 3 uses
MemoryBuffer (org.apache.hadoop.hive.common.io.encoded.MemoryBuffer): 3 uses
ByteBuffer (java.nio.ByteBuffer): 2 uses
IOException (java.io.IOException): 1 use
ArrayList (java.util.ArrayList): 1 use
Random (java.util.Random): 1 use
Callable (java.util.concurrent.Callable): 1 use
CountDownLatch (java.util.concurrent.CountDownLatch): 1 use
Executor (java.util.concurrent.Executor): 1 use
FutureTask (java.util.concurrent.FutureTask): 1 use
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 1 use
CreateHelper (org.apache.hadoop.hive.common.io.DiskRangeList.CreateHelper): 1 use
CacheChunk (org.apache.hadoop.hive.ql.io.orc.encoded.CacheChunk): 1 use
BufferChunk (org.apache.orc.impl.BufferChunk): 1 use
Test (org.junit.Test): 1 use