Use of org.apache.hadoop.hive.common.io.encoded.MemoryBuffer in project hive by apache.
The class StreamUtils, method createDiskRangeInfo.
/**
 * Converts stream buffers to disk ranges.
 * @param streamBuffer - stream buffer
 * @return DiskRangeInfo holding the converted disk ranges and their total length
 */
public static DiskRangeInfo createDiskRangeInfo(ColumnStreamData streamBuffer) {
  DiskRangeInfo diskRangeInfo = new DiskRangeInfo(streamBuffer.getIndexBaseOffset());
  // See ctor comment.
  long offset = streamBuffer.getIndexBaseOffset();
  // TODO: we should get rid of this
  for (MemoryBuffer memoryBuffer : streamBuffer.getCacheBuffers()) {
    ByteBuffer buffer = memoryBuffer.getByteBufferDup();
    diskRangeInfo.addDiskRange(new BufferChunk(buffer, offset));
    offset += buffer.remaining();
  }
  return diskRangeInfo;
}
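The conversion simply lays the cached buffers end to end starting at the stream's index base offset, so each MemoryBuffer becomes one contiguous disk range whose length is the buffer's remaining() bytes. A self-contained sketch of that bookkeeping, using plain ByteBuffers in place of MemoryBuffer (the class name and sample sizes are illustrative, not Hive code):

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;

// Sketch only: mimics the offset bookkeeping of createDiskRangeInfo
// with plain ByteBuffers standing in for MemoryBuffer.
public class DiskRangeOffsetSketch {
  public static void main(String[] args) {
    long baseOffset = 100;  // stands in for streamBuffer.getIndexBaseOffset()
    List<ByteBuffer> cacheBuffers = Arrays.asList(
        ByteBuffer.allocate(16), ByteBuffer.allocate(32), ByteBuffer.allocate(8));
    long offset = baseOffset;
    for (ByteBuffer buffer : cacheBuffers) {
      // Each buffer becomes one contiguous range [offset, offset + remaining).
      System.out.println("range [" + offset + ", " + (offset + buffer.remaining()) + ")");
      offset += buffer.remaining();
    }
    System.out.println("total length: " + (offset - baseOffset));
  }
}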
Use of org.apache.hadoop.hive.common.io.encoded.MemoryBuffer in project hive by apache.
The class EncodedReaderImpl, method copyAndReplaceUncompressedToNonCached.
private static CacheChunk copyAndReplaceUncompressedToNonCached(
    BufferChunk bc, DataCache cacheWrapper, MemoryBuffer[] singleAlloc) {
  singleAlloc[0] = null;
  cacheWrapper.getAllocator().allocateMultiple(singleAlloc, bc.getLength());
  MemoryBuffer buffer = singleAlloc[0];
  cacheWrapper.reuseBuffer(buffer);
  ByteBuffer dest = buffer.getByteBufferRaw();
  CacheChunk tcc = POOLS.tccPool.take();
  tcc.init(buffer, bc.getOffset(), bc.getEnd());
  copyUncompressedChunk(bc.getChunk(), dest);
  bc.replaceSelfWith(tcc);
  return tcc;
}
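The method allocates a cache buffer of the chunk's length, wraps it in a CacheChunk from the pool, and copies the uncompressed bytes across before replacing the disk chunk in the list. A self-contained sketch of one way such a copy can be done with plain ByteBuffers (copyChunk and the sample data are hypothetical, not the Hive copyUncompressedChunk implementation):

import java.nio.ByteBuffer;

// Sketch only: copies a source chunk into a freshly allocated destination
// without disturbing the source buffer's position.
public class CopyChunkSketch {
  static void copyChunk(ByteBuffer src, ByteBuffer dest) {
    dest.put(src.duplicate());  // duplicate() shares content but not position/limit
    dest.flip();                // make the copied bytes readable from position 0
  }

  public static void main(String[] args) {
    ByteBuffer src = ByteBuffer.wrap("uncompressed data".getBytes());
    ByteBuffer dest = ByteBuffer.allocate(src.remaining());  // stands in for the cache allocation
    copyChunk(src, dest);
    System.out.println(src.remaining() + " bytes still readable in src, "
        + dest.remaining() + " bytes copied to dest");
  }
}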
Use of org.apache.hadoop.hive.common.io.encoded.MemoryBuffer in project hive by apache.
The class EncodedReaderImpl, method addOneCompressionBlockByteBuffer.
/**
 * Adds one buffer with compressed data to the results of addOneCompressionBuffer (see its javadoc).
 * @param fullCompressionBlock (fCB) Entire compression block, sliced or copied from disk data.
 * @param isUncompressed Whether the data in the block is uncompressed.
 * @param cbStartOffset Compressed start offset of the fCB.
 * @param cbEndOffset Compressed end offset of the fCB.
 * @param lastChunkLength The number of compressed bytes consumed from the last *chunk* into fullCompressionBlock.
 * @param lastChunk The last disk data chunk contributing to the block; its position is advanced by lastChunkLength.
 * @param toDecompress See addOneCompressionBuffer.
 * @param cacheBuffers See addOneCompressionBuffer.
 * @return New cache buffer.
 */
private ProcCacheChunk addOneCompressionBlockByteBuffer(ByteBuffer fullCompressionBlock,
    boolean isUncompressed, long cbStartOffset, long cbEndOffset, int lastChunkLength,
    BufferChunk lastChunk, List<ProcCacheChunk> toDecompress, List<MemoryBuffer> cacheBuffers) {
  // Prepare future cache buffer.
  MemoryBuffer futureAlloc = cacheWrapper.getAllocator().createUnallocated();
  // Add it to result in order we are processing.
  cacheBuffers.add(futureAlloc);
  // Add it to the list of work to decompress.
  ProcCacheChunk cc = POOLS.pccPool.take();
  cc.init(cbStartOffset, cbEndOffset, !isUncompressed, fullCompressionBlock, futureAlloc,
      cacheBuffers.size() - 1);
  toDecompress.add(cc);
  // Adjust the compression block position.
  if (isTracingEnabled) {
    LOG.trace("Adjusting " + lastChunk + " to consume " + lastChunkLength + " compressed bytes");
  }
  lastChunk.getChunk().position(lastChunk.getChunk().position() + lastChunkLength);
  // Before anyone else accesses it, it would have been allocated and decompressed locally.
  if (lastChunk.getChunk().remaining() <= 0) {
    if (isTracingEnabled) {
      LOG.trace("Replacing " + lastChunk + " with " + cc + " in the buffers");
    }
    lastChunk.replaceSelfWith(cc);
  } else {
    if (isTracingEnabled) {
      LOG.trace("Adding " + cc + " before " + lastChunk + " in the buffers");
    }
    lastChunk.insertPartBefore(cc);
  }
  return cc;
}
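The core of the method is the bookkeeping at the end: the last disk chunk's position is advanced by the number of bytes that went into the compression block, and the chunk is either replaced outright (fully consumed) or kept with the new block inserted before it (partially consumed). A self-contained sketch of that pattern with a plain ByteBuffer standing in for lastChunk.getChunk() (class name and sizes are illustrative only):

import java.nio.ByteBuffer;

// Sketch only: the position-advance-and-check pattern used at the end of
// addOneCompressionBlockByteBuffer.
public class ConsumeChunkSketch {
  public static void main(String[] args) {
    ByteBuffer chunk = ByteBuffer.allocate(64);
    int lastChunkLength = 64;  // pretend the whole chunk was consumed into the block

    chunk.position(chunk.position() + lastChunkLength);
    if (chunk.remaining() <= 0) {
      // All of the chunk's data went into the compression block:
      // the chunk can be replaced outright (replaceSelfWith in the real code).
      System.out.println("chunk fully consumed, replace it");
    } else {
      // Part of the chunk belongs to the next block:
      // insert the new piece before it instead (insertPartBefore in the real code).
      System.out.println("chunk partially consumed, " + chunk.remaining() + " bytes left");
    }
  }
}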
Use of org.apache.hadoop.hive.common.io.encoded.MemoryBuffer in project hive by apache.
The class TestBuddyAllocator, method allocateAndUseBuffer.
private void allocateAndUseBuffer(BuddyAllocator a, MemoryBuffer[][] allocs,
    long[][] testValues, int allocCount, int index, int sizeLog2) throws Exception {
  allocs[index] = new MemoryBuffer[allocCount];
  testValues[index] = new long[allocCount];
  int size = (1 << sizeLog2) - 1;
  try {
    a.allocateMultiple(allocs[index], size);
  } catch (AllocatorOutOfMemoryException ex) {
    LOG.error("Failed to allocate " + allocCount + " of " + size + "; " + a.debugDump());
    throw ex;
  }
  // LOG.info("Allocated " + allocCount + " of " + size + "; " + a.debugDump());
  for (int j = 0; j < allocCount; ++j) {
    MemoryBuffer mem = allocs[index][j];
    long testValue = testValues[index][j] = rdm.nextLong();
    int pos = mem.getByteBufferRaw().position();
    mem.getByteBufferRaw().putLong(pos, testValue);
    int halfLength = mem.getByteBufferRaw().remaining() >> 1;
    if (halfLength + 8 <= mem.getByteBufferRaw().remaining()) {
      mem.getByteBufferRaw().putLong(pos + halfLength, testValue);
    }
  }
}
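The test stamps each allocated buffer with a random long at its starting position and, when it fits, again at the half-way point, using absolute putLong so the buffer's position is left untouched. A self-contained sketch of that stamp-and-verify pattern, with ByteBuffer.allocate standing in for a BuddyAllocator allocation (class name and size are illustrative):

import java.nio.ByteBuffer;
import java.util.Random;

// Sketch only: absolute putLong/getLong used to stamp and later verify a buffer.
public class StampBufferSketch {
  public static void main(String[] args) {
    ByteBuffer mem = ByteBuffer.allocate(1 << 13);
    long testValue = new Random().nextLong();
    int pos = mem.position();
    mem.putLong(pos, testValue);                 // absolute write; position is unchanged
    int halfLength = mem.remaining() >> 1;
    if (halfLength + 8 <= mem.remaining()) {     // only if a second long fits
      mem.putLong(pos + halfLength, testValue);
    }
    // Later verification reads the same absolute offsets back.
    System.out.println(mem.getLong(pos) == testValue
        && mem.getLong(pos + halfLength) == testValue);
  }
}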
Use of org.apache.hadoop.hive.common.io.encoded.MemoryBuffer in project hive by apache.
The class TestLowLevelCacheImpl, method testMTTWithCleanup.
@Test
public void testMTTWithCleanup() {
  final LowLevelCacheImpl cache = new LowLevelCacheImpl(LlapDaemonCacheMetrics.create("test", "1"),
      new DummyCachePolicy(), new DummyAllocator(), true, 1);
  final long fn1 = 1, fn2 = 2;
  final int offsetsToUse = 8;
  final CountDownLatch cdlIn = new CountDownLatch(4), cdlOut = new CountDownLatch(1);
  final AtomicInteger rdmsDone = new AtomicInteger(0);
  Callable<Long> rdmCall = new Callable<Long>() {
    public Long call() {
      int gets = 0, puts = 0;
      try {
        Random rdm = new Random(1234 + Thread.currentThread().getId());
        syncThreadStart(cdlIn, cdlOut);
        for (int i = 0; i < 20000; ++i) {
          boolean isGet = rdm.nextBoolean(), isFn1 = rdm.nextBoolean();
          long fileName = isFn1 ? fn1 : fn2;
          int fileIndex = isFn1 ? 1 : 2;
          int count = rdm.nextInt(offsetsToUse);
          if (isGet) {
            int[] offsets = new int[count];
            count = generateOffsets(offsetsToUse, rdm, offsets);
            CreateHelper list = new CreateHelper();
            for (int j = 0; j < count; ++j) {
              list.addOrMerge(offsets[j], offsets[j] + 1, true, false);
            }
            DiskRangeList iter = cache.getFileData(fileName, list.get(), 0, testFactory, null, null);
            int j = -1;
            while (iter != null) {
              ++j;
              if (!(iter instanceof CacheChunk)) {
                iter = iter.next;
                continue;
              }
              ++gets;
              LlapDataBuffer result = (LlapDataBuffer) ((CacheChunk) iter).getBuffer();
              assertEquals(makeFakeArenaIndex(fileIndex, offsets[j]), result.arenaIndex);
              cache.decRefBuffer(result);
              iter = iter.next;
            }
          } else {
            DiskRange[] ranges = new DiskRange[count];
            int[] offsets = new int[count];
            for (int j = 0; j < count; ++j) {
              int next = rdm.nextInt(offsetsToUse);
              ranges[j] = dr(next, next + 1);
              offsets[j] = next;
            }
            MemoryBuffer[] buffers = new MemoryBuffer[count];
            for (int j = 0; j < offsets.length; ++j) {
              LlapDataBuffer buf = LowLevelCacheImpl.allocateFake();
              buf.arenaIndex = makeFakeArenaIndex(fileIndex, offsets[j]);
              buffers[j] = buf;
            }
            long[] mask = cache.putFileData(fileName, ranges, buffers, 0, Priority.NORMAL, null);
            puts += buffers.length;
            long maskVal = 0;
            if (mask != null) {
              assertEquals(1, mask.length);
              maskVal = mask[0];
            }
            for (int j = 0; j < offsets.length; ++j) {
              LlapDataBuffer buf = (LlapDataBuffer) (buffers[j]);
              if ((maskVal & 1) == 1) {
                assertEquals(makeFakeArenaIndex(fileIndex, offsets[j]), buf.arenaIndex);
              }
              maskVal >>= 1;
              cache.decRefBuffer(buf);
            }
          }
        }
      } finally {
        rdmsDone.incrementAndGet();
      }
      return (((long) gets) << 32) | puts;
    }

    private int makeFakeArenaIndex(int fileIndex, long offset) {
      return (int) ((fileIndex << 16) + offset);
    }
  };
  FutureTask<Integer> evictionTask = new FutureTask<Integer>(new Callable<Integer>() {
    public Integer call() {
      boolean isFirstFile = false;
      Random rdm = new Random(1234 + Thread.currentThread().getId());
      int evictions = 0;
      syncThreadStart(cdlIn, cdlOut);
      while (rdmsDone.get() < 3) {
        DiskRangeList head = new DiskRangeList(0, offsetsToUse + 1);
        isFirstFile = !isFirstFile;
        long fileId = isFirstFile ? fn1 : fn2;
        head = cache.getFileData(fileId, head, 0, testFactory, null, null);
        DiskRange[] results = head.listToArray();
        int startIndex = rdm.nextInt(results.length), index = startIndex;
        LlapDataBuffer victim = null;
        do {
          DiskRange r = results[index];
          if (r instanceof CacheChunk) {
            LlapDataBuffer result = (LlapDataBuffer) ((CacheChunk) r).getBuffer();
            cache.decRefBuffer(result);
            if (victim == null && result.invalidate()) {
              ++evictions;
              victim = result;
            }
          }
          ++index;
          if (index == results.length) {
            index = 0;
          }
        } while (index != startIndex);
        if (victim == null) {
          continue;
        }
        cache.notifyEvicted(victim);
      }
      return evictions;
    }
  });
  FutureTask<Long> rdmTask1 = new FutureTask<Long>(rdmCall), rdmTask2 = new FutureTask<Long>(rdmCall),
      rdmTask3 = new FutureTask<Long>(rdmCall);
  Executor threadPool = Executors.newFixedThreadPool(4);
  threadPool.execute(rdmTask1);
  threadPool.execute(rdmTask2);
  threadPool.execute(rdmTask3);
  threadPool.execute(evictionTask);
  try {
    cdlIn.await();
    cdlOut.countDown();
    long result1 = rdmTask1.get(), result2 = rdmTask2.get(), result3 = rdmTask3.get();
    int evictions = evictionTask.get();
    LOG.info("MTT test: task 1: " + descRdmTask(result1) + ", task 2: " + descRdmTask(result2)
        + ", task 3: " + descRdmTask(result3) + "; " + evictions + " evictions");
  } catch (Throwable t) {
    throw new RuntimeException(t);
  }
}
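The three reader tasks and the eviction task are synchronized with a pair of latches: each task counts down cdlIn and then waits on cdlOut, and the driver waits on cdlIn before releasing everyone with cdlOut.countDown(), so all hot loops start at once. syncThreadStart is not shown in this snippet, so the sketch below assumes it does exactly that countDown/await pair; the class and variable names are illustrative:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Sketch only: the two-latch handshake pattern used to start worker tasks together.
public class LatchHandshakeSketch {
  public static void main(String[] args) throws Exception {
    final int workers = 4;
    final CountDownLatch cdlIn = new CountDownLatch(workers);  // workers signal readiness
    final CountDownLatch cdlOut = new CountDownLatch(1);       // driver releases everyone
    ExecutorService pool = Executors.newFixedThreadPool(workers);
    for (int i = 0; i < workers; ++i) {
      final int id = i;
      pool.execute(() -> {
        try {
          cdlIn.countDown();   // "I am ready"
          cdlOut.await();      // wait for the starting gun
          System.out.println("worker " + id + " running");
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      });
    }
    cdlIn.await();      // wait until all workers are parked at the gate
    cdlOut.countDown(); // release them simultaneously
    pool.shutdown();
  }
}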