Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.
From the class TestLowLevelCacheImpl, the method testCacheMetrics, which verifies that LowLevelCacheImpl updates the requested-bytes and hit-bytes counters correctly:
@Test
public void testCacheMetrics() {
  // Five adjacent ranges added with merging enabled collapse into one 500-byte range.
  CreateHelper list = new CreateHelper();
  list.addOrMerge(0, 100, true, false);
  list.addOrMerge(100, 200, true, false);
  list.addOrMerge(200, 300, true, false);
  list.addOrMerge(300, 400, true, false);
  list.addOrMerge(400, 500, true, false);
  assertEquals(1, list.get().listSize());
  assertEquals(500, list.get().getTotalLength());
  // With merging disabled, the same five ranges stay separate.
  list = new CreateHelper();
  list.addOrMerge(0, 100, false, false);
  list.addOrMerge(100, 200, false, false);
  list.addOrMerge(200, 300, false, false);
  list.addOrMerge(300, 400, false, false);
  list.addOrMerge(400, 500, false, false);
  assertEquals(5, list.get().listSize());
  assertEquals(500, list.get().getTotalLength());
  // A single non-merged add starts a new range that subsequent merged adds extend:
  // the result is [0, 200) and [200, 500).
  list = new CreateHelper();
  list.addOrMerge(0, 100, true, false);
  list.addOrMerge(100, 200, true, false);
  list.addOrMerge(200, 300, false, false);
  list.addOrMerge(300, 400, true, false);
  list.addOrMerge(400, 500, true, false);
  assertEquals(2, list.get().listSize());
  assertEquals(500, list.get().getTotalLength());
  LlapDaemonCacheMetrics metrics = LlapDaemonCacheMetrics.create("test", "1");
  LowLevelCacheImpl cache = new LowLevelCacheImpl(
      metrics, new DummyCachePolicy(), new DummyAllocator(), true, -1); // -1: no cleanup thread
  long fn = 1;
  // Cache 500 bytes for file fn: [0, 100), [300, 500) and [800, 1000).
  MemoryBuffer[] fakes = new MemoryBuffer[] { fb(), fb(), fb() };
  cache.putFileData(fn, new DiskRange[] { dr(0, 100), dr(300, 500), dr(800, 1000) },
      fakes, 0, Priority.NORMAL, null, null);
  // putFileData does not touch the request counters.
  assertEquals(0, metrics.getCacheRequestedBytes());
  assertEquals(0, metrics.getCacheHitBytes());
  // [0, 1000): 1000 bytes requested, the 500 cached bytes are hits.
  list = new CreateHelper();
  list.addOrMerge(0, 1000, true, false);
  cache.getFileData(fn, list.get(), 0, testFactory, null, null);
  assertEquals(1000, metrics.getCacheRequestedBytes());
  assertEquals(500, metrics.getCacheHitBytes());
  // [0, 100) is fully cached: 100 more requested, 100 more hit.
  list = new CreateHelper();
  list.addOrMerge(0, 100, true, false);
  cache.getFileData(fn, list.get(), 0, testFactory, null, null);
  assertEquals(1100, metrics.getCacheRequestedBytes());
  assertEquals(600, metrics.getCacheHitBytes());
  // Exactly the three cached ranges: 500 more requested, all of them hits.
  list = new CreateHelper();
  list.addOrMerge(0, 100, true, false);
  list.addOrMerge(300, 500, true, false);
  list.addOrMerge(800, 1000, true, false);
  cache.getFileData(fn, list.get(), 0, testFactory, null, null);
  assertEquals(1600, metrics.getCacheRequestedBytes());
  assertEquals(1100, metrics.getCacheHitBytes());
  // [300, 500) hits, [1000, 2000) misses: 1200 more requested, only 200 more hit.
  list = new CreateHelper();
  list.addOrMerge(300, 500, true, false);
  list.addOrMerge(1000, 2000, true, false);
  cache.getFileData(fn, list.get(), 0, testFactory, null, null);
  assertEquals(2800, metrics.getCacheRequestedBytes());
  assertEquals(1300, metrics.getCacheHitBytes());
}
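Taken together, the getFileData calls above exercise two monotonically increasing counters: every requested byte counts toward getCacheRequestedBytes, while only bytes covered by a cached range count toward getCacheHitBytes. A minimal arithmetic sketch of that accounting (illustration only, not part of the test):

// Cached ranges for file fn: [0, 100), [300, 500), [800, 1000) -> 500 bytes total.
long requested = 0, hits = 0;
requested += 1000; hits += 500;  // [0, 1000) overlaps all three cached ranges
requested += 100;  hits += 100;  // [0, 100) is fully cached
requested += 500;  hits += 500;  // the three cached ranges themselves, all hits
requested += 1200; hits += 200;  // [300, 500) hits, [1000, 2000) misses
assert requested == 2800 && hits == 1300;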
Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.
From the class TestLowLevelLrfuCachePolicy, the method createMetricsMock, which builds a Mockito mock whose incrCacheCapacityUsed calls are accumulated into an AtomicLong:
private MetricsMock createMetricsMock() {
  LlapDaemonCacheMetrics metricsMock = mock(LlapDaemonCacheMetrics.class);
  final AtomicLong cacheUsed = new AtomicLong(0);
  // Accumulate every increment so tests can assert the simulated cache usage.
  doAnswer(new Answer<Object>() {
    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
      cacheUsed.addAndGet((Long) invocation.getArguments()[0]);
      return null;
    }
  }).when(metricsMock).incrCacheCapacityUsed(anyLong());
  return new MetricsMock(cacheUsed, metricsMock);
}
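Since org.mockito.stubbing.Answer is a functional interface, the same stub can be written more compactly as a lambda on Java 8+; an equivalent sketch:

doAnswer(invocation -> {
  cacheUsed.addAndGet((Long) invocation.getArguments()[0]);
  return null;
}).when(metricsMock).incrCacheCapacityUsed(anyLong());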
Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.
From the class TestOrcMetadataCache, the method testGetOrcTailForPathWithFileId, which checks that an ORC tail cached under a SyntheticFileId can be served from the cache again when the id is recomputed:
@Test
public void testGetOrcTailForPathWithFileId() throws Exception {
  DummyMemoryManager mm = new DummyMemoryManager();
  DummyCachePolicy cp = new DummyCachePolicy();
  final int MAX_ALLOC = 64;
  LlapDaemonCacheMetrics metrics = LlapDaemonCacheMetrics.create("", "");
  BuddyAllocator alloc = new BuddyAllocator(
      false, false, 8, MAX_ALLOC, 1, 4 * 4096, 0, null, mm, metrics, null, true);
  MetadataCache cache = new MetadataCache(alloc, mm, cp, true, metrics);
  Path path = new Path("../data/files/alltypesorc");
  Configuration jobConf = new Configuration();
  Configuration daemonConf = new Configuration();
  CacheTag tag = CacheTag.build("test-table");
  FileSystem fs = FileSystem.get(daemonConf);
  FileStatus fileStatus = fs.getFileStatus(path);
  // The first read goes to disk and populates the cache under the synthetic file id.
  OrcTail uncached = OrcEncodedDataReader.getOrcTailForPath(
      fileStatus.getPath(), jobConf, tag, daemonConf, cache, new SyntheticFileId(fileStatus));
  jobConf.set(HiveConf.ConfVars.LLAP_IO_CACHE_ONLY.varname, "true");
  // This should work from the cache, by recalculating the same fileId.
  OrcTail cached = OrcEncodedDataReader.getOrcTailForPath(
      fileStatus.getPath(), jobConf, tag, daemonConf, cache, null);
  assertEquals(uncached.getSerializedTail(), cached.getSerializedTail());
  assertEquals(uncached.getFileTail(), cached.getFileTail());
}
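The second lookup succeeds even with LLAP_IO_CACHE_ONLY set, because getOrcTailForPath recomputes the file id when null is passed. A hypothetical sanity check, assuming SyntheticFileId is value-based (derived deterministically from the file's status, with matching equals/hashCode):

// Hypothetical: the recomputed id must equal the id the tail was cached under.
SyntheticFileId recomputed = new SyntheticFileId(fs.getFileStatus(path));
assertEquals(new SyntheticFileId(fileStatus), recomputed);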
Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.
From the class TestOrcMetadataCache, the method testCaseSomePartialBuffersAreEvicted, which verifies that a multi-buffer metadata entry becomes unreadable once one of its buffers is evicted:
@Test
public void testCaseSomePartialBuffersAreEvicted() {
  final DummyMemoryManager mm = new DummyMemoryManager();
  final DummyCachePolicy cp = new DummyCachePolicy();
  final int MAX_ALLOC = 64;
  final LlapDaemonCacheMetrics metrics = LlapDaemonCacheMetrics.create("", "");
  final BuddyAllocator alloc = new BuddyAllocator(
      false, false, 8, MAX_ALLOC, 1, 4096, 0, null, mm, metrics, null, true);
  final MetadataCache cache = new MetadataCache(alloc, mm, cp, true, metrics);
  final Object fileKey1 = new Object();
  final Random rdm = new Random();
  final ByteBuffer smallBuffer = ByteBuffer.allocate(2 * MAX_ALLOC);
  rdm.nextBytes(smallBuffer.array());
  // Put some metadata in the cache that needs multiple buffers (2 * MAX_ALLOC).
  final LlapBufferOrBuffers result = cache.putFileMetadata(fileKey1, smallBuffer, null, null);
  // Assert that we have our two buffers.
  Assert.assertEquals(2, result.getMultipleLlapBuffers().length);
  final LlapAllocatorBuffer[] buffers = result.getMultipleLlapBuffers();
  // Simulate eviction of one buffer, so it can no longer be locked.
  buffers[1].decRef();
  buffers[1].invalidateAndRelease();
  // Getting the entry should now clean it from the cache, since part of it was evicted.
  Assert.assertNull(cache.getFileMetadata(fileKey1));
}
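The final assertion relies on the locking contract also visible at the end of testBuffers below: incRef() returns a non-positive value for a buffer that has been evicted and can no longer be locked. A hedged sketch of the all-or-nothing check the cache presumably performs for multi-buffer entries:

// Assumption: an entry is served only if every one of its buffers can be locked.
boolean lockable = true;
for (LlapAllocatorBuffer b : buffers) {
  if (b.incRef() <= 0) { // non-positive result: evicted, cannot be locked
    lockable = false;    // a real implementation would also unlock the buffers locked so far
    break;
  }
}
// With buffers[1] invalidated above, lockable is false, so getFileMetadata returns null.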
Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.
From the class TestOrcMetadataCache, the method testBuffers, which exercises single- and multi-buffer metadata entries through put, get, and eviction:
@Test
public void testBuffers() throws Exception {
  DummyMemoryManager mm = new DummyMemoryManager();
  DummyCachePolicy cp = new DummyCachePolicy();
  final int MAX_ALLOC = 64;
  LlapDaemonCacheMetrics metrics = LlapDaemonCacheMetrics.create("", "");
  BuddyAllocator alloc = new BuddyAllocator(
      false, false, 8, MAX_ALLOC, 1, 4096, 0, null, mm, metrics, null, true);
  MetadataCache cache = new MetadataCache(alloc, mm, cp, true, metrics);
  Object fileKey1 = new Object();
  Random rdm = new Random();
  // A buffer smaller than MAX_ALLOC fits into a single cache buffer.
  ByteBuffer smallBuffer = ByteBuffer.allocate(MAX_ALLOC - 1);
  rdm.nextBytes(smallBuffer.array());
  LlapBufferOrBuffers result = cache.putFileMetadata(fileKey1, smallBuffer, null, null);
  cache.decRefBuffer(result);
  ByteBuffer cacheBuf = result.getSingleBuffer().getByteBufferDup();
  assertEquals(smallBuffer, cacheBuf);
  // Putting the same key again returns the already-cached data.
  result = cache.putFileMetadata(fileKey1, smallBuffer, null, null);
  cache.decRefBuffer(result);
  cacheBuf = result.getSingleBuffer().getByteBufferDup();
  assertEquals(smallBuffer, cacheBuf);
  result = cache.getFileMetadata(fileKey1);
  cacheBuf = result.getSingleBuffer().getByteBufferDup();
  assertEquals(smallBuffer, cacheBuf);
  cache.decRefBuffer(result);
  // After eviction, the entry is gone.
  cache.notifyEvicted((LlapMetadataBuffer<?>) result.getSingleBuffer());
  result = cache.getFileMetadata(fileKey1);
  assertNull(result);
  // A buffer larger than MAX_ALLOC is split across multiple cache buffers.
  ByteBuffer largeBuffer = ByteBuffer.allocate((int) (MAX_ALLOC * 2.5));
  rdm.nextBytes(largeBuffer.array());
  result = cache.putFileMetadata(fileKey1, largeBuffer, null, null);
  cache.decRefBuffer(result);
  assertNull(result.getSingleBuffer());
  assertEquals(largeBuffer, extractResultBbs(result));
  result = cache.getFileMetadata(fileKey1);
  assertNull(result.getSingleBuffer());
  assertEquals(largeBuffer, extractResultBbs(result));
  LlapAllocatorBuffer b0 = result.getMultipleLlapBuffers()[0],
      b1 = result.getMultipleLlapBuffers()[1];
  cache.decRefBuffer(result);
  // Evicting one of the buffers invalidates the whole entry.
  cache.notifyEvicted((LlapMetadataBuffer<?>) b1);
  result = cache.getFileMetadata(fileKey1);
  assertNull(result);
  // The other buffer should have also been thrown out; incRef fails on it.
  assertFalse(b0.incRef() > 0);
}
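extractResultBbs is a helper defined elsewhere in the test class; a plausible sketch, assuming it merely concatenates the duplicated byte buffers of a multi-buffer result into one ByteBuffer for comparison:

private static ByteBuffer extractResultBbs(LlapBufferOrBuffers result) {
  int totalLen = 0;
  for (LlapAllocatorBuffer buf : result.getMultipleLlapBuffers()) {
    totalLen += buf.getByteBufferDup().remaining();
  }
  ByteBuffer combined = ByteBuffer.allocate(totalLen);
  for (LlapAllocatorBuffer buf : result.getMultipleLlapBuffers()) {
    combined.put(buf.getByteBufferDup());
  }
  combined.flip(); // rewind so equals() compares the full content
  return combined;
}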