Search in sources:

Example 6 with LlapDaemonCacheMetrics

Example usage of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in the Apache Hive project.

From class TestLowLevelCacheImpl, method testCacheMetrics:

/**
 * Exercises the DiskRangeList.CreateHelper merge logic, then verifies that
 * LowLevelCacheImpl keeps the "requested bytes" and "hit bytes" counters of
 * LlapDaemonCacheMetrics in step with the data actually served from cache.
 */
@Test
public void testCacheMetrics() {
    // Adjacent ranges added with merging enabled collapse into a single entry.
    CreateHelper ranges = new CreateHelper();
    ranges.addOrMerge(0, 100, true, false);
    ranges.addOrMerge(100, 200, true, false);
    ranges.addOrMerge(200, 300, true, false);
    ranges.addOrMerge(300, 400, true, false);
    ranges.addOrMerge(400, 500, true, false);
    assertEquals(1, ranges.get().listSize());
    assertEquals(500, ranges.get().getTotalLength());
    // With merging disabled every range stays a separate list entry.
    ranges = new CreateHelper();
    ranges.addOrMerge(0, 100, false, false);
    ranges.addOrMerge(100, 200, false, false);
    ranges.addOrMerge(200, 300, false, false);
    ranges.addOrMerge(300, 400, false, false);
    ranges.addOrMerge(400, 500, false, false);
    assertEquals(5, ranges.get().listSize());
    assertEquals(500, ranges.get().getTotalLength());
    // A single non-merging add in the middle splits the list into two runs.
    ranges = new CreateHelper();
    ranges.addOrMerge(0, 100, true, false);
    ranges.addOrMerge(100, 200, true, false);
    ranges.addOrMerge(200, 300, false, false);
    ranges.addOrMerge(300, 400, true, false);
    ranges.addOrMerge(400, 500, true, false);
    assertEquals(2, ranges.get().listSize());
    assertEquals(500, ranges.get().getTotalLength());
    LlapDaemonCacheMetrics cacheMetrics = LlapDaemonCacheMetrics.create("test", "1");
    LowLevelCacheImpl cache = new LowLevelCacheImpl(cacheMetrics, new DummyCachePolicy(),
        new DummyAllocator(), true, /* no cleanup thread */ -1);
    long fileId = 1;
    MemoryBuffer[] fakeBuffers = new MemoryBuffer[] { fb(), fb(), fb() };
    // Populate 500 bytes of the [0, 1000) space: [0,100), [300,500), [800,1000).
    cache.putFileData(fileId, new DiskRange[] { dr(0, 100), dr(300, 500), dr(800, 1000) },
        fakeBuffers, 0, Priority.NORMAL, null, null);
    // No reads yet, so both counters start at zero.
    assertEquals(0, cacheMetrics.getCacheRequestedBytes());
    assertEquals(0, cacheMetrics.getCacheHitBytes());
    // Request the whole 1000-byte span; exactly the 500 cached bytes hit.
    ranges = new CreateHelper();
    ranges.addOrMerge(0, 1000, true, false);
    cache.getFileData(fileId, ranges.get(), 0, testFactory, null, null);
    assertEquals(1000, cacheMetrics.getCacheRequestedBytes());
    assertEquals(500, cacheMetrics.getCacheHitBytes());
    // A fully cached 100-byte request: both counters grow by 100.
    ranges = new CreateHelper();
    ranges.addOrMerge(0, 100, true, false);
    cache.getFileData(fileId, ranges.get(), 0, testFactory, null, null);
    assertEquals(1100, cacheMetrics.getCacheRequestedBytes());
    assertEquals(600, cacheMetrics.getCacheHitBytes());
    // Request exactly the three cached ranges (500 bytes): all of it hits.
    ranges = new CreateHelper();
    ranges.addOrMerge(0, 100, true, false);
    ranges.addOrMerge(300, 500, true, false);
    ranges.addOrMerge(800, 1000, true, false);
    cache.getFileData(fileId, ranges.get(), 0, testFactory, null, null);
    assertEquals(1600, cacheMetrics.getCacheRequestedBytes());
    assertEquals(1100, cacheMetrics.getCacheHitBytes());
    // Mixed request: [300,500) hits (200 bytes), [1000,2000) misses entirely.
    ranges = new CreateHelper();
    ranges.addOrMerge(300, 500, true, false);
    ranges.addOrMerge(1000, 2000, true, false);
    cache.getFileData(fileId, ranges.get(), 0, testFactory, null, null);
    assertEquals(2800, cacheMetrics.getCacheRequestedBytes());
    assertEquals(1300, cacheMetrics.getCacheHitBytes());
}
Also used : CreateHelper(org.apache.hadoop.hive.common.io.DiskRangeList.CreateHelper) MemoryBuffer(org.apache.hadoop.hive.common.io.encoded.MemoryBuffer) LlapDaemonCacheMetrics(org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics) Test(org.junit.Test)

Example 7 with LlapDaemonCacheMetrics

Example usage of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in the Apache Hive project.

From class TestLowLevelLrfuCachePolicy, method createMetricsMock:

/**
 * Creates a Mockito mock of LlapDaemonCacheMetrics that records every
 * incrCacheCapacityUsed(delta) call by adding the delta to an AtomicLong,
 * and returns both the accumulator and the mock wrapped in a MetricsMock.
 */
private MetricsMock createMetricsMock() {
    LlapDaemonCacheMetrics metricsMock = mock(LlapDaemonCacheMetrics.class);
    final AtomicLong usedBytes = new AtomicLong(0);
    // Mockito's Answer is a functional interface; a lambda replaces the anonymous class.
    doAnswer(invocation -> {
        usedBytes.addAndGet((Long) invocation.getArguments()[0]);
        return null;
    }).when(metricsMock).incrCacheCapacityUsed(anyLong());
    return new MetricsMock(usedBytes, metricsMock);
}
Also used : AtomicLong(java.util.concurrent.atomic.AtomicLong) LlapDaemonCacheMetrics(org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics) InvocationOnMock(org.mockito.invocation.InvocationOnMock) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) AtomicLong(java.util.concurrent.atomic.AtomicLong)

Example 8 with LlapDaemonCacheMetrics

Example usage of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in the Apache Hive project.

From class TestOrcMetadataCache, method testGetOrcTailForPathWithFileId:

/**
 * Verifies that getOrcTailForPath serves the ORC tail from the metadata cache:
 * the first call populates the cache under an explicit SyntheticFileId; the
 * second call (cache-only mode, no file id supplied) must recompute the same
 * id and return an identical tail.
 */
@Test
public void testGetOrcTailForPathWithFileId() throws Exception {
    DummyMemoryManager memoryManager = new DummyMemoryManager();
    DummyCachePolicy cachePolicy = new DummyCachePolicy();
    final int MAX_ALLOC = 64;
    LlapDaemonCacheMetrics cacheMetrics = LlapDaemonCacheMetrics.create("", "");
    BuddyAllocator allocator = new BuddyAllocator(
        false, false, 8, MAX_ALLOC, 1, 4 * 4096, 0, null, memoryManager, cacheMetrics, null, true);
    MetadataCache metadataCache = new MetadataCache(allocator, memoryManager, cachePolicy, true, cacheMetrics);
    Path path = new Path("../data/files/alltypesorc");
    Configuration jobConf = new Configuration();
    Configuration daemonConf = new Configuration();
    CacheTag tag = CacheTag.build("test-table");
    FileSystem fs = FileSystem.get(daemonConf);
    FileStatus fileStatus = fs.getFileStatus(path);
    // First read goes to the file and caches the tail under the synthetic file id.
    OrcTail uncached = OrcEncodedDataReader.getOrcTailForPath(
        fileStatus.getPath(), jobConf, tag, daemonConf, metadataCache, new SyntheticFileId(fileStatus));
    jobConf.set(HiveConf.ConfVars.LLAP_IO_CACHE_ONLY.varname, "true");
    // this should work from the cache, by recalculating the same fileId
    OrcTail cached = OrcEncodedDataReader.getOrcTailForPath(
        fileStatus.getPath(), jobConf, tag, daemonConf, metadataCache, null);
    assertEquals(uncached.getSerializedTail(), cached.getSerializedTail());
    assertEquals(uncached.getFileTail(), cached.getFileTail());
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) SyntheticFileId(org.apache.hadoop.hive.ql.io.SyntheticFileId) MetadataCache(org.apache.hadoop.hive.llap.io.metadata.MetadataCache) LlapDaemonCacheMetrics(org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics) FileSystem(org.apache.hadoop.fs.FileSystem) CacheTag(org.apache.hadoop.hive.common.io.CacheTag) OrcTail(org.apache.orc.impl.OrcTail) Test(org.junit.Test)

Example 9 with LlapDaemonCacheMetrics

Example usage of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in the Apache Hive project.

From class TestOrcMetadataCache, method testCaseSomePartialBuffersAreEvicted:

/**
 * When one buffer of a multi-buffer metadata entry has been evicted, a lookup
 * must fail to lock the entry, purge it from the cache, and return null.
 */
@Test
public void testCaseSomePartialBuffersAreEvicted() {
    DummyMemoryManager memoryManager = new DummyMemoryManager();
    DummyCachePolicy cachePolicy = new DummyCachePolicy();
    final int MAX_ALLOC = 64;
    LlapDaemonCacheMetrics cacheMetrics = LlapDaemonCacheMetrics.create("", "");
    BuddyAllocator allocator = new BuddyAllocator(
        false, false, 8, MAX_ALLOC, 1, 4096, 0, null, memoryManager, cacheMetrics, null, true);
    MetadataCache metadataCache = new MetadataCache(allocator, memoryManager, cachePolicy, true, cacheMetrics);
    Object fileKey = new Object();
    Random random = new Random();
    // Payload of 2 * MAX_ALLOC forces the cache to use multiple buffers.
    ByteBuffer payload = ByteBuffer.allocate(2 * MAX_ALLOC);
    random.nextBytes(payload.array());
    LlapBufferOrBuffers result = metadataCache.putFileMetadata(fileKey, payload, null, null);
    // Assert that we really got two buffers back.
    Assert.assertEquals(2, result.getMultipleLlapBuffers().length);
    LlapAllocatorBuffer[] buffers = result.getMultipleLlapBuffers();
    // Simulate eviction of the second buffer so it can no longer be locked.
    buffers[1].decRef();
    buffers[1].invalidateAndRelease();
    // The lookup should clean the entry from the cache since a part was evicted.
    Assert.assertNull(metadataCache.getFileMetadata(fileKey));
}
Also used : LlapDaemonCacheMetrics(org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics) Random(java.util.Random) MetadataCache(org.apache.hadoop.hive.llap.io.metadata.MetadataCache) ByteBuffer(java.nio.ByteBuffer) LlapBufferOrBuffers(org.apache.hadoop.hive.llap.io.metadata.MetadataCache.LlapBufferOrBuffers) Test(org.junit.Test)

Example 10 with LlapDaemonCacheMetrics

Example usage of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in the Apache Hive project.

From class TestOrcMetadataCache, method testBuffers:

/**
 * End-to-end exercise of MetadataCache buffer handling: single-buffer put/get,
 * re-put of the same key, eviction notification, a payload that spans multiple
 * buffers, and full-entry cleanup when one of those buffers is evicted.
 */
@Test
public void testBuffers() throws Exception {
    DummyMemoryManager memoryManager = new DummyMemoryManager();
    DummyCachePolicy cachePolicy = new DummyCachePolicy();
    final int MAX_ALLOC = 64;
    LlapDaemonCacheMetrics cacheMetrics = LlapDaemonCacheMetrics.create("", "");
    BuddyAllocator allocator = new BuddyAllocator(
        false, false, 8, MAX_ALLOC, 1, 4096, 0, null, memoryManager, cacheMetrics, null, true);
    MetadataCache cache = new MetadataCache(allocator, memoryManager, cachePolicy, true, cacheMetrics);
    Object fileKey = new Object();
    Random random = new Random();
    // A payload below MAX_ALLOC fits into a single cache buffer.
    ByteBuffer smallBuffer = ByteBuffer.allocate(MAX_ALLOC - 1);
    random.nextBytes(smallBuffer.array());
    LlapBufferOrBuffers result = cache.putFileMetadata(fileKey, smallBuffer, null, null);
    cache.decRefBuffer(result);
    ByteBuffer cachedCopy = result.getSingleBuffer().getByteBufferDup();
    assertEquals(smallBuffer, cachedCopy);
    // Putting the same key again must still expose the same content.
    result = cache.putFileMetadata(fileKey, smallBuffer, null, null);
    cache.decRefBuffer(result);
    cachedCopy = result.getSingleBuffer().getByteBufferDup();
    assertEquals(smallBuffer, cachedCopy);
    result = cache.getFileMetadata(fileKey);
    cachedCopy = result.getSingleBuffer().getByteBufferDup();
    assertEquals(smallBuffer, cachedCopy);
    cache.decRefBuffer(result);
    // After an eviction notification the entry must be gone.
    cache.notifyEvicted((LlapMetadataBuffer<?>) result.getSingleBuffer());
    result = cache.getFileMetadata(fileKey);
    assertNull(result);
    // A payload of 2.5 * MAX_ALLOC must be split across multiple buffers.
    ByteBuffer largeBuffer = ByteBuffer.allocate((int) (MAX_ALLOC * 2.5));
    random.nextBytes(largeBuffer.array());
    result = cache.putFileMetadata(fileKey, largeBuffer, null, null);
    cache.decRefBuffer(result);
    assertNull(result.getSingleBuffer());
    assertEquals(largeBuffer, extractResultBbs(result));
    result = cache.getFileMetadata(fileKey);
    assertNull(result.getSingleBuffer());
    assertEquals(largeBuffer, extractResultBbs(result));
    LlapAllocatorBuffer first = result.getMultipleLlapBuffers()[0];
    LlapAllocatorBuffer second = result.getMultipleLlapBuffers()[1];
    cache.decRefBuffer(result);
    // Evicting one part of a multi-buffer entry invalidates the whole entry.
    cache.notifyEvicted((LlapMetadataBuffer<?>) second);
    result = cache.getFileMetadata(fileKey);
    assertNull(result);
    // The sibling buffer should have also been thrown out.
    assertFalse(first.incRef() > 0);
}
Also used : LlapDaemonCacheMetrics(org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics) Random(java.util.Random) MetadataCache(org.apache.hadoop.hive.llap.io.metadata.MetadataCache) ByteBuffer(java.nio.ByteBuffer) LlapBufferOrBuffers(org.apache.hadoop.hive.llap.io.metadata.MetadataCache.LlapBufferOrBuffers) Test(org.junit.Test)

Aggregations

LlapDaemonCacheMetrics (org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics)10 MetadataCache (org.apache.hadoop.hive.llap.io.metadata.MetadataCache)8 Test (org.junit.Test)8 Configuration (org.apache.hadoop.conf.Configuration)5 Path (org.apache.hadoop.fs.Path)4 CacheTag (org.apache.hadoop.hive.common.io.CacheTag)4 ByteBuffer (java.nio.ByteBuffer)3 Random (java.util.Random)3 LlapBufferOrBuffers (org.apache.hadoop.hive.llap.io.metadata.MetadataCache.LlapBufferOrBuffers)3 SyntheticFileId (org.apache.hadoop.hive.ql.io.SyntheticFileId)3 OrcTail (org.apache.orc.impl.OrcTail)3 IOException (java.io.IOException)2 FileStatus (org.apache.hadoop.fs.FileStatus)2 FileSystem (org.apache.hadoop.fs.FileSystem)2 DataCache (org.apache.hadoop.hive.common.io.DataCache)2 DiskRangeList (org.apache.hadoop.hive.common.io.DiskRangeList)2 IllegalCacheConfigurationException (org.apache.hadoop.hive.llap.IllegalCacheConfigurationException)2 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)1 AtomicLong (java.util.concurrent.atomic.AtomicLong)1 Predicate (java.util.function.Predicate)1