Search in sources:

Example 1 with MetadataCache

Use of org.apache.hadoop.hive.llap.io.metadata.MetadataCache in project hive by apache.

From the class TestOrcMetadataCache, method testIncompleteCbs.

@Test
public void testIncompleteCbs() throws Exception {
    // Build a MetadataCache backed by dummy memory management and a dummy policy,
    // so we can observe cache-policy interactions directly.
    DummyMemoryManager memManager = new DummyMemoryManager();
    DummyCachePolicy policy = new DummyCachePolicy();
    final int MAX_ALLOC = 64;
    LlapDaemonCacheMetrics cacheMetrics = LlapDaemonCacheMetrics.create("", "");
    BuddyAllocator allocator =
        new BuddyAllocator(false, false, 8, MAX_ALLOC, 1, 4096, 0, null, memManager, cacheMetrics, null);
    MetadataCache metadataCache = new MetadataCache(allocator, memManager, policy, true, cacheMetrics);
    DataCache.BooleanRef allDataRef = new DataCache.BooleanRef();
    Object fileKey = new Object();

    // Note: incomplete CBs are always an exact match.
    // Cache [0, 3); an exact-match lookup should return it and report full coverage.
    metadataCache.putIncompleteCbs(fileKey, new DiskRange[] { new DiskRangeList(0, 3) }, 0);
    policy.verifyEquals(1);
    DiskRangeList lookup = metadataCache.getIncompleteCbs(fileKey, new DiskRangeList(0, 3), 0, allDataRef);
    assertTrue(allDataRef.value);
    verifyResult(lookup, INCOMPLETE, 0, 3);

    // Add [5, 6); a request spanning [0, 3) + [4, 6) only partially hits the cache.
    metadataCache.putIncompleteCbs(fileKey, new DiskRange[] { new DiskRangeList(5, 6) }, 0);
    policy.verifyEquals(3);
    DiskRangeList request = new DiskRangeList(0, 3);
    request.insertAfter(new DiskRangeList(4, 6));
    lookup = metadataCache.getIncompleteCbs(fileKey, request, 0, allDataRef);
    assertFalse(allDataRef.value);
    verifyResult(lookup, INCOMPLETE, 0, 3, DRL, 4, 6);

    // Three adjacent ranges: the outer two hit, the middle [3, 5) does not.
    request = new DiskRangeList(0, 3);
    request.insertAfter(new DiskRangeList(3, 5)).insertAfter(new DiskRangeList(5, 6));
    lookup = metadataCache.getIncompleteCbs(fileKey, request, 0, allDataRef);
    assertFalse(allDataRef.value);
    verifyResult(lookup, INCOMPLETE, 0, 3, DRL, 3, 5, INCOMPLETE, 5, 6);

    // Exact match on the second cached range alone.
    lookup = metadataCache.getIncompleteCbs(fileKey, new DiskRangeList(5, 6), 0, allDataRef);
    assertTrue(allDataRef.value);
    verifyResult(lookup, INCOMPLETE, 5, 6);

    // [4, 5) overlaps a cached range but is not an exact match, so it misses.
    lookup = metadataCache.getIncompleteCbs(fileKey, new DiskRangeList(4, 5), 0, allDataRef);
    assertFalse(allDataRef.value);
    verifyResult(lookup, DRL, 4, 5);
}
Also used: LlapDaemonCacheMetrics(org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics) DiskRangeList(org.apache.hadoop.hive.common.io.DiskRangeList) MetadataCache(org.apache.hadoop.hive.llap.io.metadata.MetadataCache) DataCache(org.apache.hadoop.hive.common.io.DataCache) Test(org.junit.Test)

Example 2 with MetadataCache

Use of org.apache.hadoop.hive.llap.io.metadata.MetadataCache in project hive by apache.

From the class TestOrcMetadataCache, method testBuffers.

@Test
public void testBuffers() throws Exception {
    // Set up a MetadataCache over a dummy memory manager/policy so eviction
    // notifications and ref-counting can be exercised deterministically.
    DummyMemoryManager memManager = new DummyMemoryManager();
    DummyCachePolicy policy = new DummyCachePolicy();
    final int MAX_ALLOC = 64;
    LlapDaemonCacheMetrics cacheMetrics = LlapDaemonCacheMetrics.create("", "");
    BuddyAllocator allocator =
        new BuddyAllocator(false, false, 8, MAX_ALLOC, 1, 4096, 0, null, memManager, cacheMetrics, null);
    MetadataCache metadataCache = new MetadataCache(allocator, memManager, policy, true, cacheMetrics);
    Object fileKey = new Object();
    Random random = new Random();

    // A buffer smaller than the max allocation should be stored as a single buffer.
    ByteBuffer smallBuffer = ByteBuffer.allocate(MAX_ALLOC - 1);
    random.nextBytes(smallBuffer.array());
    LlapBufferOrBuffers cached = metadataCache.putFileMetadata(fileKey, smallBuffer);
    metadataCache.decRefBuffer(cached);
    ByteBuffer fromCache = cached.getSingleBuffer().getByteBufferDup();
    assertEquals(smallBuffer, fromCache);

    // A second put of the same key returns the already-cached contents.
    cached = metadataCache.putFileMetadata(fileKey, smallBuffer);
    metadataCache.decRefBuffer(cached);
    fromCache = cached.getSingleBuffer().getByteBufferDup();
    assertEquals(smallBuffer, fromCache);

    // A plain get also observes the same contents.
    cached = metadataCache.getFileMetadata(fileKey);
    fromCache = cached.getSingleBuffer().getByteBufferDup();
    assertEquals(smallBuffer, fromCache);
    metadataCache.decRefBuffer(cached);

    // After eviction, the entry is gone.
    metadataCache.notifyEvicted((LlapMetadataBuffer<?>) cached.getSingleBuffer());
    cached = metadataCache.getFileMetadata(fileKey);
    assertNull(cached);

    // A buffer larger than the max allocation is split over multiple buffers.
    ByteBuffer largeBuffer = ByteBuffer.allocate((int) (MAX_ALLOC * 2.5));
    random.nextBytes(largeBuffer.array());
    cached = metadataCache.putFileMetadata(fileKey, largeBuffer);
    metadataCache.decRefBuffer(cached);
    assertNull(cached.getSingleBuffer());
    assertEquals(largeBuffer, extractResultBbs(cached));
    cached = metadataCache.getFileMetadata(fileKey);
    assertNull(cached.getSingleBuffer());
    assertEquals(largeBuffer, extractResultBbs(cached));

    // Evicting one piece of a multi-buffer entry invalidates the whole entry.
    LlapAllocatorBuffer piece0 = cached.getMultipleLlapBuffers()[0];
    LlapAllocatorBuffer piece1 = cached.getMultipleLlapBuffers()[1];
    metadataCache.decRefBuffer(cached);
    metadataCache.notifyEvicted((LlapMetadataBuffer<?>) piece1);
    cached = metadataCache.getFileMetadata(fileKey);
    assertNull(cached);
    // Should have also been thrown out.
    assertFalse(piece0.incRef() > 0);
}
Also used: LlapDaemonCacheMetrics(org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics) Random(java.util.Random) MetadataCache(org.apache.hadoop.hive.llap.io.metadata.MetadataCache) ByteBuffer(java.nio.ByteBuffer) LlapBufferOrBuffers(org.apache.hadoop.hive.llap.io.metadata.MetadataCache.LlapBufferOrBuffers) Test(org.junit.Test)

Aggregations

MetadataCache (org.apache.hadoop.hive.llap.io.metadata.MetadataCache)2 LlapDaemonCacheMetrics (org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics)2 Test (org.junit.Test)2 ByteBuffer (java.nio.ByteBuffer)1 Random (java.util.Random)1 DataCache (org.apache.hadoop.hive.common.io.DataCache)1 DiskRangeList (org.apache.hadoop.hive.common.io.DiskRangeList)1 LlapBufferOrBuffers (org.apache.hadoop.hive.llap.io.metadata.MetadataCache.LlapBufferOrBuffers)1