
Example 1 with INVALIDATE_OK

Use of org.apache.hadoop.hive.llap.cache.LlapCacheableBuffer.INVALIDATE_OK in project hive by apache.

The class TestOrcMetadataCache, method testProactiveEvictionMark.

@Test
public void testProactiveEvictionMark() throws Exception {
    DummyMemoryManager mm = new DummyMemoryManager();
    DummyCachePolicy cp = new DummyCachePolicy();
    final int MAX_ALLOC = 64;
    LlapDaemonCacheMetrics metrics = LlapDaemonCacheMetrics.create("", "");
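    // Max allocation is MAX_ALLOC (64) bytes, so any metadata larger than that must span multiple cache buffers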
    BuddyAllocator alloc = new BuddyAllocator(false, false, 8, MAX_ALLOC, 1, 4096, 0, null, mm, metrics, null, true);
    MetadataCache cache = new MetadataCache(alloc, mm, cp, true, metrics);
    long fn1 = 1;
    long fn2 = 2;
    long fn3 = 3;
    AtomicBoolean isStopped = new AtomicBoolean(false);
    // Case for when metadata consists of just one buffer (most real-world cases)
    ByteBuffer bb = ByteBuffer.wrap("small-meta-data-content".getBytes());
    // Case for when metadata consists of multiple buffers (rare case); max allocation is 64,
    // hence the test data below is 65 bytes long
    ByteBuffer bb2 = ByteBuffer.wrap("-large-meta-data-content-large-meta-data-content-large-meta-data-".getBytes());
    LlapBufferOrBuffers table1Buffers1 = cache.putFileMetadata(fn1, bb, CacheTag.build("default.table1"), isStopped);
    assertNotNull(table1Buffers1.getSingleLlapBuffer());
    LlapBufferOrBuffers table1Buffers2 = cache.putFileMetadata(fn2, bb2, CacheTag.build("default.table1"), isStopped);
    assertNotNull(table1Buffers2.getMultipleLlapBuffers());
    assertEquals(2, table1Buffers2.getMultipleLlapBuffers().length);
    // Case for when metadata consists of just one buffer (most real-world cases)
    ByteBuffer bb3 = ByteBuffer.wrap("small-meta-data-content-for-otherFile".getBytes());
    LlapBufferOrBuffers table2Buffers1 = cache.putFileMetadata(fn3, bb3, CacheTag.build("default.table2"), isStopped);
    assertNotNull(table2Buffers1.getSingleLlapBuffer());
    Predicate<CacheTag> predicate = tag -> "default.table1".equals(tag.getTableName());
    // Simulate a prior eviction: release the second buffer's refcount and invalidate it
    table1Buffers2.getMultipleLlapBuffers()[1].decRef();
    assertEquals(INVALIDATE_OK, table1Buffers2.getMultipleLlapBuffers()[1].invalidate());
    // table1Buffers1: 23 bytes (allocated as 32) + table1Buffers2[0]: 64 bytes (allocated as 64) = 96
    assertEquals(96, cache.markBuffersForProactiveEviction(predicate, false));
    // The single buffer for file1 should be marked, as the predicate matches its tag
    assertTrue(table1Buffers1.getSingleLlapBuffer().isMarkedForEviction());
    // The multi-buffer entry for file2 should be partially marked: [0] matches the predicate, [1] was already invalidated
    assertTrue(table1Buffers2.getMultipleLlapBuffers()[0].isMarkedForEviction());
    assertFalse(table1Buffers2.getMultipleLlapBuffers()[1].isMarkedForEviction());
    // The single buffer for file3 should not be marked, as the predicate does not match its tag
    assertFalse(table2Buffers1.getSingleLlapBuffer().isMarkedForEviction());
}
Also used : FileSystem (org.apache.hadoop.fs.FileSystem), OrcEncodedDataReader (org.apache.hadoop.hive.llap.io.encoded.OrcEncodedDataReader), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Random (java.util.Random), LlapBufferOrBuffers (org.apache.hadoop.hive.llap.io.metadata.MetadataCache.LlapBufferOrBuffers), FileStatus (org.apache.hadoop.fs.FileStatus), Priority (org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority), ByteBuffer (java.nio.ByteBuffer), Configuration (org.apache.hadoop.conf.Configuration), Path (org.apache.hadoop.fs.Path), SyntheticFileId (org.apache.hadoop.hive.ql.io.SyntheticFileId), CacheTag (org.apache.hadoop.hive.common.io.CacheTag), DiskRange (org.apache.hadoop.hive.common.io.DiskRange), DiskRangeList (org.apache.hadoop.hive.common.io.DiskRangeList), IncompleteCb (org.apache.hadoop.hive.ql.io.orc.encoded.IncompleteCb), MetadataCache (org.apache.hadoop.hive.llap.io.metadata.MetadataCache), INVALIDATE_OK (org.apache.hadoop.hive.llap.cache.LlapCacheableBuffer.INVALIDATE_OK), Predicate (java.util.function.Predicate), IllegalCacheConfigurationException (org.apache.hadoop.hive.llap.IllegalCacheConfigurationException), HiveConf (org.apache.hadoop.hive.conf.HiveConf), OrcTail (org.apache.orc.impl.OrcTail), IOException (java.io.IOException), Test (org.junit.Test), DataCache (org.apache.hadoop.hive.common.io.DataCache), LlapMetadataBuffer (org.apache.hadoop.hive.llap.io.metadata.MetadataCache.LlapMetadataBuffer), LlapDaemonCacheMetrics (org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics), Assert (org.junit.Assert)
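
The assertions above hinge on the invalidate() contract of LlapCacheableBuffer: invalidation only succeeds (returns INVALIDATE_OK) once every refcount has been released, and a buffer that is already invalid is skipped by markBuffersForProactiveEviction. Below is a minimal sketch of that check. It assumes the caller lives in the org.apache.hadoop.hive.llap.cache package (invalidate() may not be visible from elsewhere) and that getMemoryUsage() reports the buffer's accounted size; the ProactiveEvictionSketch class and its sweepIfUnused method are hypothetical, for illustration only.

package org.apache.hadoop.hive.llap.cache;

import static org.apache.hadoop.hive.llap.cache.LlapCacheableBuffer.INVALIDATE_OK;

final class ProactiveEvictionSketch {

    // Attempt to invalidate one buffer selected by an eviction predicate.
    // Returns the number of bytes reclaimed, or 0 if the buffer is still
    // referenced or was already invalidated (like table1Buffers2[1] above).
    static long sweepIfUnused(LlapCacheableBuffer buffer) {
        if (buffer.invalidate() == INVALIDATE_OK) {
            // Accounted sizes are allocated sizes: the 23-byte metadata
            // above counts as 32, hence the 32 + 64 = 96 assertion.
            return buffer.getMemoryUsage();
        }
        return 0;
    }
}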

Example 2 with INVALIDATE_OK

Use of org.apache.hadoop.hive.llap.cache.LlapCacheableBuffer.INVALIDATE_OK in project hive by apache.

The class TestLowLevelCacheImpl, method _testProactiveEvictionMark.

private void _testProactiveEvictionMark(boolean isInstantDeallocation) {
    LowLevelCacheImpl cache = new LowLevelCacheImpl(LlapDaemonCacheMetrics.create("test", "1"),
        new DummyCachePolicy(), new DummyAllocator(), true, -1); // cleanup interval of -1: no cleanup thread
    long fn1 = 1;
    long fn2 = 2;
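    // fb() and drs() are helpers defined elsewhere in this test class: fb() builds a fake
    // LlapDataBuffer and drs() builds a DiskRange array from the given ints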
    LlapDataBuffer[] buffs1 = IntStream.range(0, 4).mapToObj(i -> fb()).toArray(LlapDataBuffer[]::new);
    DiskRange[] drs1 = drs(IntStream.range(1, 5).toArray());
    CacheTag tag1 = CacheTag.build("default.table1");
    LlapDataBuffer[] buffs2 = IntStream.range(0, 41).mapToObj(i -> fb()).toArray(LlapDataBuffer[]::new);
    DiskRange[] drs2 = drs(IntStream.range(1, 42).toArray());
    CacheTag tag2 = CacheTag.build("default.table2");
    Predicate<CacheTag> predicate = tag -> "default.table1".equals(tag.getTableName());
    cache.putFileData(fn1, drs1, buffs1, 0, Priority.NORMAL, null, tag1);
    cache.putFileData(fn2, drs2, buffs2, 0, Priority.NORMAL, null, tag2);
    // Release all refcounts so the buffers of file1 become eligible for invalidation
    Arrays.stream(buffs1).forEach(b -> {
        b.decRef();
        b.decRef();
    });
    // Simulate a prior eviction on one buffer
    assertEquals(INVALIDATE_OK, buffs1[2].invalidate());
    // buffs1[0], [1] and [3] should be marked; [2] is skipped because it is already invalidated
    assertEquals(3, cache.markBuffersForProactiveEviction(predicate, isInstantDeallocation));
    for (int i = 0; i < buffs1.length; ++i) {
        LlapDataBuffer buffer = buffs1[i];
        if (i == 2) {
            assertFalse(buffer.isMarkedForEviction());
        } else {
            assertTrue(buffer.isMarkedForEviction());
            assertEquals(isInstantDeallocation, buffer.isInvalid());
        }
    }
    // No buffer of file2 should be marked, as the predicate does not match its tag
    for (LlapDataBuffer buffer : buffs2) {
        assertFalse(buffer.isMarkedForEviction());
    }
}
Also used : IntStream (java.util.stream.IntStream), Arrays (java.util.Arrays), LoggerFactory (org.slf4j.LoggerFactory), FutureTask (java.util.concurrent.FutureTask), Random (java.util.Random), Callable (java.util.concurrent.Callable), MemoryBuffer (org.apache.hadoop.hive.common.io.encoded.MemoryBuffer), Priority (org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority), Assert.assertSame (org.junit.Assert.assertSame), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), CacheTag (org.apache.hadoop.hive.common.io.CacheTag), DiskRange (org.apache.hadoop.hive.common.io.DiskRange), DiskRangeList (org.apache.hadoop.hive.common.io.DiskRangeList), Logger (org.slf4j.Logger), Executor (java.util.concurrent.Executor), INVALIDATE_OK (org.apache.hadoop.hive.llap.cache.LlapCacheableBuffer.INVALIDATE_OK), Predicate (java.util.function.Predicate), Assert.assertTrue (org.junit.Assert.assertTrue), Test (org.junit.Test), CreateHelper (org.apache.hadoop.hive.common.io.DiskRangeList.CreateHelper), CacheChunk (org.apache.hadoop.hive.ql.io.orc.encoded.CacheChunk), Executors (java.util.concurrent.Executors), DiskRangeListFactory (org.apache.hadoop.hive.common.io.DataCache.DiskRangeListFactory), CountDownLatch (java.util.concurrent.CountDownLatch), Assert.assertNull (org.junit.Assert.assertNull), Assert.assertFalse (org.junit.Assert.assertFalse), LlapDaemonCacheMetrics (org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics), Assert.assertEquals (org.junit.Assert.assertEquals)
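
Both tests drive the marking with a Predicate<CacheTag> over the fully qualified table name. Below is a small sketch of composing such predicates, using only the CacheTag.getTableName() call already seen above; the CacheTagPredicates helper class and the forDatabase variant are hypothetical conveniences, not part of the tests.

import java.util.function.Predicate;

import org.apache.hadoop.hive.common.io.CacheTag;

final class CacheTagPredicates {

    // Exact "db.table" match, identical to the inline lambdas in both tests.
    static Predicate<CacheTag> forTable(String dbDotTable) {
        return tag -> dbDotTable.equals(tag.getTableName());
    }

    // Every table of one database, matched by prefix (hypothetical variant).
    static Predicate<CacheTag> forDatabase(String db) {
        return tag -> tag.getTableName() != null && tag.getTableName().startsWith(db + ".");
    }
}

Either predicate can be passed to markBuffersForProactiveEviction exactly like the inline lambdas in the two tests above.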

Aggregations

Random (java.util.Random)2
Predicate (java.util.function.Predicate)2
CacheTag (org.apache.hadoop.hive.common.io.CacheTag)2
DiskRange (org.apache.hadoop.hive.common.io.DiskRange)2
DiskRangeList (org.apache.hadoop.hive.common.io.DiskRangeList)2
INVALIDATE_OK (org.apache.hadoop.hive.llap.cache.LlapCacheableBuffer.INVALIDATE_OK)2
Priority (org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority)2
LlapDaemonCacheMetrics (org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics)2
Test (org.junit.Test)2
IOException (java.io.IOException)1
ByteBuffer (java.nio.ByteBuffer)1
Arrays (java.util.Arrays)1
Callable (java.util.concurrent.Callable)1
CountDownLatch (java.util.concurrent.CountDownLatch)1
Executor (java.util.concurrent.Executor)1
Executors (java.util.concurrent.Executors)1
FutureTask (java.util.concurrent.FutureTask)1
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger)1
IntStream (java.util.stream.IntStream)1