
Example 1 with LlapDaemonCacheMetrics

Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.

From the class TestOrcMetadataCache, method testGetOrcTailForPath.

@Test
public void testGetOrcTailForPath() throws Exception {
    DummyMemoryManager mm = new DummyMemoryManager();
    DummyCachePolicy cp = new DummyCachePolicy();
    final int MAX_ALLOC = 64;
    LlapDaemonCacheMetrics metrics = LlapDaemonCacheMetrics.create("", "");
    BuddyAllocator alloc = new BuddyAllocator(false, false, 8, MAX_ALLOC, 1, 4 * 4096, 0, null, mm, metrics, null, true);
    MetadataCache cache = new MetadataCache(alloc, mm, cp, true, metrics);
    Path path = new Path("../data/files/alltypesorc");
    Configuration jobConf = new Configuration();
    Configuration daemonConf = new Configuration();
    CacheTag tag = CacheTag.build("test-table");
    // The first call is allowed to read the file and populates the metadata cache.
    OrcTail uncached = OrcEncodedDataReader.getOrcTailForPath(path, jobConf, tag, daemonConf, cache, null);
    // With cache-only mode on, a file-system fallback would throw, so this call must be served from the cache.
    jobConf.set(HiveConf.ConfVars.LLAP_IO_CACHE_ONLY.varname, "true");
    OrcTail cached = OrcEncodedDataReader.getOrcTailForPath(path, jobConf, tag, daemonConf, cache, null);
    assertEquals(uncached.getSerializedTail(), cached.getSerializedTail());
    assertEquals(uncached.getFileTail(), cached.getFileTail());
}
Also used : Path(org.apache.hadoop.fs.Path) LlapDaemonCacheMetrics(org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics) Configuration(org.apache.hadoop.conf.Configuration) MetadataCache(org.apache.hadoop.hive.llap.io.metadata.MetadataCache) CacheTag(org.apache.hadoop.hive.common.io.CacheTag) OrcTail(org.apache.orc.impl.OrcTail) Test(org.junit.Test)
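The two calls above double as a cache-hit probe: the first may touch the file system, while the second runs with LLAP_IO_CACHE_ONLY set and would throw on any cache miss. A minimal sketch of that probe as a reusable helper, reusing only the names from the test above (the helper itself is illustrative, not a Hive API):

static OrcTail readTailFromCacheOnly(Path path, Configuration jobConf, CacheTag tag,
        Configuration daemonConf, MetadataCache cache) throws IOException {
    // Copy the job conf so the cache-only flag does not leak into later reads.
    Configuration cacheOnly = new Configuration(jobConf);
    // With this flag set, LLAP throws instead of reading the file, so a
    // successful return proves the tail came from the metadata cache.
    cacheOnly.set(HiveConf.ConfVars.LLAP_IO_CACHE_ONLY.varname, "true");
    return OrcEncodedDataReader.getOrcTailForPath(path, cacheOnly, tag, daemonConf, cache, null);
}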

Example 2 with LlapDaemonCacheMetrics

Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.

From the class TestOrcMetadataCache, method testGetOrcTailForPathWithFileIdChange.

@Test
public void testGetOrcTailForPathWithFileIdChange() throws Exception {
    DummyMemoryManager mm = new DummyMemoryManager();
    DummyCachePolicy cp = new DummyCachePolicy();
    final int MAX_ALLOC = 64;
    LlapDaemonCacheMetrics metrics = LlapDaemonCacheMetrics.create("", "");
    BuddyAllocator alloc = new BuddyAllocator(false, false, 8, MAX_ALLOC, 1, 4 * 4096, 0, null, mm, metrics, null, true);
    MetadataCache cache = new MetadataCache(alloc, mm, cp, true, metrics);
    Path path = new Path("../data/files/alltypesorc");
    Configuration jobConf = new Configuration();
    Configuration daemonConf = new Configuration();
    CacheTag tag = CacheTag.build("test-table");
    // Seed the cache under a synthetic file id derived from the path, length 100, and mtime 100.
    OrcEncodedDataReader.getOrcTailForPath(path, jobConf, tag, daemonConf, cache, new SyntheticFileId(path, 100, 100));
    jobConf.set(HiveConf.ConfVars.LLAP_IO_CACHE_ONLY.varname, "true");
    Exception ex = null;
    try {
        // this should miss the cache, since the fileKey changed
        OrcEncodedDataReader.getOrcTailForPath(path, jobConf, tag, daemonConf, cache, new SyntheticFileId(path, 100, 101));
        fail("Expected cache-only read to throw: the file id changed, so the tail is not in cache");
    } catch (IOException e) {
        ex = e;
    }
    Assert.assertTrue(ex.getMessage().contains(HiveConf.ConfVars.LLAP_IO_CACHE_ONLY.varname));
}
Also used : Path(org.apache.hadoop.fs.Path) LlapDaemonCacheMetrics(org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics) Configuration(org.apache.hadoop.conf.Configuration) SyntheticFileId(org.apache.hadoop.hive.ql.io.SyntheticFileId) MetadataCache(org.apache.hadoop.hive.llap.io.metadata.MetadataCache) CacheTag(org.apache.hadoop.hive.common.io.CacheTag) IOException(java.io.IOException) IllegalCacheConfigurationException(org.apache.hadoop.hive.llap.IllegalCacheConfigurationException) Test(org.junit.Test)
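The forced miss above works because the metadata cache keys on the file id, and a SyntheticFileId is derived from the path plus the file's length and modification time. A small illustrative check of that keying assumption (not part of the test itself):

// Two synthetic ids for the same path but different mtimes are distinct keys,
// which is exactly what makes the second getOrcTailForPath call miss the cache.
SyntheticFileId original = new SyntheticFileId(path, 100, 100);
SyntheticFileId modified = new SyntheticFileId(path, 100, 101);
assertFalse(original.equals(modified));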

Example 3 with LlapDaemonCacheMetrics

Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.

From the class TestOrcMetadataCache, method testProactiveEvictionMark.

@Test
public void testProactiveEvictionMark() throws Exception {
    DummyMemoryManager mm = new DummyMemoryManager();
    DummyCachePolicy cp = new DummyCachePolicy();
    final int MAX_ALLOC = 64;
    LlapDaemonCacheMetrics metrics = LlapDaemonCacheMetrics.create("", "");
    BuddyAllocator alloc = new BuddyAllocator(false, false, 8, MAX_ALLOC, 1, 4096, 0, null, mm, metrics, null, true);
    MetadataCache cache = new MetadataCache(alloc, mm, cp, true, metrics);
    long fn1 = 1;
    long fn2 = 2;
    long fn3 = 3;
    AtomicBoolean isStopped = new AtomicBoolean(false);
    // Case for when metadata consists of just one buffer (most real-world cases)
    ByteBuffer bb = ByteBuffer.wrap("small-meta-data-content".getBytes());
    // Case for when metadata consists of multiple buffers (rare case); max allocation is 64,
    // hence the test data below is of length 65.
    ByteBuffer bb2 = ByteBuffer.wrap("-large-meta-data-content-large-meta-data-content-large-meta-data-".getBytes());
    LlapBufferOrBuffers table1Buffers1 = cache.putFileMetadata(fn1, bb, CacheTag.build("default.table1"), isStopped);
    assertNotNull(table1Buffers1.getSingleLlapBuffer());
    LlapBufferOrBuffers table1Buffers2 = cache.putFileMetadata(fn2, bb2, CacheTag.build("default.table1"), isStopped);
    assertNotNull(table1Buffers2.getMultipleLlapBuffers());
    assertEquals(2, table1Buffers2.getMultipleLlapBuffers().length);
    // Case for when metadata consists of just one buffer (most real-world cases)
    ByteBuffer bb3 = ByteBuffer.wrap("small-meta-data-content-for-otherFile".getBytes());
    LlapBufferOrBuffers table2Buffers1 = cache.putFileMetadata(fn3, bb3, CacheTag.build("default.table2"), isStopped);
    assertNotNull(table2Buffers1.getSingleLlapBuffer());
    Predicate<CacheTag> predicate = tag -> "default.table1".equals(tag.getTableName());
    // Simulating eviction on some buffers
    table1Buffers2.getMultipleLlapBuffers()[1].decRef();
    assertEquals(INVALIDATE_OK, table1Buffers2.getMultipleLlapBuffers()[1].invalidate());
    // table1Buffers1: 27 bytes (allocated as 32) + table1Buffers2[0]: 64 bytes (allocated as 64)
    assertEquals(96, cache.markBuffersForProactiveEviction(predicate, false));
    // Single buffer for file1 should be marked as per predicate
    assertTrue(table1Buffers1.getSingleLlapBuffer().isMarkedForEviction());
    // Multi buffer for file2 should be partially marked as per predicate and prior eviction
    assertTrue(table1Buffers2.getMultipleLlapBuffers()[0].isMarkedForEviction());
    assertFalse(table1Buffers2.getMultipleLlapBuffers()[1].isMarkedForEviction());
    // Single buffer for file3 should not be marked as per predicate
    assertFalse(table2Buffers1.getSingleLlapBuffer().isMarkedForEviction());
}
Also used : FileSystem(org.apache.hadoop.fs.FileSystem) OrcEncodedDataReader(org.apache.hadoop.hive.llap.io.encoded.OrcEncodedDataReader) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Random(java.util.Random) LlapBufferOrBuffers(org.apache.hadoop.hive.llap.io.metadata.MetadataCache.LlapBufferOrBuffers) FileStatus(org.apache.hadoop.fs.FileStatus) Priority(org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority) ByteBuffer(java.nio.ByteBuffer) Configuration(org.apache.hadoop.conf.Configuration) Path(org.apache.hadoop.fs.Path) SyntheticFileId(org.apache.hadoop.hive.ql.io.SyntheticFileId) CacheTag(org.apache.hadoop.hive.common.io.CacheTag) DiskRange(org.apache.hadoop.hive.common.io.DiskRange) DiskRangeList(org.apache.hadoop.hive.common.io.DiskRangeList) IncompleteCb(org.apache.hadoop.hive.ql.io.orc.encoded.IncompleteCb) MetadataCache(org.apache.hadoop.hive.llap.io.metadata.MetadataCache) INVALIDATE_OK(org.apache.hadoop.hive.llap.cache.LlapCacheableBuffer.INVALIDATE_OK) Predicate(java.util.function.Predicate) IllegalCacheConfigurationException(org.apache.hadoop.hive.llap.IllegalCacheConfigurationException) HiveConf(org.apache.hadoop.hive.conf.HiveConf) OrcTail(org.apache.orc.impl.OrcTail) IOException(java.io.IOException) Test(org.junit.Test) DataCache(org.apache.hadoop.hive.common.io.DataCache) LlapMetadataBuffer(org.apache.hadoop.hive.llap.io.metadata.MetadataCache.LlapMetadataBuffer) LlapDaemonCacheMetrics(org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics) Assert(org.junit.Assert)
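The 96-byte figure in the assertion follows from buddy-allocator rounding: each marked buffer is accounted at its request size rounded up to a power of two between the minimum (8 here) and the maximum (64 here) allocation. A hedged sketch of that arithmetic (roundToBuddySize is an illustrative helper, not a Hive API):

// Illustrative power-of-two rounding as a buddy allocator performs it.
static int roundToBuddySize(int requested, int minAlloc, int maxAlloc) {
    int size = Math.max(minAlloc, Integer.highestOneBit(requested));
    if (size < requested) {
        size <<= 1; // round up to the next power of two
    }
    return Math.min(size, maxAlloc); // larger payloads are split across several buffers
}
// roundToBuddySize(27, 8, 64) == 32 and roundToBuddySize(64, 8, 64) == 64, so 32 + 64 = 96.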

Example 4 with LlapDaemonCacheMetrics

Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.

From the class TestOrcMetadataCache, method testIncompleteCbs.

@Test
public void testIncompleteCbs() throws Exception {
    DummyMemoryManager mm = new DummyMemoryManager();
    DummyCachePolicy cp = new DummyCachePolicy();
    final int MAX_ALLOC = 64;
    LlapDaemonCacheMetrics metrics = LlapDaemonCacheMetrics.create("", "");
    BuddyAllocator alloc = new BuddyAllocator(false, false, 8, MAX_ALLOC, 1, 4096, 0, null, mm, metrics, null, true);
    MetadataCache cache = new MetadataCache(alloc, mm, cp, true, metrics);
    DataCache.BooleanRef gotAllData = new DataCache.BooleanRef();
    Object fileKey1 = new Object();
    // Note: incomplete CBs are always an exact match.
    cache.putIncompleteCbs(fileKey1, new DiskRange[] { new DiskRangeList(0, 3) }, 0, null);
    cp.verifyEquals(1);
    DiskRangeList result = cache.getIncompleteCbs(fileKey1, new DiskRangeList(0, 3), 0, gotAllData);
    assertTrue(gotAllData.value);
    verifyResult(result, INCOMPLETE, 0, 3);
    cache.putIncompleteCbs(fileKey1, new DiskRange[] { new DiskRangeList(5, 6) }, 0, null);
    cp.verifyEquals(3);
    DiskRangeList ranges = new DiskRangeList(0, 3);
    ranges.insertAfter(new DiskRangeList(4, 6));
    result = cache.getIncompleteCbs(fileKey1, ranges, 0, gotAllData);
    assertFalse(gotAllData.value);
    verifyResult(result, INCOMPLETE, 0, 3, DRL, 4, 6);
    ranges = new DiskRangeList(0, 3);
    ranges.insertAfter(new DiskRangeList(3, 5)).insertAfter(new DiskRangeList(5, 6));
    result = cache.getIncompleteCbs(fileKey1, ranges, 0, gotAllData);
    assertFalse(gotAllData.value);
    verifyResult(result, INCOMPLETE, 0, 3, DRL, 3, 5, INCOMPLETE, 5, 6);
    result = cache.getIncompleteCbs(fileKey1, new DiskRangeList(5, 6), 0, gotAllData);
    assertTrue(gotAllData.value);
    verifyResult(result, INCOMPLETE, 5, 6);
    result = cache.getIncompleteCbs(fileKey1, new DiskRangeList(4, 5), 0, gotAllData);
    assertFalse(gotAllData.value);
    verifyResult(result, DRL, 4, 5);
}
Also used : LlapDaemonCacheMetrics(org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics) DiskRangeList(org.apache.hadoop.hive.common.io.DiskRangeList) MetadataCache(org.apache.hadoop.hive.llap.io.metadata.MetadataCache) DataCache(org.apache.hadoop.hive.common.io.DataCache) Test(org.junit.Test)
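The range lists passed to getIncompleteCbs above are built with DiskRangeList, a doubly linked list of [offset, end) ranges whose insertAfter returns the newly inserted node, so construction chains naturally. A small illustrative walk of such a list (the printing loop is not part of the test):

// Build [0, 3) -> [3, 5) -> [5, 6) and walk it via the public next pointer.
DiskRangeList head = new DiskRangeList(0, 3);
head.insertAfter(new DiskRangeList(3, 5)).insertAfter(new DiskRangeList(5, 6));
for (DiskRangeList r = head; r != null; r = r.next) {
    System.out.println("[" + r.getOffset() + ", " + r.getEnd() + ")");
}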

Example 5 with LlapDaemonCacheMetrics

Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.

From the class TestLlapCacheMetadataSerializer, method setUp.

@Before
public void setUp() {
    conf = new Configuration();
    HiveConf.setIntVar(conf, HiveConf.ConfVars.LLAP_LRFU_BP_WRAPPER_SIZE, 1);
    HiveConf.setFloatVar(conf, HiveConf.ConfVars.LLAP_LRFU_HOTBUFFERS_PERCENTAGE, 1.0f);
    BuddyAllocator buddyAllocator = TestBuddyAllocatorForceEvict.create(16384, 2, 32768, false, true);
    LlapDaemonCacheMetrics metrics = LlapDaemonCacheMetrics.create("", "");
    cachePolicy = new LowLevelLrfuCachePolicy(1, 5, conf);
    LowLevelCacheImpl cache = new LowLevelCacheImpl(metrics, cachePolicy, buddyAllocator, true);
    fileMetadataCache = new MetadataCache(buddyAllocator, null, cachePolicy, false, metrics);
    tracePool = IoTrace.createTracePool(conf);
    mockDataCache = new LlapIoMocks.MockDataCache(cache, buddyAllocator, cachePolicy);
    pathCache = new MemoryLimitedPathCache(conf);
    serializer = new LlapCacheMetadataSerializer(fileMetadataCache, mockDataCache, conf, pathCache, tracePool, cachePolicy);
}
Also used : LlapIoMocks(org.apache.hadoop.hive.llap.io.LlapIoMocks) LlapDaemonCacheMetrics(org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics) Configuration(org.apache.hadoop.conf.Configuration) LowLevelCacheImpl(org.apache.hadoop.hive.llap.cache.LowLevelCacheImpl) LowLevelLrfuCachePolicy(org.apache.hadoop.hive.llap.cache.LowLevelLrfuCachePolicy) FileMetadataCache(org.apache.hadoop.hive.common.io.FileMetadataCache) MetadataCache(org.apache.hadoop.hive.llap.io.metadata.MetadataCache) BuddyAllocator(org.apache.hadoop.hive.llap.cache.BuddyAllocator) MemoryLimitedPathCache(org.apache.hadoop.hive.llap.cache.MemoryLimitedPathCache) Before(org.junit.Before)
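All five examples create their metrics with LlapDaemonCacheMetrics.create("", ""). Empty strings are fine in tests, but the two arguments appear to be a display name and a session id, so named metrics are easier to tell apart when more than one cache is alive. A hedged one-liner (the argument meanings are an assumption based on the call sites above; the names chosen are illustrative):

// Assumed parameter order: create(displayName, sessionId).
LlapDaemonCacheMetrics namedMetrics =
        LlapDaemonCacheMetrics.create("TestLlapCacheMetadataSerializer", "test-session");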

Aggregations

LlapDaemonCacheMetrics (org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics): 10
MetadataCache (org.apache.hadoop.hive.llap.io.metadata.MetadataCache): 8
Test (org.junit.Test): 8
Configuration (org.apache.hadoop.conf.Configuration): 5
Path (org.apache.hadoop.fs.Path): 4
CacheTag (org.apache.hadoop.hive.common.io.CacheTag): 4
ByteBuffer (java.nio.ByteBuffer): 3
Random (java.util.Random): 3
LlapBufferOrBuffers (org.apache.hadoop.hive.llap.io.metadata.MetadataCache.LlapBufferOrBuffers): 3
SyntheticFileId (org.apache.hadoop.hive.ql.io.SyntheticFileId): 3
OrcTail (org.apache.orc.impl.OrcTail): 3
IOException (java.io.IOException): 2
FileStatus (org.apache.hadoop.fs.FileStatus): 2
FileSystem (org.apache.hadoop.fs.FileSystem): 2
DataCache (org.apache.hadoop.hive.common.io.DataCache): 2
DiskRangeList (org.apache.hadoop.hive.common.io.DiskRangeList): 2
IllegalCacheConfigurationException (org.apache.hadoop.hive.llap.IllegalCacheConfigurationException): 2
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 1
AtomicLong (java.util.concurrent.atomic.AtomicLong): 1
Predicate (java.util.function.Predicate): 1