Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.
From the class TestOrcMetadataCache, method testGetOrcTailForPath.
@Test
public void testGetOrcTailForPath() throws Exception {
  DummyMemoryManager memoryManager = new DummyMemoryManager();
  DummyCachePolicy policy = new DummyCachePolicy();
  final int MAX_ALLOC = 64;
  LlapDaemonCacheMetrics cacheMetrics = LlapDaemonCacheMetrics.create("", "");
  BuddyAllocator allocator =
      new BuddyAllocator(false, false, 8, MAX_ALLOC, 1, 4 * 4096, 0, null, memoryManager, cacheMetrics, null, true);
  MetadataCache metadataCache = new MetadataCache(allocator, memoryManager, policy, true, cacheMetrics);
  Path orcPath = new Path("../data/files/alltypesorc");
  Configuration jobConf = new Configuration();
  Configuration daemonConf = new Configuration();
  CacheTag cacheTag = CacheTag.build("test-table");
  // First call reads the tail from the file and populates the metadata cache.
  OrcTail uncached =
      OrcEncodedDataReader.getOrcTailForPath(orcPath, jobConf, cacheTag, daemonConf, metadataCache, null);
  // Switch to cache-only mode: the second call must be served entirely from the cache.
  jobConf.set(HiveConf.ConfVars.LLAP_IO_CACHE_ONLY.varname, "true");
  OrcTail cached =
      OrcEncodedDataReader.getOrcTailForPath(orcPath, jobConf, cacheTag, daemonConf, metadataCache, null);
  // Cached tail must match the one originally read from disk, both serialized and parsed.
  assertEquals(uncached.getSerializedTail(), cached.getSerializedTail());
  assertEquals(uncached.getFileTail(), cached.getFileTail());
}
Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.
From the class TestOrcMetadataCache, method testGetOrcTailForPathWithFileIdChange.
@Test
public void testGetOrcTailForPathWithFileIdChange() throws Exception {
  DummyMemoryManager memoryManager = new DummyMemoryManager();
  DummyCachePolicy policy = new DummyCachePolicy();
  final int MAX_ALLOC = 64;
  LlapDaemonCacheMetrics cacheMetrics = LlapDaemonCacheMetrics.create("", "");
  BuddyAllocator allocator =
      new BuddyAllocator(false, false, 8, MAX_ALLOC, 1, 4 * 4096, 0, null, memoryManager, cacheMetrics, null, true);
  MetadataCache metadataCache = new MetadataCache(allocator, memoryManager, policy, true, cacheMetrics);
  Path orcPath = new Path("../data/files/alltypesorc");
  Configuration jobConf = new Configuration();
  Configuration daemonConf = new Configuration();
  CacheTag cacheTag = CacheTag.build("test-table");
  // Populate the cache under a synthetic file id.
  OrcEncodedDataReader.getOrcTailForPath(orcPath, jobConf, cacheTag, daemonConf, metadataCache,
      new SyntheticFileId(orcPath, 100, 100));
  // In cache-only mode a lookup with a different file id must fail rather than hit disk.
  jobConf.set(HiveConf.ConfVars.LLAP_IO_CACHE_ONLY.varname, "true");
  Exception ex = null;
  try {
    // this should miss the cache, since the fileKey changed
    OrcEncodedDataReader.getOrcTailForPath(orcPath, jobConf, cacheTag, daemonConf, metadataCache,
        new SyntheticFileId(orcPath, 100, 101));
    fail();
  } catch (IOException e) {
    ex = e;
  }
  // The failure should explicitly mention the cache-only setting that caused it.
  Assert.assertTrue(ex.getMessage().contains(HiveConf.ConfVars.LLAP_IO_CACHE_ONLY.varname));
}
Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.
From the class TestOrcMetadataCache, method testProactiveEvictionMark.
@Test
public void testProactiveEvictionMark() throws Exception {
  DummyMemoryManager memoryManager = new DummyMemoryManager();
  DummyCachePolicy policy = new DummyCachePolicy();
  final int MAX_ALLOC = 64;
  LlapDaemonCacheMetrics cacheMetrics = LlapDaemonCacheMetrics.create("", "");
  BuddyAllocator allocator =
      new BuddyAllocator(false, false, 8, MAX_ALLOC, 1, 4096, 0, null, memoryManager, cacheMetrics, null, true);
  MetadataCache metadataCache = new MetadataCache(allocator, memoryManager, policy, true, cacheMetrics);
  long fileKey1 = 1;
  long fileKey2 = 2;
  long fileKey3 = 3;
  AtomicBoolean isStopped = new AtomicBoolean(false);
  // Typical case: metadata fits into a single cache buffer.
  ByteBuffer smallMeta = ByteBuffer.wrap("small-meta-data-content".getBytes());
  // Rare case: metadata spans multiple buffers; content is 65 bytes, one over the
  // 64-byte max allocation, so it is split into two buffers.
  ByteBuffer largeMeta =
      ByteBuffer.wrap("-large-meta-data-content-large-meta-data-content-large-meta-data-".getBytes());
  LlapBufferOrBuffers table1Buffers1 =
      metadataCache.putFileMetadata(fileKey1, smallMeta, CacheTag.build("default.table1"), isStopped);
  assertNotNull(table1Buffers1.getSingleLlapBuffer());
  LlapBufferOrBuffers table1Buffers2 =
      metadataCache.putFileMetadata(fileKey2, largeMeta, CacheTag.build("default.table1"), isStopped);
  assertNotNull(table1Buffers2.getMultipleLlapBuffers());
  assertEquals(2, table1Buffers2.getMultipleLlapBuffers().length);
  // A third file, belonging to a different table, again with single-buffer metadata.
  ByteBuffer otherMeta = ByteBuffer.wrap("small-meta-data-content-for-otherFile".getBytes());
  LlapBufferOrBuffers table2Buffers1 =
      metadataCache.putFileMetadata(fileKey3, otherMeta, CacheTag.build("default.table2"), isStopped);
  assertNotNull(table2Buffers1.getSingleLlapBuffer());
  // Proactive eviction targets only table1's buffers.
  Predicate<CacheTag> predicate = tag -> "default.table1".equals(tag.getTableName());
  // Simulating eviction on some buffers
  table1Buffers2.getMultipleLlapBuffers()[1].decRef();
  assertEquals(INVALIDATE_OK, table1Buffers2.getMultipleLlapBuffers()[1].invalidate());
  // table1Buffers1:27 (allocated as 32) + table1Buffers2[0]:64 (also allocated as 64)
  assertEquals(96, metadataCache.markBuffersForProactiveEviction(predicate, false));
  // Single buffer for file1 should be marked as per predicate
  assertTrue(table1Buffers1.getSingleLlapBuffer().isMarkedForEviction());
  // Multi buffer for file2 should be partially marked as per predicate and prior eviction
  assertTrue(table1Buffers2.getMultipleLlapBuffers()[0].isMarkedForEviction());
  assertFalse(table1Buffers2.getMultipleLlapBuffers()[1].isMarkedForEviction());
  // Single buffer for file3 should not be marked as per predicate
  assertFalse(table2Buffers1.getSingleLlapBuffer().isMarkedForEviction());
}
Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.
From the class TestOrcMetadataCache, method testIncompleteCbs.
@Test
public void testIncompleteCbs() throws Exception {
// Verifies caching of "incomplete CB" markers: ranges recorded as having no cached data.
// The assertions below are order-dependent — each put changes what later gets return.
DummyMemoryManager mm = new DummyMemoryManager();
DummyCachePolicy cp = new DummyCachePolicy();
final int MAX_ALLOC = 64;
LlapDaemonCacheMetrics metrics = LlapDaemonCacheMetrics.create("", "");
BuddyAllocator alloc = new BuddyAllocator(false, false, 8, MAX_ALLOC, 1, 4096, 0, null, mm, metrics, null, true);
MetadataCache cache = new MetadataCache(alloc, mm, cp, true, metrics);
// gotAllData reports whether a lookup was fully satisfied by cached entries.
DataCache.BooleanRef gotAllData = new DataCache.BooleanRef();
Object fileKey1 = new Object();
// Note: incomplete CBs are always an exact match.
cache.putIncompleteCbs(fileKey1, new DiskRange[] { new DiskRangeList(0, 3) }, 0, null);
// The dummy policy counts cache notifications; one put => count 1.
cp.verifyEquals(1);
// Exact-match lookup of [0,3) hits the cached incomplete CB.
DiskRangeList result = cache.getIncompleteCbs(fileKey1, new DiskRangeList(0, 3), 0, gotAllData);
assertTrue(gotAllData.value);
verifyResult(result, INCOMPLETE, 0, 3);
cache.putIncompleteCbs(fileKey1, new DiskRange[] { new DiskRangeList(5, 6) }, 0, null);
// Second put plus the touch from the get above => count 3.
cp.verifyEquals(3);
// Lookup of [0,3) and [4,6): [0,3) hits, [4,6) does not exactly match [5,6) so it misses.
DiskRangeList ranges = new DiskRangeList(0, 3);
ranges.insertAfter(new DiskRangeList(4, 6));
result = cache.getIncompleteCbs(fileKey1, ranges, 0, gotAllData);
assertFalse(gotAllData.value);
verifyResult(result, INCOMPLETE, 0, 3, DRL, 4, 6);
// Lookup of [0,3), [3,5), [5,6): the first and last hit exactly; [3,5) was never cached.
ranges = new DiskRangeList(0, 3);
ranges.insertAfter(new DiskRangeList(3, 5)).insertAfter(new DiskRangeList(5, 6));
result = cache.getIncompleteCbs(fileKey1, ranges, 0, gotAllData);
assertFalse(gotAllData.value);
verifyResult(result, INCOMPLETE, 0, 3, DRL, 3, 5, INCOMPLETE, 5, 6);
// Exact-match lookup of [5,6) alone is fully served from cache.
result = cache.getIncompleteCbs(fileKey1, new DiskRangeList(5, 6), 0, gotAllData);
assertTrue(gotAllData.value);
verifyResult(result, INCOMPLETE, 5, 6);
// [4,5) overlaps nothing cached exactly, so it comes back as a plain (uncached) range.
result = cache.getIncompleteCbs(fileKey1, new DiskRangeList(4, 5), 0, gotAllData);
assertFalse(gotAllData.value);
verifyResult(result, DRL, 4, 5);
}
Use of org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics in project hive by apache.
From the class TestLlapCacheMetadataSerializer, method setUp.
@Before
public void setUp() {
  // Builds a minimal LLAP cache stack (allocator + LRFU policy + data/metadata caches)
  // and wires it into the serializer under test.
  conf = new Configuration();
  HiveConf.setIntVar(conf, HiveConf.ConfVars.LLAP_LRFU_BP_WRAPPER_SIZE, 1);
  HiveConf.setFloatVar(conf, HiveConf.ConfVars.LLAP_LRFU_HOTBUFFERS_PERCENTAGE, 1.0f);
  BuddyAllocator buddyAllocator = TestBuddyAllocatorForceEvict.create(16384, 2, 32768, false, true);
  LlapDaemonCacheMetrics metrics = LlapDaemonCacheMetrics.create("", "");
  cachePolicy = new LowLevelLrfuCachePolicy(1, 5, conf);
  LowLevelCacheImpl cache = new LowLevelCacheImpl(metrics, cachePolicy, buddyAllocator, true);
  fileMetadataCache = new MetadataCache(buddyAllocator, null, cachePolicy, false, metrics);
  // Fix: the trace pool was created twice; a single assignment is sufficient.
  tracePool = IoTrace.createTracePool(conf);
  mockDataCache = new LlapIoMocks.MockDataCache(cache, buddyAllocator, cachePolicy);
  pathCache = new MemoryLimitedPathCache(conf);
  serializer =
      new LlapCacheMetadataSerializer(fileMetadataCache, mockDataCache, conf, pathCache, tracePool, cachePolicy);
}
Aggregations