Use of io.pravega.segmentstore.storage.cache.DirectMemoryCache in project pravega by pravega.
From the class DurableLogTests, method testRecoveryWithMetadataCleanup.
/**
 * Tests the following recovery scenario:
 * 1. A Segment is created and recorded in the metadata, with some optional operations executing on it.
 * 2. The Segment is evicted from the metadata.
 * 3. The Segment is reactivated (with a new metadata mapping), possibly due to an append. No truncation has occurred since step 2.
 * 4. Recovery.
 */
@Test
public void testRecoveryWithMetadataCleanup() throws Exception {
    final long truncatedSeqNo = Integer.MAX_VALUE;

    // Setup a DurableLog and start it.
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    long segmentId;

    // First DurableLog. We use this for generating data.
    val metadata1 = (StreamSegmentContainerMetadata) new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup
    CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    SegmentProperties originalSegmentInfo;
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Create the segment.
        val segmentIds = createStreamSegmentsWithOperations(1, durableLog);
        segmentId = segmentIds.stream().findFirst().orElse(-1L);

        // Evict the segment.
        val sm1 = metadata1.getStreamSegmentMetadata(segmentId);
        originalSegmentInfo = sm1.getSnapshot();

        // Simulate a truncation. This is needed in order to trigger a cleanup.
        metadata1.removeTruncationMarkers(truncatedSeqNo);
        val cleanedUpSegments = metadata1.cleanup(Collections.singleton(sm1), truncatedSeqNo);
        Assert.assertEquals("Unexpected number of segments evicted.", 1, cleanedUpSegments.size());

        // Map the segment again.
        val reMapOp = new StreamSegmentMapOperation(originalSegmentInfo);
        reMapOp.setStreamSegmentId(segmentId);
        durableLog.add(reMapOp, OperationPriority.Normal, TIMEOUT).join();

        // Stop.
        durableLog.stopAsync().awaitTerminated();
    }

    // Recovery #1. This should work well.
    val metadata2 = (StreamSegmentContainerMetadata) new MetadataBuilder(CONTAINER_ID).build();
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Get segment info.
        val recoveredSegmentInfo = metadata2.getStreamSegmentMetadata(segmentId).getSnapshot();
        Assert.assertEquals("Unexpected length from recovered segment.", originalSegmentInfo.getLength(), recoveredSegmentInfo.getLength());
        // Now evict the segment again ...
        val sm = metadata2.getStreamSegmentMetadata(segmentId);

        // Simulate a truncation. This is needed in order to trigger a cleanup.
        metadata2.removeTruncationMarkers(truncatedSeqNo);
        val cleanedUpSegments = metadata2.cleanup(Collections.singleton(sm), truncatedSeqNo);
        Assert.assertEquals("Unexpected number of segments evicted.", 1, cleanedUpSegments.size());

        // ... and re-map it with a new Id. This is a perfectly valid operation, and we can't prevent it.
        durableLog.add(new StreamSegmentMapOperation(originalSegmentInfo), OperationPriority.Normal, TIMEOUT).join();

        // Stop.
        durableLog.stopAsync().awaitTerminated();
    }

    // Recovery #2. This should fail due to the same segment mapped multiple times with different ids.
    val metadata3 = (StreamSegmentContainerMetadata) new MetadataBuilder(CONTAINER_ID).build();
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata3, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata3, dataLogFactory, readIndex, executorService())) {
        AssertExtensions.assertThrows("Recovery did not fail with the expected exception in case of multi-mapping.",
                () -> durableLog.startAsync().awaitRunning(),
                ex -> ex instanceof IllegalStateException
                        && ex.getCause() instanceof DataCorruptionException
                        && ex.getCause().getCause() instanceof MetadataUpdateException);
    }
}
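
In the test above, DirectMemoryCache acts as the CacheStorage backing the CacheManager and ReadIndex. For orientation, here is a minimal standalone sketch of the basic entry lifecycle. It is a sketch under assumptions, not project code: insert(BufferView) and getState() appear in the tests on this page, while get(int), delete(int), getUsedBytes() and close() are assumptions about the same CacheStorage interface.

import io.pravega.common.util.BufferView;
import io.pravega.common.util.ByteArraySegment;
import io.pravega.segmentstore.storage.cache.CacheStorage;
import io.pravega.segmentstore.storage.cache.DirectMemoryCache;

import java.nio.charset.StandardCharsets;

public class DirectMemoryCacheSketch {
    public static void main(String[] args) {
        // Nominal capacity; DirectMemoryCache may round the effective capacity up internally,
        // which is why the tests read it back via getState().getMaxBytes().
        CacheStorage cache = new DirectMemoryCache(16 * 1024 * 1024);
        try {
            // insert() copies the payload into the cache and returns an address for later lookups.
            int address = cache.insert(new ByteArraySegment("hello".getBytes(StandardCharsets.UTF_8)));

            // get() returns a BufferView over the stored bytes (assumed accessor).
            BufferView stored = cache.get(address);
            System.out.println(new String(stored.getCopy(), StandardCharsets.UTF_8));

            // delete() releases the entry (assumed accessor); usage is reported via getState().
            cache.delete(address);
            System.out.println("Used bytes after delete: " + cache.getState().getUsedBytes());
        } finally {
            cache.close(); // Frees the underlying direct memory buffers.
        }
    }
}

The int address returned by insert is the only handle to the entry; callers keep these addresses in their own metadata, which is why the tests wire the cache through a CacheManager with an explicit CachePolicy.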
Use of io.pravega.segmentstore.storage.cache.DirectMemoryCache in project pravega by pravega.
From the class ContainerKeyCacheTests, method testMigrationCandidateFailedCacheFull.
/**
 * Tests the {@link SegmentKeyCache.MigrationCandidate} class when the cache is full.
 */
@Test
public void testMigrationCandidateFailedCacheFull() {
    @Cleanup
    val fullCache = new DirectMemoryCache(10);
    fullCache.insert(new ByteArraySegment(new byte[(int) (fullCache.getState().getMaxBytes() - fullCache.getBlockAlignment())]));
    val keyCache = new SegmentKeyCache(1L, fullCache);
    val rnd = new Random(0);

    val batch = TableKeyBatch.update();
    val key = newTableKey(rnd);
    val keyHash = KEY_HASHER.hash(key.getKey());
    batch.add(key, keyHash, key.getKey().getLength());
    keyCache.includeUpdateBatch(batch, 0, 0);

    List<SegmentKeyCache.CacheEntry> evictedEntries = null;
    try {
        // Migrate the tail index to a Cache Entry.
        keyCache.setLastIndexedOffset(batch.getLength() + 1, 1);

        // Try to evict all entries.
        evictedEntries = keyCache.evictAll();

        // We expect no evictions because, with the cache full, nothing should have been inserted.
        Assert.assertEquals(0, evictedEntries.size());
    } finally {
        // Clean up the cache in case of an error. We do not want to leave data hanging around in the Cache.
        if (evictedEntries != null) {
            evictedEntries.forEach(SegmentKeyCache.CacheEntry::evict);
        }
    }
}
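
Two details of the setup above are easy to miss. The cache is constructed with a nominal size of 10 bytes, but the test derives the real capacity from getState().getMaxBytes(), since DirectMemoryCache sizes itself in whole blocks. Filling it to getMaxBytes() - getBlockAlignment() then leaves less than one free block, so the SegmentKeyCache.MigrationCandidate cannot insert its entry and evictAll() returns nothing. A hedged sketch of that "cache full" behavior follows; the CacheFullException type (and that insert rejects new entries this way) is an assumption about the io.pravega.segmentstore.storage.cache package, not something shown in the tests above.

import io.pravega.common.util.ByteArraySegment;
import io.pravega.segmentstore.storage.cache.CacheFullException;
import io.pravega.segmentstore.storage.cache.CacheStorage;
import io.pravega.segmentstore.storage.cache.DirectMemoryCache;

public class CacheFullSketch {
    public static void main(String[] args) {
        CacheStorage cache = new DirectMemoryCache(10);
        try {
            // Same trick as the test: fill the cache so that less than one block remains free.
            int fillSize = (int) (cache.getState().getMaxBytes() - cache.getBlockAlignment());
            cache.insert(new ByteArraySegment(new byte[fillSize]));
            try {
                // Any further insert needs at least one whole block, so it should be rejected.
                cache.insert(new ByteArraySegment(new byte[1]));
                System.out.println("Unexpected: the insert succeeded.");
            } catch (CacheFullException ex) { // Assumed exception type.
                System.out.println("Cache is full, as expected.");
            }
        } finally {
            cache.close();
        }
    }
}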