Use of io.pravega.segmentstore.storage.cache.DirectMemoryCache in project pravega by pravega.
From class CacheManagerTests, method testCleanupListeners.
/**
* Tests the ability to register, invoke and auto-unregister {@link ThrottleSourceListener} instances.
*/
@Test
public void testCleanupListeners() {
    final CachePolicy policy = new CachePolicy(1024, Duration.ofHours(1), Duration.ofHours(1));
    @Cleanup val cache = new DirectMemoryCache(policy.getMaxSize());
    @Cleanup TestCacheManager cm = new TestCacheManager(policy, cache, executorService());
    TestClient client = new TestClient();
    cm.register(client);
    TestCleanupListener l1 = new TestCleanupListener();
    TestCleanupListener l2 = new TestCleanupListener();
    cm.getUtilizationProvider().registerCleanupListener(l1);
    cm.getUtilizationProvider().registerCleanupListener(l2);
    // We always remove something.
    client.setUpdateGenerationsImpl((current, oldest, essentialOnly) -> true);
    // In the first iteration, we should invoke both listeners.
    client.setCacheStatus(0, 0);
    // Put something in the cache so the cleanup can execute.
    cache.insert(new ByteArraySegment(new byte[1]));
    cm.runOneIteration();
    Assert.assertEquals("Expected cleanup listener to be invoked the first time.", 1, l1.getCallCount());
    Assert.assertEquals("Expected cleanup listener to be invoked the first time.", 1, l2.getCallCount());
    // Close one of the listeners, and verify that only the other one is invoked now.
    l2.setClosed(true);
    client.setCacheStatus(0, 1);
    cm.runOneIteration();
    Assert.assertEquals("Expected cleanup listener to be invoked the second time.", 2, l1.getCallCount());
    Assert.assertEquals("Not expecting cleanup listener to be invoked the second time for closed listener.", 1, l2.getCallCount());
    // This should have no effect.
    cm.getUtilizationProvider().registerCleanupListener(l2);
}
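The TestCleanupListener used above is a helper class from the test suite and is not shown here. The sketch below is only an assumption of what such a listener might look like; it presumes the ThrottleSourceListener contract consists of notifyThrottleSourceChanged() (called after a cleanup iteration) and isClosed() (used to auto-unregister the listener), and it relies on the surrounding test class's imports (e.g. java.util.concurrent.atomic.AtomicInteger). All names and fields are illustrative.

// Illustrative sketch only; not taken from the pravega test suite.
private static class TestCleanupListener implements ThrottleSourceListener {
    private final AtomicInteger callCount = new AtomicInteger();
    private volatile boolean closed = false;

    @Override
    public void notifyThrottleSourceChanged() {
        // Invoked after each cleanup iteration; the test reads this counter via getCallCount().
        this.callCount.incrementAndGet();
    }

    @Override
    public boolean isClosed() {
        // Once this returns true, the provider drops the listener and stops invoking it.
        return this.closed;
    }

    int getCallCount() {
        return this.callCount.get();
    }

    void setClosed(boolean closed) {
        this.closed = closed;
    }
}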
Use of io.pravega.segmentstore.storage.cache.DirectMemoryCache in project pravega by pravega.
From class CacheManagerTests, method testCacheFullCleanup.
/**
* Tests the ability to auto-cleanup the cache if it indicates it has reached capacity and needs some eviction(s)
* in order to accommodate more data.
*/
@Test
public void testCacheFullCleanup() {
    final CachePolicy policy = new CachePolicy(1024, Duration.ofHours(1), Duration.ofHours(1));
    @Cleanup val cache = new DirectMemoryCache(policy.getMaxSize());
    int maxCacheSize = (int) cache.getState().getMaxBytes();
    @Cleanup TestCacheManager cm = new TestCacheManager(policy, cache, executorService());
    TestClient client = new TestClient();
    cm.register(client);
    // Almost fill up the cache.
    int length1 = maxCacheSize / 2;
    val write1 = cache.insert(new ByteArraySegment(new byte[length1]));
    // Set up the TestClient to evict write1 when requested.
    val cleanupRequestCount = new AtomicInteger(0);
    client.setCacheStatus(0, 1);
    client.setUpdateGenerationsImpl((ng, og, essentialOnly) -> {
        cleanupRequestCount.incrementAndGet();
        cache.delete(write1);
        return true;
    });
    // Insert an entry that would overflow the cache, forcing a cleanup.
    int length2 = maxCacheSize / 2 + 1;
    val write2 = cache.insert(new ByteArraySegment(new byte[length2]));
    // Verify we were asked to clean up.
    Assert.assertEquals("Unexpected number of cleanup requests.", 1, cleanupRequestCount.get());
    Assert.assertEquals("New entry was not inserted.", length2, cache.get(write2).getLength());
    Assert.assertEquals("Unexpected number of stored bytes.", length2, cache.getState().getStoredBytes());
}
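For reference, here is a self-contained sketch (not taken from the test suite) of the DirectMemoryCache calls the test above exercises: insert() stores a buffer and returns an address, get() reads it back, delete() frees it, and getState() reports usage. The class name and the 1 MB limit are arbitrary illustrative choices; the usable capacity may be rounded up by the implementation, which is why the tests read getState().getMaxBytes() rather than trusting the requested size.

import io.pravega.common.util.ByteArraySegment;
import io.pravega.segmentstore.storage.cache.DirectMemoryCache;

public class DirectMemoryCacheExample {
    public static void main(String[] args) throws Exception {
        // Create a cache with an (illustrative) 1 MB limit.
        try (DirectMemoryCache cache = new DirectMemoryCache(1024 * 1024)) {
            // Insert a 128-byte entry and remember its address.
            int address = cache.insert(new ByteArraySegment(new byte[128]));
            System.out.println("Entry length: " + cache.get(address).getLength());
            System.out.println("Stored bytes: " + cache.getState().getStoredBytes());
            // Free the entry when it is no longer needed.
            cache.delete(address);
        }
    }
}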
Use of io.pravega.segmentstore.storage.cache.DirectMemoryCache in project pravega by pravega.
From class CacheManagerTests, method testApplyPolicyConcurrency.
/**
* Tests the ability to handle concurrent requests to {@link CacheManager#applyCachePolicy()}.
*/
@Test
public void testApplyPolicyConcurrency() throws Exception {
    // Set up the cache, the cache manager and a test client.
    final CachePolicy policy = new CachePolicy(1024, Duration.ofHours(1), Duration.ofHours(1));
    @Cleanup val cache = new DirectMemoryCache(policy.getMaxSize());
    int maxCacheSize = (int) cache.getState().getMaxBytes();
    @Cleanup TestCacheManager cm = new TestCacheManager(policy, cache, executorService());
    TestClient client = new TestClient();
    cm.register(client);
    // Almost fill up the cache (75%).
    int initialLength = maxCacheSize * 3 / 4;
    val initialWrite = cache.insert(new ByteArraySegment(new byte[initialLength]));
    // Set up the TestClient to evict initialWrite when the first cleanup is requested.
    val firstCleanupRequested = new CompletableFuture<Void>();
    val firstCleanupBlock = new CompletableFuture<Void>();
    val cleanupRequestCount = new AtomicInteger(0);
    val concurrentRequest = new AtomicBoolean(false);
    client.setCacheStatus(0, 1);
    client.setUpdateGenerationsImpl((ng, og, essentialOnly) -> {
        int rc = cleanupRequestCount.incrementAndGet();
        if (rc == 1) {
            // This is the first concurrent request requesting a cleanup.
            // Notify that cleanup has been requested.
            firstCleanupRequested.complete(null);
            // Wait until we are ready to proceed.
            firstCleanupBlock.join();
            // We only need to delete this once.
            cache.delete(initialWrite);
        } else {
            // This is the second concurrent request requesting a cleanup.
            if (!firstCleanupBlock.isDone()) {
                // This has executed before the first request completed.
                concurrentRequest.set(true);
            }
        }
        return true;
    });
    // Send one write that would end up filling the cache.
    int length1 = maxCacheSize / 3;
    val write1Future = CompletableFuture.supplyAsync(() -> cache.insert(new ByteArraySegment(new byte[length1])), executorService());
    // Wait for the cleanup to be requested.
    firstCleanupRequested.get(CLEANUP_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    // Send another write that would also fill up the cache.
    int length2 = length1 + 1;
    val write2Future = CompletableFuture.supplyAsync(() -> cache.insert(new ByteArraySegment(new byte[length2])), executorService());
    // Unblock the first cleanup.
    firstCleanupBlock.complete(null);
    // Get the results of the two suspended writes.
    val write1 = write1Future.get(CLEANUP_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    val write2 = write2Future.get(CLEANUP_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    // Verify that things did work as intended.
    Assert.assertFalse("Concurrent call to applyCachePolicy detected.", concurrentRequest.get());
    AssertExtensions.assertGreaterThanOrEqual("Unexpected number of cleanup requests.", 1, cleanupRequestCount.get());
    Assert.assertEquals("Unexpected entry #2.", length1, cache.get(write1).getLength());
    Assert.assertEquals("Unexpected entry #3.", length2, cache.get(write2).getLength());
}
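The invariant this test verifies, that a second cleanup request never runs while the first is still in progress, can be illustrated with a generic sketch. This is not the actual CacheManager implementation; the class and method names below are made up purely for illustration.

// Generic illustration of the "one cleanup at a time" invariant; not pravega code.
final class SerializedCleanup {
    private final Object lock = new Object();
    private boolean running = false;

    void requestCleanup(Runnable doCleanup) throws InterruptedException {
        synchronized (this.lock) {
            while (this.running) {
                // Another thread is already running a cleanup; wait for it to finish.
                this.lock.wait();
            }
            this.running = true;
        }
        try {
            // Only one thread executes the cleanup at any given moment.
            doCleanup.run();
        } finally {
            synchronized (this.lock) {
                this.running = false;
                this.lock.notifyAll();
            }
        }
    }
}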
Use of io.pravega.segmentstore.storage.cache.DirectMemoryCache in project pravega by pravega.
From class DurableLogTests, method testRecoveryWithIncrementalCheckpoints.
/**
* Tests the DurableLog recovery process when there are multiple {@link MetadataCheckpointOperation}s added, with each
* such checkpoint including information about evicted segments or segments which had their storage state modified.
*/
@Test
public void testRecoveryWithIncrementalCheckpoints() throws Exception {
    final int streamSegmentCount = 50;
    // Set up a DurableLog and start it.
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    // First DurableLog. We use this for generating data.
    val metadata1 = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    List<Long> deletedIds;
    Set<Long> evictIds;
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Create some segments.
        val segmentIds = new ArrayList<>(createStreamSegmentsWithOperations(streamSegmentCount, durableLog));
        deletedIds = segmentIds.subList(0, 5);
        val mergedFromIds = segmentIds.subList(5, 10);
        // Must be the same length as mergedFromIds.
        val mergedToIds = segmentIds.subList(10, 15);
        evictIds = new HashSet<>(segmentIds.subList(15, 20));
        val changeStorageStateIds = segmentIds.subList(20, segmentIds.size() - 5);
        // Append something to each segment.
        for (val segmentId : segmentIds) {
            if (!evictIds.contains(segmentId)) {
                durableLog.add(new StreamSegmentAppendOperation(segmentId, generateAppendData((int) (long) segmentId), null), OperationPriority.Normal, TIMEOUT).join();
            }
        }
        // Checkpoint 1.
        durableLog.checkpoint(TIMEOUT).join();
        // Delete some segments.
        for (val segmentId : deletedIds) {
            durableLog.add(new DeleteSegmentOperation(segmentId), OperationPriority.Normal, TIMEOUT).join();
        }
        // Checkpoint 2.
        durableLog.checkpoint(TIMEOUT).join();
        // Merge some segments.
        for (int i = 0; i < mergedFromIds.size(); i++) {
            durableLog.add(new StreamSegmentSealOperation(mergedFromIds.get(i)), OperationPriority.Normal, TIMEOUT).join();
            durableLog.add(new MergeSegmentOperation(mergedToIds.get(i), mergedFromIds.get(i)), OperationPriority.Normal, TIMEOUT).join();
        }
        // Checkpoint 3.
        durableLog.checkpoint(TIMEOUT).join();
        // Evict some segments.
        val evictableContainerMetadata = (EvictableMetadata) metadata1;
        metadata1.removeTruncationMarkers(metadata1.getOperationSequenceNumber());
        val toEvict = evictableContainerMetadata.getEvictionCandidates(Integer.MAX_VALUE, segmentIds.size()).stream().filter(m -> evictIds.contains(m.getId())).collect(Collectors.toList());
        val evicted = evictableContainerMetadata.cleanup(toEvict, Integer.MAX_VALUE);
        AssertExtensions.assertContainsSameElements("", evictIds, evicted.stream().map(SegmentMetadata::getId).collect(Collectors.toList()));
        // Checkpoint 4.
        durableLog.checkpoint(TIMEOUT).join();
        // Update storage state for some segments.
        for (val segmentId : changeStorageStateIds) {
            val sm = metadata1.getStreamSegmentMetadata(segmentId);
            if (segmentId % 3 == 0) {
                sm.setStorageLength(sm.getLength());
            }
            if (segmentId % 4 == 0) {
                sm.markSealed();
                sm.markSealedInStorage();
            }
            if (segmentId % 5 == 0) {
                sm.markDeleted();
                sm.markDeletedInStorage();
            }
        }
        // Checkpoint 5.
        durableLog.checkpoint(TIMEOUT).join();
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Second DurableLog. We use this for recovery.
    val metadata2 = new MetadataBuilder(CONTAINER_ID).build();
    try (ContainerReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Validate metadata matches.
        val expectedSegmentIds = metadata1.getAllStreamSegmentIds();
        val actualSegmentIds = metadata2.getAllStreamSegmentIds();
        AssertExtensions.assertContainsSameElements("Unexpected set of recovered segments. Only Active segments expected to have been recovered.", expectedSegmentIds, actualSegmentIds);
        val expectedSegments = expectedSegmentIds.stream().sorted().map(metadata1::getStreamSegmentMetadata).collect(Collectors.toList());
        val actualSegments = actualSegmentIds.stream().sorted().map(metadata2::getStreamSegmentMetadata).collect(Collectors.toList());
        for (int i = 0; i < expectedSegments.size(); i++) {
            val e = expectedSegments.get(i);
            val a = actualSegments.get(i);
            SegmentMetadataComparer.assertEquals("Recovered segment metadata mismatch", e, a);
        }
        // Validate the read index is as it should be. Here, we can only check that the read indices for evicted
        // segments are no longer loaded; we do more thorough checks in the ContainerReadIndexTests suite.
        Streams.concat(evictIds.stream(), deletedIds.stream()).forEach(segmentId -> Assert.assertNull("Not expecting a read index for an evicted or deleted segment.", readIndex.getIndex(segmentId)));
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
Use of io.pravega.segmentstore.storage.cache.DirectMemoryCache in project pravega by pravega.
From class DurableLogTests, method testTruncateWithRecovery.
/**
* Tests the truncate() method while performing recovery.
*/
@Test
public void testTruncateWithRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Set up a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    @Cleanup ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, storage, cacheManager, executorService());
    Set<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;
    List<Operation> originalOperations;
    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, durableLog);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Get a list of all the operations, before any truncation.
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Truncate up to each MetadataCheckpointOperation and:
    // * If the DataLog was truncated:
    // ** Shut down the DurableLog, re-start it (recovery) and verify the operations are as they should be.
    // At the end, verify all operations and all entries in the DataLog were truncated.
    DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
    try {
        durableLog.startAsync().awaitRunning();
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            if (!(currentOperation instanceof MetadataCheckpointOperation)) {
                // We can only truncate on MetadataCheckpointOperations.
                continue;
            }
            truncationOccurred.set(false);
            durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
            if (truncationOccurred.get()) {
                // Close the current DurableLog and start a brand new one, forcing recovery.
                durableLog.close();
                durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
                durableLog.startAsync().awaitRunning();
                dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
                // Verify all operations up to, and including, this one have been removed.
                Queue<Operation> reader = durableLog.read(2, TIMEOUT).join();
                Assert.assertFalse("Not expecting an empty log after truncating an operation (a MetadataCheckpoint must always exist).", reader.isEmpty());
                verifyFirstItemIsMetadataCheckpoint(reader.iterator());
                if (i < originalOperations.size() - 1) {
                    Operation firstOp = reader.poll();
                    OperationComparer.DEFAULT.assertEquals(String.format("Unexpected first operation after truncating SeqNo %d.", currentOperation.getSequenceNumber()), originalOperations.get(i + 1), firstOp);
                }
            }
        }
    } finally {
        // This closes whatever current instance this variable refers to, not necessarily the first one.
        durableLog.close();
    }
}