use of io.pravega.segmentstore.server.MetadataBuilder in project pravega by pravega.
the class DurableLogTests method testRecoveryWithDisabledDataLog.
/**
 * Verifies the ability of the DurableLog to recover (delayed start) using a disabled DurableDataLog. This verifies
 * the ability to shut down correctly while still waiting for the DataLog to become enabled, as well as detecting
 * that it did become enabled and then resuming normal operations.
 */
@Test
public void testRecoveryWithDisabledDataLog() throws Exception {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    @Cleanup
    InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup
    CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());

    // Write some data to the log. We'll read it later.
    HashSet<Long> streamSegmentIds;
    List<Operation> originalOperations;
    List<OperationWithCompletion> completionFutures;
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // DurableLog should start properly.
        durableLog.startAsync().awaitRunning();
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(operations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
    }

    // Disable the DurableDataLog. This requires us to initialize the log, then disable it.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // DurableLog should start properly.
        durableLog.startAsync().awaitRunning();
        CompletableFuture<Void> online = durableLog.awaitOnline();
        Assert.assertTrue("awaitOnline() returned an incomplete future.", Futures.isSuccessful(online));
        Assert.assertFalse("Not expecting an offline DurableLog.", durableLog.isOffline());
        dataLog.get().disable();
    }

    // Verify that the DurableLog starts properly and that all operations throw appropriate exceptions.
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // DurableLog should start properly.
        durableLog.startAsync().awaitRunning();
        CompletableFuture<Void> online = durableLog.awaitOnline();
        Assert.assertFalse("awaitOnline() returned a completed future.", online.isDone());
        Assert.assertTrue("Expecting an offline DurableLog.", durableLog.isOffline());

        // Verify all operations fail with the right exception.
        AssertExtensions.assertThrows("add() did not fail with the right exception when offline.",
                () -> durableLog.add(new ProbeOperation(), TIMEOUT),
                ex -> ex instanceof ContainerOfflineException);
        AssertExtensions.assertThrows("read() did not fail with the right exception when offline.",
                () -> durableLog.read(0, 1, TIMEOUT),
                ex -> ex instanceof ContainerOfflineException);
        AssertExtensions.assertThrows("truncate() did not fail with the right exception when offline.",
                () -> durableLog.truncate(0, TIMEOUT),
                ex -> ex instanceof ContainerOfflineException);
        AssertExtensions.assertThrows("operationProcessingBarrier() did not fail with the right exception when offline.",
                () -> durableLog.operationProcessingBarrier(TIMEOUT),
                ex -> ex instanceof ContainerOfflineException);

        // Verify we can also shut it down properly from this state.
        durableLog.stopAsync().awaitTerminated();
        Assert.assertTrue("Future returned by awaitOnline() did not fail when the DurableLog shut down.", online.isCompletedExceptionally());
    }

    // Verify that, when the DurableDataLog becomes enabled, the DurableLog picks up the change and resumes normal operations.
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // DurableLog should start properly.
        durableLog.startAsync().awaitRunning();
        CompletableFuture<Void> online = durableLog.awaitOnline();
        Assert.assertFalse("awaitOnline() returned a completed future.", online.isDone());

        // Enable the underlying data log and wait for recovery to finish.
        dataLog.get().enable();
        online.get(START_RETRY_DELAY_MILLIS * 100, TimeUnit.MILLISECONDS);
        Assert.assertFalse("Not expecting an offline DurableLog after re-enabling.", durableLog.isOffline());

        // Verify we can still read the data that we wrote before the DataLog was disabled.
        List<Operation> recoveredOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        assertRecoveredOperationsMatch(originalOperations, recoveredOperations);
        performMetadataChecks(streamSegmentIds, new HashSet<>(), new HashMap<>(), completionFutures, metadata, false, false);
        performReadIndexChecks(completionFutures, readIndex);

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
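The contract exercised above is easy to state in isolation: awaitOnline() hands out a future that completes once the underlying log is enabled, and a shutdown while still offline must fail that future rather than leave callers waiting forever. The following is a minimal, self-contained sketch of that contract, not Pravega's implementation; the class and method names (DelayedStartComponent, notifyDataLogEnabled) are hypothetical.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

class DelayedStartComponent {
    private final CompletableFuture<Void> online = new CompletableFuture<>();
    private final AtomicBoolean offline = new AtomicBoolean(true);

    // Returns a future that completes only once the underlying data log is enabled.
    CompletableFuture<Void> awaitOnline() {
        return online;
    }

    boolean isOffline() {
        return offline.get();
    }

    // Invoked when the underlying data log becomes enabled: flip the state and release waiters.
    void notifyDataLogEnabled() {
        offline.set(false);
        online.complete(null);
    }

    // Shutting down while still offline must fail pending awaitOnline() futures
    // (completeExceptionally is a no-op if the component already came online).
    void shutDown() {
        online.completeExceptionally(new IllegalStateException("Shut down while offline."));
    }
}

With this shape, the three phases of the test map directly onto the API: an enabled log completes awaitOnline() immediately, a disabled log leaves it pending, and shutDown() while pending makes isCompletedExceptionally() return true.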
use of io.pravega.segmentstore.server.MetadataBuilder in project pravega by pravega.
the class ContainerMetadataUpdateTransactionTests method testSegmentMapMax.
/**
* Tests the ability to reject new StreamSegment/Transaction map operations that would exceed the max allowed counts.
*/
@Test
public void testSegmentMapMax() throws Exception {
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).withMaxActiveSegmentCount(3).build();
    metadata.mapStreamSegmentId("a", SEGMENT_ID);
    metadata.mapStreamSegmentId("a_txn1", 123457, SEGMENT_ID);

    // Non-recovery mode.
    val txn1 = createUpdateTransaction(metadata);

    // Map one segment, which should fill up the quota.
    StreamSegmentMapOperation acceptedMap = createMap();
    txn1.preProcessOperation(acceptedMap);
    txn1.acceptOperation(acceptedMap);

    // Verify non-recovery mode.
    AssertExtensions.assertThrows("Unexpected behavior from preProcessOperation when attempting to map a StreamSegment that would exceed the active segment quota.",
            () -> txn1.preProcessOperation(createMap("foo")),
            ex -> ex instanceof TooManyActiveSegmentsException);
    AssertExtensions.assertThrows("Unexpected behavior from preProcessOperation when attempting to map a Transaction that would exceed the active segment quota.",
            () -> txn1.preProcessOperation(createTransactionMap(SEGMENT_ID, "foo")),
            ex -> ex instanceof TooManyActiveSegmentsException);

    // Verify recovery mode.
    metadata.enterRecoveryMode();
    val txn2 = createUpdateTransaction(metadata);
    // updater.setOperationSequenceNumber(10000);
    StreamSegmentMapOperation secondMap = createMap("c");
    secondMap.setStreamSegmentId(1234);
    txn2.preProcessOperation(secondMap);
    txn2.acceptOperation(secondMap);
    StreamSegmentMapOperation secondTxnMap = createTransactionMap(SEGMENT_ID, "a_txn2");
    secondTxnMap.setStreamSegmentId(1235);
    txn2.preProcessOperation(secondTxnMap);
    txn2.acceptOperation(secondTxnMap);
    txn2.commit(metadata);
    metadata.exitRecoveryMode();
    Assert.assertNotNull("Updater did not create metadata for the new segment in recovery mode, even though the quota was exceeded.", metadata.getStreamSegmentMetadata(secondMap.getStreamSegmentId()));
    Assert.assertNotNull("Updater did not create metadata for the new transaction in recovery mode, even though the quota was exceeded.", metadata.getStreamSegmentMetadata(secondTxnMap.getStreamSegmentId()));
}
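For readers unfamiliar with the quota rule this test verifies, a minimal sketch may help: new mappings are rejected once the active-segment count reaches the configured maximum, except in recovery mode, where previously journaled mappings must always be replayable regardless of the quota. This SegmentQuota class is hypothetical, not Pravega's ContainerMetadataUpdateTransaction, and it throws a plain IllegalStateException where the real code throws TooManyActiveSegmentsException.

import java.util.HashMap;
import java.util.Map;

class SegmentQuota {
    private final int maxActiveSegmentCount;
    private final Map<Long, String> activeSegments = new HashMap<>();
    private boolean recoveryMode;

    SegmentQuota(int maxActiveSegmentCount) {
        this.maxActiveSegmentCount = maxActiveSegmentCount;
    }

    void enterRecoveryMode() {
        this.recoveryMode = true;
    }

    void exitRecoveryMode() {
        this.recoveryMode = false;
    }

    // Rejects the mapping when the quota is full, unless we are replaying the
    // log in recovery mode (those mappings were already accepted once).
    void map(long segmentId, String name) {
        if (!recoveryMode && activeSegments.size() >= maxActiveSegmentCount) {
            throw new IllegalStateException("Too many active segments: " + activeSegments.size());
        }
        activeSegments.put(segmentId, name);
    }
}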
use of io.pravega.segmentstore.server.MetadataBuilder in project pravega by pravega.
the class DurableLogTests method testTruncateWithRecovery.
/**
* Tests the truncate() method while performing recovery.
*/
@Test
public void testTruncateWithRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;

    // Set up a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup
    CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup
    ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
    HashSet<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;
    List<Operation> originalOperations;

    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Generate some test data (we need to do this after we started the DurableLog because, in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();

        // Get a list of all the operations, before any truncation.
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }

    // Truncate up to each MetadataCheckpointOperation and:
    // * If the DataLog was truncated: shut down the DurableLog, restart it (recovery) and verify the remaining
    //   operations are as expected.
    // At the end, verify all operations and all entries in the DataLog were truncated.
    DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
    try {
        durableLog.startAsync().awaitRunning();
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            if (!(currentOperation instanceof MetadataCheckpointOperation)) {
                // We can only truncate on MetadataCheckpointOperations.
                continue;
            }

            truncationOccurred.set(false);
            durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
            if (truncationOccurred.get()) {
                // Close the current DurableLog and start a brand new one, forcing recovery.
                durableLog.close();
                durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
                durableLog.startAsync().awaitRunning();
                dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));

                // Verify all operations up to, and including, this one have been removed.
                Iterator<Operation> reader = durableLog.read(-1, 2, TIMEOUT).join();
                Assert.assertTrue("Not expecting an empty log after truncating an operation (a MetadataCheckpoint must always exist).", reader.hasNext());
                verifyFirstItemIsMetadataCheckpoint(reader);
                if (i < originalOperations.size() - 1) {
                    Operation firstOp = reader.next();
                    OperationComparer.DEFAULT.assertEquals(String.format("Unexpected first operation after truncating SeqNo %d.", currentOperation.getSequenceNumber()), originalOperations.get(i + 1), firstOp);
                }
            }
        }
    } finally {
        // This closes whatever instance this variable currently refers to, not necessarily the first one.
        durableLog.close();
    }
}
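The test above relies on a specific truncation rule: truncation is only honored at MetadataCheckpointOperation boundaries, and the log can never become empty because a checkpoint always remains at its head. The sketch below illustrates that rule under simplified assumptions; CheckpointedLog and its string-payload operations are hypothetical and not Pravega's DurableLog.

import java.util.Iterator;
import java.util.TreeMap;

class CheckpointedLog {
    private static final String CHECKPOINT = "CHECKPOINT";
    private final TreeMap<Long, String> operations = new TreeMap<>();
    private long nextSeqNo = 1;

    CheckpointedLog() {
        append(CHECKPOINT); // A checkpoint always exists at the head of the log.
    }

    long append(String payload) {
        long seqNo = nextSeqNo++;
        operations.put(seqNo, payload);
        return seqNo;
    }

    // Truncation is only honored at checkpoint boundaries: everything up to and
    // including the given checkpoint is dropped, then a fresh checkpoint is
    // appended so the log never becomes empty.
    void truncate(long checkpointSeqNo) {
        if (!CHECKPOINT.equals(operations.get(checkpointSeqNo))) {
            throw new IllegalArgumentException("Can only truncate on a checkpoint.");
        }
        operations.headMap(checkpointSeqNo, true).clear();
        append(CHECKPOINT);
    }

    Iterator<String> read() {
        return operations.values().iterator();
    }
}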
use of io.pravega.segmentstore.server.MetadataBuilder in project pravega by pravega.
the class DurableLogTests method testRecoveryWithMetadataCleanup.
/**
* Tests the following recovery scenario:
* 1. A Segment is created and recorded in the metadata with some optional operations executing on it.
* 2. The segment is evicted from the metadata.
* 3. The segment is reactivated (with a new metadata mapping) - possibly due to an append. No truncation since #2.
* 4. Recovery.
*/
@Test
public void testRecoveryWithMetadataCleanup() throws Exception {
    final long truncatedSeqNo = Integer.MAX_VALUE;

    // Set up a DurableLog and start it.
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    long segmentId;

    // First DurableLog. We use this for generating data.
    val metadata1 = (StreamSegmentContainerMetadata) new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup
    CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    SegmentProperties originalSegmentInfo;
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Create the segment.
        val segmentIds = createStreamSegmentsWithOperations(1, metadata1, durableLog, storage);
        segmentId = segmentIds.stream().findFirst().orElse(-1L);

        // Evict the segment.
        val sm1 = metadata1.getStreamSegmentMetadata(segmentId);
        originalSegmentInfo = sm1.getSnapshot();

        // Simulate a truncation. This is needed in order to trigger a cleanup.
        metadata1.removeTruncationMarkers(truncatedSeqNo);
        val cleanedUpSegments = metadata1.cleanup(Collections.singleton(sm1), truncatedSeqNo);
        Assert.assertEquals("Unexpected number of segments evicted.", 1, cleanedUpSegments.size());

        // Map the segment again.
        val reMapOp = new StreamSegmentMapOperation(originalSegmentInfo);
        reMapOp.setStreamSegmentId(segmentId);
        durableLog.add(reMapOp, TIMEOUT).join();

        // Stop.
        durableLog.stopAsync().awaitTerminated();
    }

    // Recovery #1. This should work well.
    val metadata2 = (StreamSegmentContainerMetadata) new MetadataBuilder(CONTAINER_ID).build();
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Get segment info.
        val recoveredSegmentInfo = metadata1.getStreamSegmentMetadata(segmentId).getSnapshot();
        Assert.assertEquals("Unexpected length from recovered segment.", originalSegmentInfo.getLength(), recoveredSegmentInfo.getLength());

        // Now evict the segment again ...
        val sm = metadata2.getStreamSegmentMetadata(segmentId);

        // Simulate a truncation. This is needed in order to trigger a cleanup.
        metadata2.removeTruncationMarkers(truncatedSeqNo);
        val cleanedUpSegments = metadata2.cleanup(Collections.singleton(sm), truncatedSeqNo);
        Assert.assertEquals("Unexpected number of segments evicted.", 1, cleanedUpSegments.size());

        // ... and re-map it with a new Id. This is a perfectly valid operation, and we can't prevent it.
        durableLog.add(new StreamSegmentMapOperation(originalSegmentInfo), TIMEOUT).join();

        // Stop.
        durableLog.stopAsync().awaitTerminated();
    }

    // Recovery #2. This should fail due to the same segment being mapped multiple times with different ids.
    val metadata3 = (StreamSegmentContainerMetadata) new MetadataBuilder(CONTAINER_ID).build();
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata3, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata3, dataLogFactory, readIndex, executorService())) {
        AssertExtensions.assertThrows("Recovery did not fail with the expected exception in case of multi-mapping.",
                () -> durableLog.startAsync().awaitRunning(),
                ex -> ex instanceof IllegalStateException
                        && ex.getCause() instanceof DataCorruptionException
                        && ex.getCause().getCause() instanceof MetadataUpdateException);
    }
}
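Recovery #2 fails because replaying the log finds the same segment name mapped to two different ids. Below is a minimal sketch of such a consistency check, assuming a simple name-to-id map; RecoveryMappingValidator is hypothetical, and it throws a plain IllegalStateException where the real code surfaces a MetadataUpdateException wrapped in a DataCorruptionException.

import java.util.HashMap;
import java.util.Map;

class RecoveryMappingValidator {
    private final Map<String, Long> nameToId = new HashMap<>();

    // Called for every mapping operation replayed during recovery. A name that
    // resolves to two different ids means the log is corrupted.
    void onSegmentMapped(String segmentName, long segmentId) {
        Long existingId = nameToId.putIfAbsent(segmentName, segmentId);
        if (existingId != null && existingId != segmentId) {
            throw new IllegalStateException(String.format(
                    "Segment '%s' mapped to both id %d and id %d.", segmentName, existingId, segmentId));
        }
    }
}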
use of io.pravega.segmentstore.server.MetadataBuilder in project pravega by pravega.
the class MemoryStateUpdaterTests method testRecoveryMode.
/**
* Tests the ability of the MemoryStateUpdater to delegate Enter/Exit recovery mode to the read index.
*/
@Test
public void testRecoveryMode() throws Exception {
    // Check that it's properly delegated to the ReadIndex.
    SequencedItemList<Operation> opLog = new SequencedItemList<>();
    ArrayList<TestReadIndex.MethodInvocation> methodInvocations = new ArrayList<>();
    TestReadIndex readIndex = new TestReadIndex(methodInvocations::add);
    MemoryStateUpdater updater = new MemoryStateUpdater(opLog, readIndex, Runnables.doNothing());
    UpdateableContainerMetadata metadata1 = new MetadataBuilder(1).build();
    updater.enterRecoveryMode(metadata1);
    updater.exitRecoveryMode(true);
    Assert.assertEquals("Unexpected number of method invocations.", 2, methodInvocations.size());
    TestReadIndex.MethodInvocation enterRecovery = methodInvocations.get(0);
    Assert.assertEquals("ReadIndex.enterRecoveryMode was not called when expected.", TestReadIndex.ENTER_RECOVERY_MODE, enterRecovery.methodName);
    Assert.assertEquals("ReadIndex.enterRecoveryMode was called with the wrong arguments.", metadata1, enterRecovery.args.get("recoveryMetadataSource"));
    TestReadIndex.MethodInvocation exitRecovery = methodInvocations.get(1);
    Assert.assertEquals("ReadIndex.exitRecoveryMode was not called when expected.", TestReadIndex.EXIT_RECOVERY_MODE, exitRecovery.methodName);
    Assert.assertEquals("ReadIndex.exitRecoveryMode was called with the wrong arguments.", true, exitRecovery.args.get("successfulRecovery"));
}
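TestReadIndex, used above, is a hand-rolled spy: each call records its method name and arguments so the test can assert on the exact delegation sequence without a mocking framework. A minimal sketch of that pattern follows; RecordingReadIndex is hypothetical, not Pravega's TestReadIndex.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

class RecordingReadIndex {
    static final String ENTER_RECOVERY_MODE = "enterRecoveryMode";
    static final String EXIT_RECOVERY_MODE = "exitRecoveryMode";

    // Immutable record of a single call: which method, with which named arguments.
    static class MethodInvocation {
        final String methodName;
        final Map<String, Object> args;

        MethodInvocation(String methodName, Map<String, Object> args) {
            this.methodName = methodName;
            this.args = args;
        }
    }

    private final Consumer<MethodInvocation> recorder;

    RecordingReadIndex(Consumer<MethodInvocation> recorder) {
        this.recorder = recorder;
    }

    void enterRecoveryMode(Object recoveryMetadataSource) {
        Map<String, Object> args = new HashMap<>();
        args.put("recoveryMetadataSource", recoveryMetadataSource);
        recorder.accept(new MethodInvocation(ENTER_RECOVERY_MODE, args));
    }

    void exitRecoveryMode(boolean successfulRecovery) {
        Map<String, Object> args = new HashMap<>();
        args.put("successfulRecovery", successfulRecovery);
        recorder.accept(new MethodInvocation(EXIT_RECOVERY_MODE, args));
    }
}

Handing the test a Consumer (here methodInvocations::add) keeps the double reusable: the same recording class works whether the test wants a list, a counter, or immediate assertions.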