use of io.pravega.segmentstore.server.UpdateableContainerMetadata in project pravega by pravega.
the class StreamSegmentContainerMetadataTests method testMapStreamSegment.
/**
* Tests the ability to map new StreamSegments (as well as Transactions).
*/
@Test
public void testMapStreamSegment() {
final UpdateableContainerMetadata m = new MetadataBuilder(CONTAINER_ID).build();
final HashMap<Long, Long> segmentIds = new HashMap<>();
for (long i = 0; i < SEGMENT_COUNT; i++) {
final long segmentId = segmentIds.size();
String segmentName = getName(segmentId);
// This should work.
// Change the sequence number, before mapping.
m.nextOperationSequenceNumber();
m.mapStreamSegmentId(segmentName, segmentId);
segmentIds.put(segmentId, m.getOperationSequenceNumber());
Assert.assertEquals("Unexpected value from getStreamSegmentId (Stand-alone Segment).", segmentId, m.getStreamSegmentId(segmentName, false));
// Now check that we cannot re-map the same SegmentId or SegmentName.
AssertExtensions.assertThrows("mapStreamSegmentId allowed mapping the same SegmentId twice.", () -> m.mapStreamSegmentId(segmentName + "foo", segmentId), ex -> ex instanceof IllegalArgumentException);
AssertExtensions.assertThrows("mapStreamSegmentId allowed mapping the same SegmentName twice.", () -> m.mapStreamSegmentId(segmentName, segmentId + 1), ex -> ex instanceof IllegalArgumentException);
for (long j = 0; j < TRANSACTIONS_PER_SEGMENT_COUNT; j++) {
final long transactionId = segmentIds.size();
String transactionName = getName(transactionId);
AssertExtensions.assertThrows("mapStreamSegmentId allowed mapping a Transaction to an inexistent parent.", () -> m.mapStreamSegmentId(transactionName, transactionId, transactionId), ex -> ex instanceof IllegalArgumentException);
// This should work.
// Change the sequence number, before mapping.
m.nextOperationSequenceNumber();
m.mapStreamSegmentId(transactionName, transactionId, segmentId);
segmentIds.put(transactionId, m.getOperationSequenceNumber());
Assert.assertEquals("Unexpected value from getStreamSegmentId (Transaction Segment).", transactionId, m.getStreamSegmentId(transactionName, false));
// Now check that we cannot re-map the same Transaction Id or Name.
AssertExtensions.assertThrows("mapStreamSegmentId allowed mapping the same Transaction SegmentId twice.", () -> m.mapStreamSegmentId(transactionName + "foo", transactionId, segmentId), ex -> ex instanceof IllegalArgumentException);
AssertExtensions.assertThrows("mapStreamSegmentId allowed mapping the same Transaction SegmentName twice.", () -> m.mapStreamSegmentId(transactionName, transactionId + 1, segmentId), ex -> ex instanceof IllegalArgumentException);
// Now check that we cannot map a Transaction to another Transaction.
AssertExtensions.assertThrows("mapStreamSegmentId allowed mapping the a Transaction to another Transaction.", () -> m.mapStreamSegmentId(transactionName + "foo", transactionId + 1, transactionId), ex -> ex instanceof IllegalArgumentException);
}
}
// Check getLastUsed.
for (Map.Entry<Long, Long> e : segmentIds.entrySet()) {
// Increment the SeqNo so we can verify 'updateLastUsed'.
m.nextOperationSequenceNumber();
SegmentMetadata segmentMetadata = m.getStreamSegmentMetadata(e.getKey());
Assert.assertEquals("Unexpected value for getLastUsed for untouched segment.", (long) e.getValue(), segmentMetadata.getLastUsed());
m.getStreamSegmentId(segmentMetadata.getName(), false);
Assert.assertEquals("Unexpected value for getLastUsed for untouched segment.", (long) e.getValue(), segmentMetadata.getLastUsed());
m.getStreamSegmentId(segmentMetadata.getName(), true);
Assert.assertEquals("Unexpected value for getLastUsed for touched segment.", m.getOperationSequenceNumber(), segmentMetadata.getLastUsed());
}
Collection<Long> metadataSegmentIds = m.getAllStreamSegmentIds();
AssertExtensions.assertContainsSameElements("Metadata does not contain the expected Segment Ids", segmentIds.keySet(), metadataSegmentIds);
}
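The calls above reduce to a small mapping API. A minimal sketch, assuming an arbitrary container id and illustrative names/ids (not the test's constants):
UpdateableContainerMetadata md = new MetadataBuilder(0).build();
// Map a stand-alone segment, then a transaction whose parent is that segment.
md.mapStreamSegmentId("segment-0", 1L);
md.mapStreamSegmentId("segment-0#txn", 2L, 1L);
// Look up by name; 'false' means the lookup does not refresh the segment's lastUsed value.
long id = md.getStreamSegmentId("segment-0", false); // == 1L
// Re-mapping either the same name or the same id throws IllegalArgumentException.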
use of io.pravega.segmentstore.server.UpdateableContainerMetadata in project pravega by pravega.
the class StreamSegmentContainerMetadataTests method testMaxActiveSegmentCount.
/**
* Tests the ability of the metadata to enforce the Maximum Active Segment Count rule.
*/
@Test
public void testMaxActiveSegmentCount() {
final int maxCount = 2;
final UpdateableContainerMetadata m = new MetadataBuilder(CONTAINER_ID).withMaxActiveSegmentCount(maxCount).build();
// Map 1 segment + 1 transaction. These should fill up the capacity.
m.mapStreamSegmentId("1", 1);
m.mapStreamSegmentId("2", 2, 1);
// Verify we cannot map anything now.
AssertExtensions.assertThrows("Metadata allowed mapping more segments than indicated (segment).", () -> m.mapStreamSegmentId("3", 3), ex -> ex instanceof IllegalStateException);
AssertExtensions.assertThrows("Metadata allowed mapping more segments than indicated (transaction).", () -> m.mapStreamSegmentId("3", 3, 1), ex -> ex instanceof IllegalStateException);
// Verify we are allowed to do this in recovery mode.
m.enterRecoveryMode();
m.mapStreamSegmentId("3", 3);
m.mapStreamSegmentId("4", 4, 3);
m.exitRecoveryMode();
Assert.assertNotNull("Metadata did not map new segment that exceeded the quota in recovery mode.", m.getStreamSegmentMetadata(3));
Assert.assertNotNull("Metadata did not map new transaction that exceeded the quota in recovery mode.", m.getStreamSegmentMetadata(4));
}
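A sketch of the quota rule under test, assuming a hypothetical container id; the quota applies only outside recovery mode:
UpdateableContainerMetadata md = new MetadataBuilder(0).withMaxActiveSegmentCount(1).build();
md.mapStreamSegmentId("a", 1);
// A second mapping would exceed the quota and throw IllegalStateException...
// ...unless the metadata is in recovery mode, where the limit is not enforced:
md.enterRecoveryMode();
md.mapStreamSegmentId("b", 2);
md.exitRecoveryMode();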
use of io.pravega.segmentstore.server.UpdateableContainerMetadata in project pravega by pravega.
the class StreamSegmentContainerMetadataTests method testReset.
/**
* Tests the ability of the metadata to reset itself.
*/
@Test
public void testReset() {
// Segments, Sequence Number + Truncation markers
final UpdateableContainerMetadata m = new MetadataBuilder(CONTAINER_ID).build();
// Set a high Sequence Number
m.enterRecoveryMode();
m.setOperationSequenceNumber(Integer.MAX_VALUE);
m.setContainerEpoch(Integer.MAX_VALUE + 1L);
m.exitRecoveryMode();
// Populate some StreamSegments.
ArrayList<Long> segmentIds = new ArrayList<>();
for (long i = 0; i < SEGMENT_COUNT; i++) {
final long segmentId = segmentIds.size();
segmentIds.add(segmentId);
m.mapStreamSegmentId(getName(segmentId), segmentId);
for (long j = 0; j < TRANSACTIONS_PER_SEGMENT_COUNT; j++) {
final long transactionId = segmentIds.size();
segmentIds.add(transactionId);
m.mapStreamSegmentId(getName(transactionId), transactionId, segmentId);
}
}
// Add some truncation markers.
final long truncationMarkerSeqNo = 10;
m.recordTruncationMarker(truncationMarkerSeqNo, new TestLogAddress(truncationMarkerSeqNo));
m.setValidTruncationPoint(truncationMarkerSeqNo);
AssertExtensions.assertThrows("reset() worked in non-recovery mode.", m::reset, ex -> ex instanceof IllegalStateException);
// Do the reset.
m.enterRecoveryMode();
m.reset();
m.exitRecoveryMode();
// Verify everything was reset.
Assert.assertEquals("Sequence Number was not reset.", ContainerMetadata.INITIAL_OPERATION_SEQUENCE_NUMBER, m.getOperationSequenceNumber());
AssertExtensions.assertLessThan("Epoch was not reset.", 0, m.getContainerEpoch());
for (long segmentId : segmentIds) {
Assert.assertEquals("SegmentMetadata was not reset (getStreamSegmentId).", ContainerMetadata.NO_STREAM_SEGMENT_ID, m.getStreamSegmentId(getName(segmentId), false));
Assert.assertNull("SegmentMetadata was not reset (getStreamSegmentMetadata).", m.getStreamSegmentMetadata(segmentId));
}
LogAddress tmSeqNo = m.getClosestTruncationMarker(truncationMarkerSeqNo);
Assert.assertNull("Truncation Markers were not reset.", tmSeqNo);
Assert.assertFalse("Truncation Points were not reset.", m.isValidTruncationPoint(truncationMarkerSeqNo));
}
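The recovery-mode gate the test asserts implies this call pattern (a sketch; the container id is illustrative):
UpdateableContainerMetadata md = new MetadataBuilder(0).build();
// Calling md.reset() here would throw IllegalStateException: reset() is recovery-mode only.
md.enterRecoveryMode();
md.reset(); // clears segment mappings, the sequence number, the epoch, and all truncation markers
md.exitRecoveryMode();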
use of io.pravega.segmentstore.server.UpdateableContainerMetadata in project pravega by pravega.
the class DurableLogTests method testRecoveryWithDisabledDataLog.
/**
* Verifies the ability of the DurableLog to recover (delayed start) using a disabled DurableDataLog. This verifies
* the ability to shut down correctly while still waiting for the DataLog to become enabled, as well as to detect that
* it did become enabled and then resume normal operations.
*/
@Test
public void testRecoveryWithDisabledDataLog() throws Exception {
int streamSegmentCount = 50;
int appendsPerStreamSegment = 20;
AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
@Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
@Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
storage.initialize(1);
@Cleanup InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
@Cleanup CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
// Write some data to the log. We'll read it later.
HashSet<Long> streamSegmentIds;
List<Operation> originalOperations;
List<OperationWithCompletion> completionFutures;
UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
dataLog.set(null);
try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
// DurableLog should start properly.
durableLog.startAsync().awaitRunning();
streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
completionFutures = processOperations(operations, durableLog);
OperationWithCompletion.allOf(completionFutures).join();
originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
}
// Disable the DurableDataLog. This requires us to initialize the log, then disable it.
metadata = new MetadataBuilder(CONTAINER_ID).build();
dataLog.set(null);
try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
// DurableLog should start properly.
durableLog.startAsync().awaitRunning();
CompletableFuture<Void> online = durableLog.awaitOnline();
Assert.assertTrue("awaitOnline() returned an incomplete future.", Futures.isSuccessful(online));
Assert.assertFalse("Not expecting an offline DurableLog.", durableLog.isOffline());
dataLog.get().disable();
}
// Verify that the DurableLog starts properly and that all operations throw appropriate exceptions.
try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
// DurableLog should start properly.
durableLog.startAsync().awaitRunning();
CompletableFuture<Void> online = durableLog.awaitOnline();
Assert.assertFalse("awaitOnline() returned a completed future.", online.isDone());
Assert.assertTrue("Expecting an offline DurableLog.", durableLog.isOffline());
// Verify all operations fail with the right exception.
AssertExtensions.assertThrows("add() did not fail with the right exception when offline.", () -> durableLog.add(new ProbeOperation(), TIMEOUT), ex -> ex instanceof ContainerOfflineException);
AssertExtensions.assertThrows("read() did not fail with the right exception when offline.", () -> durableLog.read(0, 1, TIMEOUT), ex -> ex instanceof ContainerOfflineException);
AssertExtensions.assertThrows("truncate() did not fail with the right exception when offline.", () -> durableLog.truncate(0, TIMEOUT), ex -> ex instanceof ContainerOfflineException);
AssertExtensions.assertThrows("operationProcessingBarrier() did not fail with the right exception when offline.", () -> durableLog.operationProcessingBarrier(TIMEOUT), ex -> ex instanceof ContainerOfflineException);
// Verify we can also shut it down properly from this state.
durableLog.stopAsync().awaitTerminated();
Assert.assertTrue("awaitOnline() returned future did not fail when DurableLog shut down.", online.isCompletedExceptionally());
}
// Verify that, when the DurableDataLog becomes enabled, the DurableLog can pick up the change and resume normal operations.
dataLog.set(null);
try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
// DurableLog should start properly.
durableLog.startAsync().awaitRunning();
CompletableFuture<Void> online = durableLog.awaitOnline();
Assert.assertFalse("awaitOnline() returned a completed future.", online.isDone());
// Enable the underlying data log and await for recovery to finish.
dataLog.get().enable();
online.get(START_RETRY_DELAY_MILLIS * 100, TimeUnit.MILLISECONDS);
Assert.assertFalse("Not expecting an offline DurableLog after re-enabling.", durableLog.isOffline());
// Verify we can still read the data that we wrote before the DataLog was disabled.
List<Operation> recoveredOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
assertRecoveredOperationsMatch(originalOperations, recoveredOperations);
performMetadataChecks(streamSegmentIds, new HashSet<>(), new HashMap<>(), completionFutures, metadata, false, false);
performReadIndexChecks(completionFutures, readIndex);
// Stop the processor.
durableLog.stopAsync().awaitTerminated();
}
}
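The offline-handling pattern the test drives can be sketched as follows, assuming a constructed DurableLog instance named durableLog:
durableLog.startAsync().awaitRunning();
if (durableLog.isOffline()) {
    // add(), read(), truncate() and operationProcessingBarrier() would all fail
    // with ContainerOfflineException at this point.
    durableLog.awaitOnline().join(); // completes once the underlying DurableDataLog is enabled
}
// Normal operations can resume here.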
use of io.pravega.segmentstore.server.UpdateableContainerMetadata in project pravega by pravega.
the class OperationMetadataUpdaterTests method testRollback.
/**
* Tests the ability to rollback update transactions.
*/
@Test
public void testRollback() throws Exception {
// 2 out of every 3 UpdateTransactions are either ignored or rolled back (to verify handling of multiple failures).
// Commit the rest and verify the final metadata is as it should be.
final int failEvery = 3;
Predicate<Integer> isIgnored = index -> index % failEvery > 0;
Predicate<Integer> shouldFail = index -> index % failEvery == failEvery - 1;
val referenceMetadata = createBlankMetadata();
val metadata = createBlankMetadata();
val updater = new OperationMetadataUpdater(metadata);
val lastSegmentId = new AtomicLong(-1);
val lastSegmentTxnId = new AtomicLong(-1);
val updateTransactions = new ArrayList<Map.Entry<Long, ContainerMetadata>>();
for (int i = 0; i < TRANSACTION_COUNT; i++) {
// Check to see if this UpdateTransaction is going to end up being rolled back. If so, we should not update
// the reference metadata at all.
UpdateableContainerMetadata txnReferenceMetadata = isIgnored.test(i) ? null : referenceMetadata;
populateUpdateTransaction(updater, txnReferenceMetadata, lastSegmentId, lastSegmentTxnId);
if (shouldFail.test(i)) {
long prevUtId = updateTransactions.get(updateTransactions.size() - 1).getKey();
updater.rollback(prevUtId + 1);
} else if (txnReferenceMetadata != null) {
// Not failing and not ignored: this UpdateTransaction will survive, so record it.
long utId = updater.sealTransaction();
if (updateTransactions.size() > 0) {
long prevUtId = updateTransactions.get(updateTransactions.size() - 1).getKey();
Assert.assertEquals("Unexpected UpdateTransaction.Id.", prevUtId + failEvery - 1, utId);
}
updateTransactions.add(new AbstractMap.SimpleImmutableEntry<>(utId, clone(txnReferenceMetadata)));
}
}
ContainerMetadata previousMetadata = null;
for (val t : updateTransactions) {
val utId = t.getKey();
val expectedMetadata = t.getValue();
// Check to see if it's time to commit.
if (previousMetadata != null) {
// Verify no changes to the metadata prior to commit.
ContainerMetadataUpdateTransactionTests.assertMetadataSame("Before commit " + utId, previousMetadata, metadata);
}
// Commit and verify.
updater.commit(utId);
ContainerMetadataUpdateTransactionTests.assertMetadataSame("After commit " + utId, expectedMetadata, metadata);
previousMetadata = expectedMetadata;
}
}
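The seal/commit/rollback lifecycle exercised above can be sketched as follows; the metadata construction and the operations applied inside each UpdateTransaction are elided:
OperationMetadataUpdater updater = new OperationMetadataUpdater(metadata);
// ... apply operations, populating the current UpdateTransaction ...
long utId = updater.sealTransaction(); // freeze the current UpdateTransaction and get its id
updater.commit(utId);                  // apply all sealed UpdateTransactions up to and including utId
// Alternatively, updater.rollback(utId) discards utId and everything sealed after it,
// leaving the underlying ContainerMetadata untouched.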