Use of io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation in project pravega by pravega.
The class ContainerMetadataUpdateTransactionTests, method testProcessMetadataCheckpointIgnored.
/**
 * Tests the processMetadataOperation method with MetadataCheckpoint operations, in the case where such checkpoints
 * are skipped over because they come after other operations.
 */
@Test
public void testProcessMetadataCheckpointIgnored() throws Exception {
    long newSegmentId = 897658;
    String newSegmentName = "NewSegmentId";
    AtomicLong seqNo = new AtomicLong();

    // Create a non-empty metadata and capture it in a checkpoint.
    UpdateableContainerMetadata metadata = createMetadata();
    val txn = createUpdateTransaction(metadata);
    MetadataCheckpointOperation checkpointedMetadata = createMetadataCheckpoint();
    processOperation(checkpointedMetadata, txn, seqNo::incrementAndGet);

    // Create a blank metadata in recovery mode, and add an operation to the updater (which results in mapping
    // a new StreamSegment).
    metadata = createBlankMetadata();
    metadata.enterRecoveryMode();
    val txn2 = createUpdateTransaction(metadata);
    StreamSegmentMapOperation mapOp = createMap(newSegmentName);
    mapOp.setStreamSegmentId(newSegmentId);
    processOperation(mapOp, txn2, seqNo::incrementAndGet);

    // Now try to process the checkpoint. Since txn2 already has changes, it must be skipped.
    processOperation(checkpointedMetadata, txn2, seqNo::incrementAndGet);
    txn2.commit(metadata);

    // Verify the checkpointed metadata has not been applied: the newly mapped StreamSegment must still be there,
    // and the StreamSegment known only to the checkpoint must not.
    Assert.assertNotNull("Newly added StreamSegment Id was removed after processing the checkpoint.", metadata.getStreamSegmentMetadata(mapOp.getStreamSegmentId()));
    Assert.assertNull("Checkpoint seems to have been applied.", metadata.getStreamSegmentMetadata(SEGMENT_ID));
}
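The rule this test exercises can be summarized as follows; the sketch below is a simplified, hypothetical rendering of the recovery-time decision (the method name shouldApplyCheckpoint and both parameters are invented for illustration; the real logic lives in ContainerMetadataUpdateTransaction's checkpoint pre-processing):

// Hypothetical sketch, not the actual Pravega implementation.
boolean shouldApplyCheckpoint(boolean recoveryMode, boolean txnAlreadyHasChanges) {
    if (!recoveryMode) {
        // Outside recovery, a checkpoint only records the current metadata; it never rewrites it.
        return false;
    }
    // During recovery, only a checkpoint encountered before any other metadata changes may be used
    // to rebuild the metadata; one that follows other operations is redundant and gets skipped.
    return !txnAlreadyHasChanges;
}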
Use of io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation in project pravega by pravega.
The class DurableLogTests, method testMetadataCheckpoint.
/**
 * Tests the ability of the DurableLog to add MetadataCheckpointOperations.
 *
 * @param createDurableLogConfig     A Supplier that creates a DurableLogConfig object.
 * @param waitForProcessingFrequency The frequency at which to stop and wait for operations to be processed by the
 *                                   DurableLog before adding others.
 */
private void testMetadataCheckpoint(Supplier<DurableLogConfig> createDurableLogConfig, int waitForProcessingFrequency) throws Exception {
    int streamSegmentCount = 500;
    int appendsPerStreamSegment = 20;

    // Set up a DurableLog and start it.
    @Cleanup
    ContainerSetup setup = new ContainerSetup(executorService());
    DurableLogConfig durableLogConfig = createDurableLogConfig.get();
    setup.setDurableLogConfig(durableLogConfig);
    @Cleanup
    DurableLog durableLog = setup.createDurableLog();
    durableLog.startAsync().awaitRunning();

    // Verify that a freshly created DurableLog auto-adds a MetadataCheckpoint as its first operation.
    verifyFirstItemIsMetadataCheckpoint(durableLog.read(-1L, 1, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));

    // Generate some test data. We need to do this after starting the DurableLog because, as part of recovery,
    // it wipes away all existing metadata.
    HashSet<Long> streamSegmentIds = createStreamSegmentsInMetadata(streamSegmentCount, setup.metadata);
    List<Operation> operations = generateOperations(streamSegmentIds, Collections.emptyMap(), appendsPerStreamSegment, NO_METADATA_CHECKPOINT, false, false);

    // Process all generated operations.
    List<OperationWithCompletion> completionFutures = processOperations(operations, durableLog, waitForProcessingFrequency);

    // Wait for all such operations to complete. If any of them failed, this will fail too and report the exception.
    OperationWithCompletion.allOf(completionFutures).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    List<Operation> readOperations = readUpToSequenceNumber(durableLog, setup.metadata.getOperationSequenceNumber());

    // Count the number of injected MetadataCheckpointOperations.
    int injectedOperationCount = 0;
    for (Operation o : readOperations) {
        if (o instanceof MetadataCheckpointOperation) {
            injectedOperationCount++;
        }
    }

    // Calculate how many were expected: every read operation that is not one of our (serializable) generated
    // operations must be an injected MetadataCheckpointOperation.
    int expectedCheckpoints = readOperations.size() - (int) operations.stream().filter(Operation::canSerialize).count();
    Assert.assertEquals("Unexpected operations were injected. Expected only MetadataCheckpointOperations.", expectedCheckpoints, injectedOperationCount);

    // We expect at least 2 injected operations (one is the very first one, checked above, and at least one more
    // based on the written data).
    AssertExtensions.assertGreaterThan("Insufficient number of injected operations.", 1, injectedOperationCount);

    // Stop the processor.
    durableLog.stopAsync().awaitTerminated();
}
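The Supplier argument controls how aggressively the DurableLog injects checkpoints. A caller might build configurations along these lines; this is a hedged sketch, and the property names CHECKPOINT_MIN_COMMIT_COUNT, CHECKPOINT_COMMIT_COUNT and CHECKPOINT_TOTAL_COMMIT_LENGTH are assumed from the DurableLogConfig builder and may differ between Pravega versions:

// Sketch: a config that injects a MetadataCheckpointOperation roughly every 10 commits.
// Property names are assumptions; consult DurableLogConfig in your Pravega version.
Supplier<DurableLogConfig> frequentCheckpoints = () -> DurableLogConfig
        .builder()
        .with(DurableLogConfig.CHECKPOINT_MIN_COMMIT_COUNT, 10)
        .with(DurableLogConfig.CHECKPOINT_COMMIT_COUNT, 10)
        .with(DurableLogConfig.CHECKPOINT_TOTAL_COMMIT_LENGTH, 10 * 1024 * 1024L)
        .build();

// The test above could then be driven as: testMetadataCheckpoint(frequentCheckpoints, 10);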
Use of io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation in project pravega by pravega.
The class DurableLogTests, method verifyFirstItemIsMetadataCheckpoint.
private void verifyFirstItemIsMetadataCheckpoint(Iterator<Operation> logIterator) {
    Assert.assertTrue("DurableLog is empty even though a MetadataCheckpointOperation was expected.", logIterator.hasNext());
    Operation firstOp = logIterator.next();
    Assert.assertTrue("First operation in DurableLog is not a MetadataCheckpointOperation: " + firstOp, firstOp instanceof MetadataCheckpointOperation);
}
Use of io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation in project pravega by pravega.
The class DurableLogTests, method testTruncateWithRecovery.
/**
 * Tests the truncate() method while performing recovery.
 */
@Test
public void testTruncateWithRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;

    // Set up a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup
    CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup
    ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
    HashSet<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;
    List<Operation> originalOperations;

    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Generate some test data. We need to do this after starting the DurableLog because, as part of recovery,
        // it wipes away all existing metadata.
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();

        // Get a list of all the operations, before any truncation.
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }

    // Truncate up to each MetadataCheckpointOperation. Whenever the DataLog was actually truncated, shut down
    // the DurableLog, restart it (forcing recovery) and verify the remaining operations are as expected.
    DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
    try {
        durableLog.startAsync().awaitRunning();
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            if (!(currentOperation instanceof MetadataCheckpointOperation)) {
                // We can only truncate on MetadataCheckpointOperations.
                continue;
            }

            truncationOccurred.set(false);
            durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
            if (truncationOccurred.get()) {
                // Close the current DurableLog and start a brand new one, forcing recovery.
                durableLog.close();
                durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
                durableLog.startAsync().awaitRunning();
                dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));

                // Verify all operations up to, and including, this one have been removed.
                Iterator<Operation> reader = durableLog.read(-1, 2, TIMEOUT).join();
                Assert.assertTrue("Not expecting an empty log after truncating an operation (a MetadataCheckpoint must always exist).", reader.hasNext());
                verifyFirstItemIsMetadataCheckpoint(reader);
                if (i < originalOperations.size() - 1) {
                    Operation firstOp = reader.next();
                    OperationComparer.DEFAULT.assertEquals(String.format("Unexpected first operation after truncating SeqNo %d.", currentOperation.getSequenceNumber()), originalOperations.get(i + 1), firstOp);
                }
            }
        }
    } finally {
        // This closes whichever instance the variable currently refers to, not necessarily the first one.
        durableLog.close();
    }
}
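Distilled from the loop above, the truncate-then-recover pattern reduces to the following sketch; it reuses the fixture of the test (durableLog, metadata, dataLogFactory, readIndex, TIMEOUT) plus a hypothetical checkpointOp, so it is illustrative rather than standalone:

// Sketch: truncate at a checkpoint, force recovery, then verify the head of the log.
durableLog.truncate(checkpointOp.getSequenceNumber(), TIMEOUT).join();
durableLog.close();
durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
durableLog.startAsync().awaitRunning();
// After recovery, the first operation must again be a MetadataCheckpointOperation.
verifyFirstItemIsMetadataCheckpoint(durableLog.read(-1, 1, TIMEOUT).join());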
Use of io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation in project pravega by pravega.
The class ContainerMetadataUpdateTransaction, method acceptOperation.
/**
 * Accepts the given Operation. The Operation's effects are reflected in the pending transaction.
 * This method has no effect on Metadata Operations.
 * See OperationMetadataUpdater.acceptOperation for more details on behavior.
 *
 * @param operation The operation to accept.
 * @throws MetadataUpdateException If the given operation was rejected given the current state of the metadata.
 * @throws NullPointerException    If the operation is null.
 */
void acceptOperation(Operation operation) throws MetadataUpdateException {
    checkNotSealed();
    SegmentMetadataUpdateTransaction segmentMetadata = null;
    if (operation instanceof SegmentOperation) {
        segmentMetadata = getSegmentUpdateTransaction(((SegmentOperation) operation).getStreamSegmentId());
        segmentMetadata.setLastUsed(operation.getSequenceNumber());
    }

    if (operation instanceof StreamSegmentAppendOperation) {
        segmentMetadata.acceptOperation((StreamSegmentAppendOperation) operation);
    } else if (operation instanceof StreamSegmentSealOperation) {
        segmentMetadata.acceptOperation((StreamSegmentSealOperation) operation);
    } else if (operation instanceof MergeTransactionOperation) {
        MergeTransactionOperation mto = (MergeTransactionOperation) operation;
        SegmentMetadataUpdateTransaction transactionMetadata = getSegmentUpdateTransaction(mto.getTransactionSegmentId());
        transactionMetadata.acceptAsTransactionSegment(mto);
        transactionMetadata.setLastUsed(operation.getSequenceNumber());
        segmentMetadata.acceptAsParentSegment(mto, transactionMetadata);
    } else if (operation instanceof MetadataCheckpointOperation) {
        // A MetadataCheckpointOperation represents a valid truncation point. Record it as such.
        this.newTruncationPoints.add(operation.getSequenceNumber());
    } else if (operation instanceof StreamSegmentMapOperation) {
        acceptMetadataOperation((StreamSegmentMapOperation) operation);
    } else if (operation instanceof UpdateAttributesOperation) {
        segmentMetadata.acceptOperation((UpdateAttributesOperation) operation);
    } else if (operation instanceof StreamSegmentTruncateOperation) {
        segmentMetadata.acceptOperation((StreamSegmentTruncateOperation) operation);
    }
}
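For context, acceptOperation is the second half of a two-phase flow. Below is a hedged sketch of how a caller (such as the processOperation helper used in the tests above) might drive it; the preProcessOperation call is assumed from the surrounding class, while commit(metadata) appears in the tests themselves:

// Sketch of the two-phase update flow; txn is a ContainerMetadataUpdateTransaction.
op.setSequenceNumber(seqNo.incrementAndGet()); // sequence numbers are assigned by the log, not by users
txn.preProcessOperation(op);                   // validate against the current (pending) state; may throw
txn.acceptOperation(op);                       // fold the operation's effects into the pending transaction
// ... later, once the operation has been durably persisted:
txn.commit(metadata);                          // apply all accumulated changes to the container metadata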