Use of io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation in project pravega by pravega.
Class TestWriterDataSource, method add.
// endregion
// region add
public long add(Operation operation) {
    Exceptions.checkNotClosed(this.closed.get(), this);
    Preconditions.checkArgument(operation.getSequenceNumber() < 0, "Given operation already has a sequence number.");

    // If not a checkpoint op, see if we need to auto-add one.
    boolean isCheckpoint = operation instanceof MetadataCheckpointOperation;
    if (!isCheckpoint) {
        if (this.config.autoInsertCheckpointFrequency != DataSourceConfig.NO_METADATA_CHECKPOINT
                && this.metadata.getOperationSequenceNumber() - this.lastAddedCheckpoint.get() >= this.config.autoInsertCheckpointFrequency) {
            MetadataCheckpointOperation checkpointOperation = new MetadataCheckpointOperation();
            this.lastAddedCheckpoint.set(add(checkpointOperation));
        }
    }

    // Set the Sequence Number, after the possible recursive call to add a checkpoint (to maintain Seq No order).
    operation.setSequenceNumber(this.metadata.nextOperationSequenceNumber());

    // Record the Truncation Marker/Point before actually adding the operation to the log (the operation could
    // get picked up very fast by the Writer, so we need to have everything in place).
    if (isCheckpoint) {
        this.metadata.recordTruncationMarker(operation.getSequenceNumber(), new TestLogAddress(operation.getSequenceNumber()));
        this.metadata.setValidTruncationPoint(operation.getSequenceNumber());
    }

    if (!this.log.add(operation)) {
        throw new IllegalStateException("Sequence numbers out of order.");
    }

    notifyAddProcessed();
    return operation.getSequenceNumber();
}
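The interesting part of this helper is the recursive auto-checkpoint: once enough operations have accumulated since the last checkpoint, add() first calls itself with a fresh MetadataCheckpointOperation so the checkpoint receives the earlier sequence number and ordering is preserved. Below is a minimal standalone sketch of that pattern; the class and its string-based "operations" are hypothetical stand-ins, not the Pravega API.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

class CheckpointingLog {
    static final long NO_CHECKPOINT = -1;

    private final List<String> log = new ArrayList<>();
    private final AtomicLong sequence = new AtomicLong();
    private final AtomicLong lastCheckpoint = new AtomicLong();
    private final long frequency;

    CheckpointingLog(long frequency) {
        this.frequency = frequency;
    }

    long add(String operation) {
        boolean isCheckpoint = operation.equals("CHECKPOINT");
        if (!isCheckpoint
                && this.frequency != NO_CHECKPOINT
                && this.sequence.get() - this.lastCheckpoint.get() >= this.frequency) {
            // Recursive call: the checkpoint takes the next sequence number,
            // then the triggering operation gets the one after it.
            this.lastCheckpoint.set(add("CHECKPOINT"));
        }
        long seqNo = this.sequence.incrementAndGet();
        this.log.add(seqNo + ": " + operation);
        return seqNo;
    }

    public static void main(String[] args) {
        CheckpointingLog log = new CheckpointingLog(3);
        for (int i = 0; i < 7; i++) {
            log.add("APPEND-" + i);
        }
        log.log.forEach(System.out::println); // a CHECKPOINT appears roughly every 3 ops
    }
}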
Use of io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation in project pravega by pravega.
Class ContainerMetadataUpdateTransactionTests, method testProcessMetadataCheckpoint.
/**
 * Tests the processMetadataOperation method with MetadataCheckpoint operations.
 */
@Test
public void testProcessMetadataCheckpoint() throws Exception {
    // When encountering a MetadataCheckpoint in non-Recovery Mode, the ContainerMetadataUpdateTransaction serializes
    // a snapshot of the current metadata inside the Operation.
    // When encountering a MetadataCheckpoint in Recovery Mode, the ContainerMetadataUpdateTransaction deserializes
    // the snapshotted metadata in it and applies it to the container metadata (inside the transaction). All existing
    // metadata updates are cleared.
    String newSegmentName = "NewSegmentId";
    AtomicLong seqNo = new AtomicLong();

    // Create a non-empty metadata, and in addition, seal a segment and truncate it.
    this.timeProvider.setElapsedMillis(1234);
    UpdateableContainerMetadata metadata = createMetadata();
    metadata.getStreamSegmentMetadata(SEGMENT_ID).markSealed();
    metadata.getStreamSegmentMetadata(SEGMENT_ID).setStartOffset(SEGMENT_LENGTH / 2);
    val txn = createUpdateTransaction(metadata);

    // Checkpoint 1: original metadata.
    // Checkpoint 2: Checkpoint 1 + 1 StreamSegment and 1 Transaction + 1 Append.
    MetadataCheckpointOperation checkpoint1 = createMetadataCheckpoint();
    MetadataCheckpointOperation checkpoint2 = createMetadataCheckpoint();

    // Checkpoint 1 should have the original metadata.
    processOperation(checkpoint1, txn, seqNo::incrementAndGet);
    UpdateableContainerMetadata checkpointedMetadata = getCheckpointedMetadata(checkpoint1);
    assertMetadataSame("Unexpected metadata before any operation.", metadata, checkpointedMetadata);

    // Map another StreamSegment, and add an append.
    StreamSegmentMapOperation mapOp = new StreamSegmentMapOperation(StreamSegmentInformation.builder().name(newSegmentName).length(SEGMENT_LENGTH).build());
    processOperation(mapOp, txn, seqNo::incrementAndGet);
    processOperation(new StreamSegmentAppendOperation(mapOp.getStreamSegmentId(), DEFAULT_APPEND_DATA, createAttributeUpdates()), txn, seqNo::incrementAndGet);
    processOperation(checkpoint2, txn, seqNo::incrementAndGet);

    // Checkpoint 2 should have Checkpoint 1 + new StreamSegment + Append.
    txn.commit(metadata);
    checkpointedMetadata = getCheckpointedMetadata(checkpoint2);
    assertMetadataSame("Unexpected metadata after deserializing checkpoint.", metadata, checkpointedMetadata);
}
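The two-mode behavior this test exercises can be summarized in a few lines: in normal mode, processing a checkpoint captures a snapshot of the current metadata inside the operation; in recovery mode, it discards whatever is pending and replaces it with the deserialized snapshot. The sketch below illustrates that contract with hypothetical types and a plain Map standing in for the serialized metadata payload; it is not Pravega's serialization code.

import java.util.HashMap;
import java.util.Map;

class CheckpointSketch {
    static class Checkpoint {
        Map<String, Long> snapshot; // stands in for the serialized metadata payload
    }

    private final Map<String, Long> metadata = new HashMap<>();
    private final boolean recoveryMode;

    CheckpointSketch(boolean recoveryMode) {
        this.recoveryMode = recoveryMode;
    }

    void recordSegment(String name, long length) {
        this.metadata.put(name, length);
    }

    void processCheckpoint(Checkpoint op) {
        if (this.recoveryMode) {
            // Recovery: clear all existing updates and apply the snapshot from the operation.
            this.metadata.clear();
            this.metadata.putAll(op.snapshot);
        } else {
            // Normal mode: capture the current metadata inside the operation.
            op.snapshot = new HashMap<>(this.metadata);
        }
    }

    public static void main(String[] args) {
        CheckpointSketch writer = new CheckpointSketch(false);
        writer.recordSegment("Segment1", 100);
        Checkpoint cp = new Checkpoint();
        writer.processCheckpoint(cp); // snapshot taken

        CheckpointSketch recovering = new CheckpointSketch(true);
        recovering.recordSegment("Stale", 1); // will be cleared by the checkpoint
        recovering.processCheckpoint(cp);
        System.out.println(recovering.metadata); // {Segment1=100}
    }
}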
Use of io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation in project pravega by pravega.
Class DurableLogTests, method testTruncateWithStorageMetadataCheckpoints.
/**
 * Tests the ability of the truncate() method to auto-queue (and wait for) mini-metadata checkpoints containing items
 * that are not updated via normal operations. Such items include StorageLength and IsSealedInStorage.
 */
@Test
public void testTruncateWithStorageMetadataCheckpoints() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;

    // Setup a DurableLog and start it.
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    val metadata1 = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup val readIndex1 = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, cacheFactory, storage, cacheManager, executorService());
    HashSet<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;

    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex1, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata1, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();

        // Update the metadata with Storage-related data. Set some arbitrary StorageOffsets and seal 50% of the segments in storage.
        long storageOffset = 0;
        for (long segmentId : streamSegmentIds) {
            val sm = metadata1.getStreamSegmentMetadata(segmentId);
            sm.setStorageLength(Math.min(storageOffset, sm.getLength()));
            storageOffset++;
            if (sm.isSealed() && storageOffset % 2 == 0) {
                sm.markSealedInStorage();
            }
        }

        // Truncate at the last possible truncation point.
        val originalOperations = readUpToSequenceNumber(durableLog, metadata1.getOperationSequenceNumber());
        long lastCheckpointSeqNo = -1;
        for (Operation o : originalOperations) {
            if (o instanceof MetadataCheckpointOperation) {
                lastCheckpointSeqNo = o.getSequenceNumber();
            }
        }

        AssertExtensions.assertGreaterThan("Could not find any truncation points.", 0, lastCheckpointSeqNo);
        durableLog.truncate(lastCheckpointSeqNo, TIMEOUT).join();

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }

    // Start a second DurableLog and then verify the metadata.
    val metadata2 = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup val readIndex2 = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, cacheFactory, storage, cacheManager, executorService());
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex2, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Check Metadata1 vs Metadata2.
        for (long segmentId : streamSegmentIds) {
            val sm1 = metadata1.getStreamSegmentMetadata(segmentId);
            val sm2 = metadata2.getStreamSegmentMetadata(segmentId);
            Assert.assertEquals("StorageLength differs for recovered segment " + segmentId, sm1.getStorageLength(), sm2.getStorageLength());
            Assert.assertEquals("IsSealedInStorage differs for recovered segment " + segmentId, sm1.isSealedInStorage(), sm2.isSealedInStorage());
        }

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
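The key step in this test is finding the last valid truncation point: walk the recovered operations in order and remember the sequence number of the last MetadataCheckpointOperation seen. A minimal self-contained sketch of that scan follows; the Op hierarchy here is a hypothetical stand-in for Pravega's Operation types.

import java.util.List;

class TruncationPointFinder {
    interface Op { long seqNo(); }
    record Append(long seqNo) implements Op { }
    record Checkpoint(long seqNo) implements Op { }

    static long lastCheckpointSeqNo(List<Op> operations) {
        long last = -1;
        for (Op o : operations) {
            if (o instanceof Checkpoint) {
                last = o.seqNo(); // later checkpoints supersede earlier ones
            }
        }
        return last; // -1 means no valid truncation point exists
    }

    public static void main(String[] args) {
        List<Op> ops = List.of(new Checkpoint(1), new Append(2), new Checkpoint(3), new Append(4));
        System.out.println(lastCheckpointSeqNo(ops)); // 3
    }
}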
Use of io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation in project pravega by pravega.
Class DurableLogTests, method testTruncateWithoutRecovery.
// endregion
// region Truncation
/**
 * Tests the truncate() method without doing any recovery.
 */
@Test
public void testTruncateWithoutRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;

    // Setup a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());

    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Hook up a listener to figure out when truncation actually happens.
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));

        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        HashSet<Long> streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);

        // Add one of these at the end to ensure we can truncate everything.
        queuedOperations.add(new MetadataCheckpointOperation());
        List<OperationWithCompletion> completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();

        // Get a list of all the operations, before truncation.
        List<Operation> originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        boolean fullTruncationPossible = false;

        // At the end, verify all operations and all entries in the DataLog were truncated.
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            truncationOccurred.set(false);
            if (currentOperation instanceof MetadataCheckpointOperation) {
                // Need to figure out if the operation we're about to truncate to is actually the first in the log;
                // in that case, we should not be expecting any truncation.
                boolean isTruncationPointFirstOperation = durableLog.read(-1, 1, TIMEOUT).join().next() instanceof MetadataCheckpointOperation;

                // Perform the truncation.
                durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
                if (!isTruncationPointFirstOperation) {
                    Assert.assertTrue("No truncation occurred even though a valid Truncation Point was passed: " + currentOperation.getSequenceNumber(), truncationOccurred.get());
                }

                // Verify all operations up to, and including this one, have been removed.
                Iterator<Operation> reader = durableLog.read(-1, 2, TIMEOUT).join();
                Assert.assertTrue("Not expecting an empty log after truncating an operation (a MetadataCheckpoint must always exist).", reader.hasNext());
                verifyFirstItemIsMetadataCheckpoint(reader);
                if (i < originalOperations.size() - 1) {
                    Operation firstOp = reader.next();
                    OperationComparer.DEFAULT.assertEquals(String.format("Unexpected first operation after truncating SeqNo %d.", currentOperation.getSequenceNumber()), originalOperations.get(i + 1), firstOp);
                } else {
                    // Sometimes the Truncation Point is on the same DataFrame as other data, and it's the last DataFrame;
                    // in that case, it cannot be truncated, since truncating the frame would mean losing the Checkpoint as well.
                    fullTruncationPossible = !reader.hasNext();
                }
            } else {
                // Verify we are not allowed to truncate on non-valid Truncation Points.
                AssertExtensions.assertThrows("DurableLog allowed truncation on a non-MetadataCheckpointOperation.",
                        () -> durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT),
                        ex -> ex instanceof IllegalArgumentException);

                // Verify the Operation Log is still intact.
                Iterator<Operation> reader = durableLog.read(-1, 1, TIMEOUT).join();
                Assert.assertTrue("No elements left in the log even though no truncation occurred.", reader.hasNext());
                Operation firstOp = reader.next();
                AssertExtensions.assertLessThanOrEqual("It appears that Operations were removed from the Log even though no truncation happened.", currentOperation.getSequenceNumber(), firstOp.getSequenceNumber());
            }
        }

        // Verify that we can still queue operations to the DurableLog and they can be read.
        // In this case we'll just queue some StreamSegmentMapOperations.
        StreamSegmentMapOperation newOp = new StreamSegmentMapOperation(StreamSegmentInformation.builder().name("foo").build());
        if (!fullTruncationPossible) {
            // We were not able to do a full truncation before. Do one now, since we are guaranteed to have a new DataFrame available.
            MetadataCheckpointOperation lastCheckpoint = new MetadataCheckpointOperation();
            durableLog.add(lastCheckpoint, TIMEOUT).join();
            durableLog.truncate(lastCheckpoint.getSequenceNumber(), TIMEOUT).join();
        }

        durableLog.add(newOp, TIMEOUT).join();

        // Full Checkpoint + Storage Checkpoint (auto-added) + new op.
        final int expectedOperationCount = 3;
        List<Operation> newOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        Assert.assertEquals("Unexpected number of operations added after full truncation.", expectedOperationCount, newOperations.size());
        Assert.assertTrue("Expecting the first operation after full truncation to be a MetadataCheckpointOperation.", newOperations.get(0) instanceof MetadataCheckpointOperation);
        Assert.assertTrue("Expecting a StorageMetadataCheckpointOperation to be auto-added after full truncation.", newOperations.get(1) instanceof StorageMetadataCheckpointOperation);
        Assert.assertEquals("Unexpected Operation encountered after full truncation.", newOp, newOperations.get(2));

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
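The contract this test verifies is that truncate() accepts only sequence numbers belonging to a MetadataCheckpointOperation, removes everything up to and including that point, and rejects any other sequence number with an IllegalArgumentException. The sketch below captures just that validity check with hypothetical types; it deliberately omits the DataFrame mechanics and the real DurableLog's behavior of keeping a MetadataCheckpoint as the first operation after truncation.

import java.util.ArrayDeque;
import java.util.Deque;

class TruncatableLog {
    record Entry(long seqNo, boolean isCheckpoint) { }

    private final Deque<Entry> entries = new ArrayDeque<>();

    void add(Entry e) {
        this.entries.addLast(e);
    }

    void truncate(long upToSeqNo) {
        boolean validPoint = this.entries.stream()
                .anyMatch(e -> e.seqNo() == upToSeqNo && e.isCheckpoint());
        if (!validPoint) {
            throw new IllegalArgumentException("Not a valid truncation point: " + upToSeqNo);
        }
        while (!this.entries.isEmpty() && this.entries.peekFirst().seqNo() <= upToSeqNo) {
            this.entries.removeFirst();
        }
    }

    public static void main(String[] args) {
        TruncatableLog log = new TruncatableLog();
        log.add(new Entry(1, true));
        log.add(new Entry(2, false));
        log.add(new Entry(3, true));
        log.truncate(3);                 // ok: seqNo 3 is a checkpoint
        System.out.println(log.entries); // []
        try {
            log.truncate(2);             // rejected: seqNo 2 is not a checkpoint
        } catch (IllegalArgumentException ex) {
            System.out.println(ex.getMessage());
        }
    }
}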
Use of io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation in project pravega by pravega.
Class ContainerMetadataUpdateTransaction, method preProcessOperation.
/**
 * Pre-processes the given Operation. See OperationMetadataUpdater.preProcessOperation for more details on behavior.
 *
 * @param operation The operation to pre-process.
 * @throws ContainerException     If the given operation was rejected given the current state of the container metadata.
 * @throws StreamSegmentException If the given operation was incompatible with the current state of the Segment.
 *                                For example: StreamSegmentNotExistsException, StreamSegmentSealedException or
 *                                StreamSegmentMergedException.
 */
void preProcessOperation(Operation operation) throws ContainerException, StreamSegmentException {
    checkNotSealed();
    SegmentMetadataUpdateTransaction segmentMetadata = null;
    if (operation instanceof SegmentOperation) {
        segmentMetadata = getSegmentUpdateTransaction(((SegmentOperation) operation).getStreamSegmentId());
        if (segmentMetadata.isDeleted()) {
            throw new StreamSegmentNotExistsException(segmentMetadata.getName());
        }
    }

    if (operation instanceof StreamSegmentAppendOperation) {
        segmentMetadata.preProcessOperation((StreamSegmentAppendOperation) operation);
    } else if (operation instanceof StreamSegmentSealOperation) {
        segmentMetadata.preProcessOperation((StreamSegmentSealOperation) operation);
    } else if (operation instanceof MergeTransactionOperation) {
        MergeTransactionOperation mbe = (MergeTransactionOperation) operation;
        SegmentMetadataUpdateTransaction transactionMetadata = getSegmentUpdateTransaction(mbe.getTransactionSegmentId());
        transactionMetadata.preProcessAsTransactionSegment(mbe);
        segmentMetadata.preProcessAsParentSegment(mbe, transactionMetadata);
    } else if (operation instanceof StreamSegmentMapOperation) {
        preProcessMetadataOperation((StreamSegmentMapOperation) operation);
    } else if (operation instanceof UpdateAttributesOperation) {
        segmentMetadata.preProcessOperation((UpdateAttributesOperation) operation);
    } else if (operation instanceof MetadataCheckpointOperation) {
        // MetadataCheckpointOperations do not require preProcess and accept; they can be handled in a single stage.
        processMetadataOperation((MetadataCheckpointOperation) operation);
    } else if (operation instanceof StorageMetadataCheckpointOperation) {
        // StorageMetadataCheckpointOperations do not require preProcess and accept; they can be handled in a single stage.
        processMetadataOperation((StorageMetadataCheckpointOperation) operation);
    } else if (operation instanceof StreamSegmentTruncateOperation) {
        segmentMetadata.preProcessOperation((StreamSegmentTruncateOperation) operation);
    }
}
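The structural point here is the split dispatch: most operations go through a two-stage preProcess/accept cycle, while checkpoint operations are processed in a single stage as soon as they are seen. For comparison, a sketch of the same dispatch shape using Java 21 pattern matching over a hypothetical sealed operation hierarchy (not Pravega's types) is shown below; the instanceof chain above is the equivalent pre-Java-21 form.

sealed interface Op permits Append, Seal, Checkpoint { }
record Append(long segmentId) implements Op { }
record Seal(long segmentId) implements Op { }
record Checkpoint() implements Op { }

class Dispatcher {
    void preProcess(Op operation) {
        switch (operation) {
            case Append a -> validateAppend(a);        // two-stage: a later accept() applies it
            case Seal s -> validateSeal(s);            // two-stage as well
            case Checkpoint c -> processCheckpoint(c); // single-stage: no separate accept step
        }
    }

    private void validateAppend(Append a) { /* reject if the segment is sealed or deleted */ }
    private void validateSeal(Seal s) { /* reject if the segment is already sealed */ }
    private void processCheckpoint(Checkpoint c) { /* snapshot or apply metadata immediately */ }

    public static void main(String[] args) {
        new Dispatcher().preProcess(new Checkpoint()); // processed in a single stage
    }
}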