Use of io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation in project pravega by pravega.
The class DurableLogTests, method testTruncateWithoutRecovery.
// region Truncation
/**
* Tests the truncate() method without doing any recovery.
*/
@Test
public void testTruncateWithoutRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;

    // Set up a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup
    CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup
    ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());

    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Hook up a listener so we can tell when truncation actually happens.
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));

        // Generate some test data (we need to do this after we start the DurableLog because, in the process of
        // recovery, it wipes away all existing metadata).
        HashSet<Long> streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);

        // Add one of these at the end to ensure we can truncate everything.
        queuedOperations.add(new MetadataCheckpointOperation());
        List<OperationWithCompletion> completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();

        // Get a list of all the operations, before truncation.
        List<Operation> originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        boolean fullTruncationPossible = false;

        // Walk through the log; at every valid Truncation Point, truncate and verify that all operations and all
        // entries in the DataLog up to that point were removed.
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            truncationOccurred.set(false);
            if (currentOperation instanceof MetadataCheckpointOperation) {
                // Figure out whether the operation we're about to truncate to is actually the first in the log;
                // in that case, we should not expect any truncation.
                boolean isTruncationPointFirstOperation = durableLog.read(-1, 1, TIMEOUT).join().next() instanceof MetadataCheckpointOperation;

                // Perform the truncation.
                durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
                if (!isTruncationPointFirstOperation) {
                    Assert.assertTrue("No truncation occurred even though a valid Truncation Point was passed: " + currentOperation.getSequenceNumber(), truncationOccurred.get());
                }

                // Verify that all operations up to, and including, this one have been removed.
                Iterator<Operation> reader = durableLog.read(-1, 2, TIMEOUT).join();
                Assert.assertTrue("Not expecting an empty log after truncating an operation (a MetadataCheckpoint must always exist).", reader.hasNext());
                verifyFirstItemIsMetadataCheckpoint(reader);
                if (i < originalOperations.size() - 1) {
                    Operation firstOp = reader.next();
                    OperationComparer.DEFAULT.assertEquals(String.format("Unexpected first operation after truncating SeqNo %d.", currentOperation.getSequenceNumber()), originalOperations.get(i + 1), firstOp);
                } else {
                    // Sometimes the Truncation Point is on the same DataFrame as other data, and it's the last DataFrame;
                    // in that case, it cannot be truncated, since truncating the frame would mean losing the Checkpoint as well.
                    fullTruncationPossible = !reader.hasNext();
                }
            } else {
                // Verify that we are not allowed to truncate at invalid Truncation Points.
                AssertExtensions.assertThrows(
                        "DurableLog allowed truncation on a non-MetadataCheckpointOperation.",
                        () -> durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT),
                        ex -> ex instanceof IllegalArgumentException);

                // Verify the Operation Log is still intact.
                Iterator<Operation> reader = durableLog.read(-1, 1, TIMEOUT).join();
                Assert.assertTrue("No elements left in the log even though no truncation occurred.", reader.hasNext());
                Operation firstOp = reader.next();
                AssertExtensions.assertLessThanOrEqual("It appears that Operations were removed from the Log even though no truncation happened.", currentOperation.getSequenceNumber(), firstOp.getSequenceNumber());
            }
        }

        // Verify that we can still queue operations to the DurableLog and that they can be read.
        // In this case we'll just queue some StreamSegmentMapOperations.
        StreamSegmentMapOperation newOp = new StreamSegmentMapOperation(StreamSegmentInformation.builder().name("foo").build());
        if (!fullTruncationPossible) {
            // We were not able to do a full truncation before. Do one now, since we are guaranteed to have a new DataFrame available.
            MetadataCheckpointOperation lastCheckpoint = new MetadataCheckpointOperation();
            durableLog.add(lastCheckpoint, TIMEOUT).join();
            durableLog.truncate(lastCheckpoint.getSequenceNumber(), TIMEOUT).join();
        }

        durableLog.add(newOp, TIMEOUT).join();

        // Expected: Full Checkpoint + Storage Checkpoint (auto-added) + the new operation.
        final int expectedOperationCount = 3;
        List<Operation> newOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        Assert.assertEquals("Unexpected number of operations added after full truncation.", expectedOperationCount, newOperations.size());
        Assert.assertTrue("Expecting the first operation after full truncation to be a MetadataCheckpointOperation.", newOperations.get(0) instanceof MetadataCheckpointOperation);
        Assert.assertTrue("Expecting a StorageMetadataCheckpointOperation to be auto-added after full truncation.", newOperations.get(1) instanceof StorageMetadataCheckpointOperation);
        Assert.assertEquals("Unexpected Operation encountered after full truncation.", newOp, newOperations.get(2));

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
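Stripped of its verification scaffolding, the truncation idiom this test exercises (and falls back to when fullTruncationPossible is false) is small. The sketch below assumes a started DurableLog (durableLog) and the test's TIMEOUT constant; it illustrates the pattern rather than adding test code.

MetadataCheckpointOperation checkpoint = new MetadataCheckpointOperation();
// Adding the operation assigns it a SequenceNumber; only a MetadataCheckpointOperation's
// SequenceNumber is accepted as a Truncation Point.
durableLog.add(checkpoint, TIMEOUT).join();
// Operations older than the checkpoint now become eligible for removal from the underlying DataLog.
durableLog.truncate(checkpoint.getSequenceNumber(), TIMEOUT).join();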
Use of io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation in project pravega by pravega.
The class ContainerMetadataUpdateTransaction, method preProcessOperation.
/**
* Pre-processes the given Operation. See OperationMetadataUpdater.preProcessOperation for more details on behavior.
*
* @param operation The operation to pre-process.
* @throws ContainerException If the given operation was rejected given the current state of the container metadata.
* @throws StreamSegmentException If the given operation was incompatible with the current state of the Segment.
* For example: StreamSegmentNotExistsException, StreamSegmentSealedException or
* StreamSegmentMergedException.
*/
void preProcessOperation(Operation operation) throws ContainerException, StreamSegmentException {
    checkNotSealed();
    SegmentMetadataUpdateTransaction segmentMetadata = null;
    if (operation instanceof SegmentOperation) {
        segmentMetadata = getSegmentUpdateTransaction(((SegmentOperation) operation).getStreamSegmentId());
        if (segmentMetadata.isDeleted()) {
            throw new StreamSegmentNotExistsException(segmentMetadata.getName());
        }
    }

    if (operation instanceof StreamSegmentAppendOperation) {
        segmentMetadata.preProcessOperation((StreamSegmentAppendOperation) operation);
    } else if (operation instanceof StreamSegmentSealOperation) {
        segmentMetadata.preProcessOperation((StreamSegmentSealOperation) operation);
    } else if (operation instanceof MergeTransactionOperation) {
        MergeTransactionOperation mergeOp = (MergeTransactionOperation) operation;
        SegmentMetadataUpdateTransaction transactionMetadata = getSegmentUpdateTransaction(mergeOp.getTransactionSegmentId());
        transactionMetadata.preProcessAsTransactionSegment(mergeOp);
        segmentMetadata.preProcessAsParentSegment(mergeOp, transactionMetadata);
    } else if (operation instanceof StreamSegmentMapOperation) {
        preProcessMetadataOperation((StreamSegmentMapOperation) operation);
    } else if (operation instanceof UpdateAttributesOperation) {
        segmentMetadata.preProcessOperation((UpdateAttributesOperation) operation);
    } else if (operation instanceof MetadataCheckpointOperation) {
        // MetadataCheckpointOperations do not require preProcess and accept; they can be handled in a single stage.
        processMetadataOperation((MetadataCheckpointOperation) operation);
    } else if (operation instanceof StorageMetadataCheckpointOperation) {
        // StorageMetadataCheckpointOperations do not require preProcess and accept; they can be handled in a single stage.
        processMetadataOperation((StorageMetadataCheckpointOperation) operation);
    } else if (operation instanceof StreamSegmentTruncateOperation) {
        segmentMetadata.preProcessOperation((StreamSegmentTruncateOperation) operation);
    }
}
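preProcessOperation is the validation half of a two-phase protocol: the updater first validates an operation against the current metadata state, and only once the operation has been durably persisted does it apply it. Below is a hedged sketch of how a driver might push a StreamSegmentMapOperation through both phases; txn (a ContainerMetadataUpdateTransaction) and nextSeqNo are assumed from context, acceptOperation is assumed to be the companion apply step, and the real sequencing lives in OperationMetadataUpdater.

StreamSegmentMapOperation mapOp = new StreamSegmentMapOperation(
        StreamSegmentInformation.builder().name("newSegment").build());
txn.preProcessOperation(mapOp);     // Phase 1: validate; throws if the current metadata state rejects the operation.
mapOp.setSequenceNumber(nextSeqNo); // A sequence number is assigned only to operations that passed validation.
txn.acceptOperation(mapOp);         // Phase 2: apply the validated operation to the transaction.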
Use of io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation in project pravega by pravega.
The class MemoryStateUpdaterTests, method populate.
private ArrayList<Operation> populate(MemoryStateUpdater updater, int segmentCount, int operationCountPerType) throws DataCorruptionException {
    ArrayList<Operation> operations = new ArrayList<>();
    long offset = 0;
    for (int i = 0; i < segmentCount; i++) {
        for (int j = 0; j < operationCountPerType; j++) {
            // Each iteration contributes one map, one append and one merge operation.
            StreamSegmentMapOperation mapOp = new StreamSegmentMapOperation(
                    StreamSegmentInformation.builder().name("a").length(i * j).build());
            mapOp.setStreamSegmentId(i);
            operations.add(mapOp);
            StreamSegmentAppendOperation appendOp = new StreamSegmentAppendOperation(i, Integer.toString(i).getBytes(), null);
            appendOp.setStreamSegmentOffset(offset);
            offset += appendOp.getData().length;
            operations.add(appendOp);
            operations.add(new MergeTransactionOperation(i, j));
        }
    }

    // Assign dense, ordered sequence numbers before handing everything to the updater.
    for (int i = 0; i < operations.size(); i++) {
        operations.get(i).setSequenceNumber(i);
    }

    updater.process(operations.iterator());
    return operations;
}
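A hypothetical usage of the populate helper above, showing what it guarantees to its callers: three operations (map, append, merge) per (segment, iteration) pair, each carrying a dense, ordered sequence number. The updater instance is assumed to come from the surrounding test fixture.

ArrayList<Operation> ops = populate(updater, 3 /* segmentCount */, 10 /* operationCountPerType */);
Assert.assertEquals("Three operations per (segment, iteration) pair.", 3 * 3 * 10, ops.size());
for (int i = 0; i < ops.size(); i++) {
    // populate() assigned sequence numbers 0..n-1 in list order before calling updater.process().
    Assert.assertEquals("Sequence numbers should be dense and ordered.", i, ops.get(i).getSequenceNumber());
}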
Use of io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation in project pravega by pravega.
The class StreamSegmentMapperTests, method testGetOrAssignStreamSegmentIdWithConcurrency.
/**
* Tests the ability of the StreamSegmentMapper to generate/return the Id of an existing StreamSegment, with concurrent requests.
* Also tests the ability to execute such callbacks in the order in which they were received.
*/
@Test
public void testGetOrAssignStreamSegmentIdWithConcurrency() throws Exception {
    // We set up a delay in the OperationLog process. We only do this for a stand-alone StreamSegment because the
    // process is driven by the same code for Transactions as well.
    final String segmentName = "Segment";
    final long segmentId = 12345;
    final String firstResult = "first";
    final String secondResult = "second";
    final String thirdResult = "third";
    HashSet<String> storageSegments = new HashSet<>();
    storageSegments.add(segmentName);

    @Cleanup
    TestContext context = new TestContext();
    setupStorageGetHandler(context, storageSegments, sn -> StreamSegmentInformation.builder().name(sn).build());
    CompletableFuture<Void> initialAddFuture = new CompletableFuture<>();
    CompletableFuture<Void> addInvoked = new CompletableFuture<>();
    AtomicBoolean operationLogInvoked = new AtomicBoolean(false);
    context.operationLog.addHandler = op -> {
        if (!(op instanceof StreamSegmentMapOperation)) {
            return Futures.failedFuture(new IllegalArgumentException("unexpected operation"));
        }

        if (operationLogInvoked.getAndSet(true)) {
            return Futures.failedFuture(new IllegalStateException("multiple calls to OperationLog.add"));
        }

        // Need to set the SegmentId on the operation.
        ((StreamSegmentMapOperation) op).setStreamSegmentId(segmentId);
        UpdateableSegmentMetadata segmentMetadata = context.metadata.mapStreamSegmentId(segmentName, segmentId);
        segmentMetadata.setStorageLength(0);
        segmentMetadata.setLength(0);
        addInvoked.complete(null);
        return initialAddFuture;
    };

    List<Integer> invocationOrder = Collections.synchronizedList(new ArrayList<>());

    // The second call is designed to hit while the first call is still trying to assign the id, hence we test normal queuing.
    CompletableFuture<String> firstCall = context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT, id -> {
        Assert.assertEquals("Unexpected SegmentId (first).", segmentId, (long) id);
        invocationOrder.add(1);
        return CompletableFuture.completedFuture(firstResult);
    });
    CompletableFuture<String> secondCall = context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT, id -> {
        Assert.assertEquals("Unexpected SegmentId (second).", segmentId, (long) id);
        invocationOrder.add(2);
        return CompletableFuture.completedFuture(secondResult);
    });

    // Wait for the metadata to be updated properly.
    addInvoked.join();
    Assert.assertFalse("getOrAssignStreamSegmentId (first call) returned before OperationLog finished.", firstCall.isDone());
    Assert.assertFalse("getOrAssignStreamSegmentId (second call) returned before OperationLog finished.", secondCall.isDone());

    // The third call is designed to hit after the metadata has been updated, but prior to the other callbacks being
    // invoked. It verifies that even in that case it still executes in order.
    CompletableFuture<String> thirdCall = context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT, id -> {
        Assert.assertEquals("Unexpected SegmentId (third).", segmentId, (long) id);
        invocationOrder.add(3);
        return CompletableFuture.completedFuture(thirdResult);
    });
    initialAddFuture.complete(null);

    Assert.assertEquals("Unexpected result from firstCall.", firstResult, firstCall.join());
    Assert.assertEquals("Unexpected result from secondCall.", secondResult, secondCall.join());
    Assert.assertEquals("Unexpected result from thirdCall.", thirdResult, thirdCall.join());
    val expectedOrder = Arrays.asList(1, 2, 3);
    AssertExtensions.assertListEquals("Unexpected callback invocation order.", expectedOrder, invocationOrder, Integer::equals);
}
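The behavior this test verifies (concurrent callers for the same segment share one pending id assignment, and their callbacks run in arrival order) is a general request-coalescing pattern. The following is a self-contained illustration of that pattern, not StreamSegmentMapper's actual implementation; CoalescingAssigner and assignId are invented names for the example.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;

class CoalescingAssigner {
    private final Map<String, CompletableFuture<Long>> pendingTails = new HashMap<>();
    private final AtomicLong nextId = new AtomicLong();

    synchronized <T> CompletableFuture<T> getOrAssign(String name, Function<Long, CompletableFuture<T>> callback) {
        // Only the first caller for a given name triggers the (possibly slow) id assignment.
        CompletableFuture<Long> tail = this.pendingTails.computeIfAbsent(name, this::assignId);

        // Chain this caller's callback onto the current tail, then re-point the tail so that the next
        // caller's callback can only run after this one completes. This preserves arrival order.
        CompletableFuture<T> result = tail.thenCompose(callback);
        this.pendingTails.put(name, result.thenCombine(tail, (r, id) -> id));
        return result;
    }

    private CompletableFuture<Long> assignId(String name) {
        // Stand-in for the real round-trip (Storage lookup + OperationLog.add of a StreamSegmentMapOperation).
        return CompletableFuture.supplyAsync(this.nextId::getAndIncrement);
    }
}

Note that completed tails are never cleaned up in this simplified sketch; a real implementation would remove the map entry once no assignment is pending.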
Use of io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation in project pravega by pravega.
The class ContainerMetadataUpdateTransactionTests, method testProcessMetadataCheckpointIgnored.
/**
* Tests the processMetadataOperation method with MetadataCheckpoint operations, when such checkpoints are skipped over
* because they are after other operations.
*/
@Test
public void testProcessMetadataCheckpointIgnored() throws Exception {
    long newSegmentId = 897658;
    String newSegmentName = "NewSegmentId";
    AtomicLong seqNo = new AtomicLong();

    // Create a non-empty metadata.
    UpdateableContainerMetadata metadata = createMetadata();
    val txn = createUpdateTransaction(metadata);
    MetadataCheckpointOperation checkpointedMetadata = createMetadataCheckpoint();
    processOperation(checkpointedMetadata, txn, seqNo::incrementAndGet);

    // Create a blank metadata, and add an operation to the updater (which would result in mapping a new StreamSegment).
    metadata = createBlankMetadata();
    metadata.enterRecoveryMode();
    val txn2 = createUpdateTransaction(metadata);
    StreamSegmentMapOperation mapOp = createMap(newSegmentName);
    mapOp.setStreamSegmentId(newSegmentId);
    processOperation(mapOp, txn2, seqNo::incrementAndGet);

    // Now try to process the checkpoint.
    processOperation(checkpointedMetadata, txn2, seqNo::incrementAndGet);
    txn2.commit(metadata);

    // Verify that the checkpoint was applied and that the changes accumulated before it were discarded.
    Assert.assertNull("Newly added StreamSegment Id was not removed after applying checkpoint.", metadata.getStreamSegmentMetadata(mapOp.getStreamSegmentId()));
    Assert.assertNotNull("Checkpoint seems to have not been applied.", metadata.getStreamSegmentMetadata(SEGMENT_ID));
}
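createMap is a test helper whose body does not appear in this excerpt. Based on how StreamSegmentMapOperation is constructed elsewhere on this page, it plausibly reduces to the following hedged sketch (the exact builder fields it sets are an assumption):

StreamSegmentMapOperation mapOp = new StreamSegmentMapOperation(
        StreamSegmentInformation.builder().name(newSegmentName).build());
// The operation carries no pre-assigned Segment Id, which is why the test sets one explicitly:
mapOp.setStreamSegmentId(newSegmentId);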