Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
From the class OperationProcessorTests, method testWithDataLogNotPrimaryException:
/**
 * Tests the ability of the OperationProcessor to handle a DataLogWriterNotPrimaryException (fence-out):
 * all pending operations must fail, and the processor must shut itself down in a FAILED state with the
 * correct failure cause.
 */
@Test
public void testWithDataLogNotPrimaryException() throws Exception {
    final int segmentCount = 1;
    final int appendsPerSegment = 1;
    @Cleanup
    TestContext context = new TestContext();

    // Generate some test data (no need to complicate ourselves with Transactions here; that is tested in the no-failure test).
    HashSet<Long> segmentIds = createStreamSegmentsInMetadata(segmentCount, context.metadata);
    List<Operation> operations = generateOperations(segmentIds, new HashMap<>(), appendsPerSegment,
            METADATA_CHECKPOINT_EVERY, false, false);

    // Set up an OperationProcessor and start it.
    @Cleanup
    TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, MAX_DATA_LOG_APPEND_SIZE, executorService());
    dataLog.initialize(TIMEOUT);
    @Cleanup
    OperationProcessor operationProcessor = new OperationProcessor(context.metadata, context.stateUpdater,
            dataLog, getNoOpCheckpointPolicy(), executorService());
    operationProcessor.startAsync().awaitRunning();

    // From now on, every async append fails with a (wrapped) DataLogWriterNotPrimaryException.
    ErrorInjector<Exception> asyncErrorInjector = new ErrorInjector<>(
            count -> true,
            () -> new CompletionException(new DataLogWriterNotPrimaryException("intentional")));
    dataLog.setAppendErrorInjectors(null, asyncErrorInjector);

    // Process all generated operations, then wait for them to complete. We are expecting exceptions,
    // so verify that we do get them.
    List<OperationWithCompletion> completionFutures = processOperations(operations, operationProcessor);
    AssertExtensions.assertThrows(
            "No operations failed.",
            OperationWithCompletion.allOf(completionFutures)::join,
            ex -> ex instanceof IOException || ex instanceof DataLogWriterNotPrimaryException);

    // Verify that the OperationProcessor automatically shuts down and that it has the right failure cause.
    ServiceListeners.awaitShutdown(operationProcessor, TIMEOUT, false);
    Assert.assertEquals("OperationProcessor is not in a failed state after fence-out detected.",
            Service.State.FAILED, operationProcessor.state());
    Assert.assertTrue("OperationProcessor did not fail with the correct exception.",
            operationProcessor.failureCause() instanceof DataLogWriterNotPrimaryException);
}
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
From the class OperationProcessorTests, method testConcurrentStopAndCommit:
/**
 * Tests a scenario where the OperationProcessor is shut down while a DataFrame is being processed and will
 * eventually complete successfully - however its operation should be cancelled.
 */
@Test
public void testConcurrentStopAndCommit() throws Exception {
    @Cleanup
    TestContext context = new TestContext();

    // Generate a single test append.
    val segmentId = createStreamSegmentsInMetadata(1, context.metadata).stream().findFirst().orElse(-1L);
    List<Operation> operations = Collections.singletonList(
            new StreamSegmentAppendOperation(segmentId, new byte[1], null));

    // The append will not finish until we explicitly complete this future.
    CompletableFuture<LogAddress> appendCallback = new CompletableFuture<>();

    // Set up an OperationProcessor backed by a manually-controlled DurableDataLog and start it.
    @Cleanup
    DurableDataLog dataLog = new ManualAppendOnlyDurableDataLog(() -> appendCallback);
    dataLog.initialize(TIMEOUT);
    @Cleanup
    OperationProcessor operationProcessor = new OperationProcessor(context.metadata, context.stateUpdater,
            dataLog, getNoOpCheckpointPolicy(), executorService());
    operationProcessor.startAsync().awaitRunning();

    // Queue the operation, initiate a shutdown while its append is still in flight, and only then let
    // the append finish.
    OperationWithCompletion completionFuture = processOperations(operations, operationProcessor)
            .stream().findFirst().orElse(null);
    operationProcessor.stopAsync();
    appendCallback.complete(new TestLogAddress(1));

    // Wait for the processor to stop.
    operationProcessor.awaitTerminated();

    // The operation should have been cancelled (due to the OperationProcessor shutting down) - no other
    // exception (or successful completion) is accepted.
    AssertExtensions.assertThrows(
            "Operation did not fail with the right exception.",
            () -> completionFuture.completion,
            ex -> ex instanceof CancellationException || ex instanceof ObjectClosedException);
}
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
From the class DurableLog, method triggerTailReads:
private void triggerTailReads() {
    this.executor.execute(() -> {
        // Gather the tail reads that can now be satisfied: those waiting on a sequence number
        // strictly below that of the last operation currently in the in-memory log.
        List<TailRead> eligible = Collections.emptyList();
        synchronized (this.tailReads) {
            Operation lastOp = this.inMemoryOperationLog.getLast();
            if (lastOp != null) {
                final long lastSeqNo = lastOp.getSequenceNumber();
                eligible = this.tailReads.stream()
                        .filter(tr -> tr.afterSequenceNumber < lastSeqNo)
                        .collect(Collectors.toList());
            }
        }

        // Trigger all of them outside the lock (no need to unregister them; the unregister handle
        // is already wired up).
        eligible.forEach(tr -> tr.future.complete(Futures.runOrFail(
                () -> this.inMemoryOperationLog.read(tr.afterSequenceNumber, tr.maxCount), tr.future)));
    });
}
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
From the class DurableLog, method truncate:
/**
 * Truncates the log up to (but not including) the MetadataCheckpointOperation with the given Sequence Number.
 * That operation must survive the truncation since it carries the metadata snapshot needed for recovery, so
 * the effective truncation point is the operation immediately before it.
 *
 * @param upToSequenceNumber The Sequence Number of a MetadataCheckpointOperation (a valid Truncation Point).
 * @param timeout            Timeout for the whole operation (shared across the individual async steps via TimeoutTimer).
 * @return A CompletableFuture that completes when the truncation has finished.
 * @throws IllegalArgumentException If upToSequenceNumber is not a valid Truncation Point.
 */
@Override
public CompletableFuture<Void> truncate(long upToSequenceNumber, Duration timeout) {
ensureRunning();
Preconditions.checkArgument(this.metadata.isValidTruncationPoint(upToSequenceNumber), "Invalid Truncation Point. Must refer to a MetadataCheckpointOperation.");
// The SequenceNumber we were given points directly to a MetadataCheckpointOperation. We must not remove it!
// Instead, it must be the first operation that does survive, so we need to adjust our SeqNo to the one just
// before it.
long actualTruncationSequenceNumber = upToSequenceNumber - 1;
// Find the closest Truncation Marker (that does not exceed it).
LogAddress truncationFrameAddress = this.metadata.getClosestTruncationMarker(actualTruncationSequenceNumber);
if (truncationFrameAddress == null) {
// No Truncation Marker at or below the requested point - nothing to truncate.
return CompletableFuture.completedFuture(null);
}
TimeoutTimer timer = new TimeoutTimer(timeout);
log.info("{}: Truncate (OperationSequenceNumber = {}, DataFrameAddress = {}).", this.traceObjectId, upToSequenceNumber, truncationFrameAddress);
// First append a StorageMetadataCheckpointOperation to the log, so that (presumably) its checkpoint
// info will be readily available upon recovery without delay. Then truncate the DurableDataLog up to
// the chosen frame, and finally bring the in-memory state in line with the new log contents.
return add(new StorageMetadataCheckpointOperation(), timer.getRemaining()).thenComposeAsync(v -> this.durableDataLog.truncate(truncationFrameAddress, timer.getRemaining()), this.executor).thenRunAsync(() -> {
// Truncate InMemory Transaction Log.
int count = this.inMemoryOperationLog.truncate(actualTruncationSequenceNumber);
// Remove old truncation markers.
this.metadata.removeTruncationMarkers(actualTruncationSequenceNumber);
// Report how many operations were evicted from the in-memory log.
this.operationProcessor.getMetrics().operationLogTruncate(count);
}, this.executor);
}
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
From the class OperationProcessor, method processOperation:
/**
 * Processes a single operation.
 * Steps:
 * <ol>
 * <li> Pre-processes operation (in MetadataUpdater).
 * <li> Assigns Sequence Number.
 * <li> Appends to DataFrameBuilder.
 * <li> Accepts operation in MetadataUpdater.
 * </ol>
 *
 * @param operation The operation to process.
 * @throws Exception If an exception occurred while processing this operation. Depending on the type of the
 * exception, this could be due to the operation itself being invalid, or because we are unable to process
 * any more operations.
 */
private void processOperation(CompletableOperation operation) throws Exception {
    Preconditions.checkState(!operation.isDone(), "The Operation has already been processed.");

    Operation op = operation.getOperation();
    if (!op.canSerialize()) {
        // Non-serializable operations have nothing to persist; skip them entirely.
        return;
    }

    synchronized (this.stateLock) {
        // The Metadata Updater has all the knowledge to fill in any missing data on the operation
        // (offsets, lengths, etc.), so let it do that first.
        this.metadataUpdater.preProcessOperation(op);

        // Now that the operation is ready to be serialized, assign it a sequence number, write it into
        // the current DataFrame, and have the Metadata Updater record its effects.
        op.setSequenceNumber(this.metadataUpdater.nextOperationSequenceNumber());
        this.dataFrameBuilder.append(op);
        this.metadataUpdater.acceptOperation(op);
    }

    log.trace("{}: DataFrameBuilder.Append {}.", this.traceObjectId, op);
}
Aggregations