use of io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation in project pravega by pravega.
the class StreamSegmentContainerTests method testExtensions.
/**
 * Tests the ability to register extensions.
 */
@Test
public void testExtensions() throws Exception {
    String segmentName = getSegmentName(123);
    ByteArraySegment data = getAppendData(segmentName, 0);

    // Configure extension.
    val operationProcessed = new CompletableFuture<SegmentOperation>();
    AtomicInteger count = new AtomicInteger();
    val writerProcessor = new TestWriterProcessor(op -> {
        if (op.getStreamSegmentId() != EXPECTED_METADATA_SEGMENT_ID) {
            // We need to exclude any appends that come from the MetadataStore, as those do not concern us.
            count.incrementAndGet();
            if (!operationProcessed.isDone()) {
                operationProcessed.complete(op);
            }
        }
    });
    val extension = new AtomicReference<TestSegmentContainerExtension>();
    SegmentContainerFactory.CreateExtensions additionalExtensions = (container, executor) -> {
        Assert.assertTrue("Already created", extension.compareAndSet(null, new TestSegmentContainerExtension(Collections.singleton(writerProcessor))));
        return Collections.singletonMap(TestSegmentContainerExtension.class, extension.get());
    };
    @Cleanup
    val context = new TestContext(DEFAULT_CONFIG, additionalExtensions);
    context.container.startAsync().awaitRunning();

    // Verify getExtension().
    val p = context.container.getExtension(TestSegmentContainerExtension.class);
    Assert.assertEquals("Unexpected result from getExtension().", extension.get(), p);

    // Verify Writer Segment Processors are properly wired in.
    context.container.createStreamSegment(segmentName, getSegmentType(segmentName), null, TIMEOUT).join();
    context.container.append(segmentName, data, null, TIMEOUT).join();
    val rawOp = operationProcessed.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertTrue("Unexpected operation type.", rawOp instanceof CachedStreamSegmentAppendOperation);

    // Our operation has been transformed into a CachedStreamSegmentAppendOperation, which means it just points to
    // a location in the cache. We do not have access to that cache, so we can only verify its metadata.
    val appendOp = (CachedStreamSegmentAppendOperation) rawOp;
    Assert.assertEquals("Unexpected offset.", 0, appendOp.getStreamSegmentOffset());
    Assert.assertEquals("Unexpected data length.", data.getLength(), appendOp.getLength());
    Assert.assertNull("Unexpected attribute updates.", appendOp.getAttributeUpdates());

    // Verify extension is closed when the SegmentContainer is closed.
    context.container.close();
    Assert.assertTrue("Extension not closed.", extension.get().closed.get());
}
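TestWriterProcessor and TestSegmentContainerExtension are test fixtures defined elsewhere in StreamSegmentContainerTests and are not shown here. A minimal, self-contained sketch of the same idea (forward every operation to a callback, record close()) could look like the following; the OperationSink and TrackingExtension names and the reduced method set are assumptions for illustration only, not Pravega's actual SegmentContainerExtension or WriterSegmentProcessor interfaces, which declare additional methods.

import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

// Hypothetical stand-ins for the test helpers referenced above. Pravega's real
// SegmentContainerExtension and WriterSegmentProcessor interfaces declare more
// lifecycle and flush methods than are shown in this sketch.
final class OperationSink {

    private final Consumer<Object> onOperation;

    OperationSink(Consumer<Object> onOperation) {
        this.onOperation = onOperation;
    }

    // Invoked for every operation the Writer hands to this processor.
    void add(Object operation) {
        this.onOperation.accept(operation);
    }
}

final class TrackingExtension implements AutoCloseable {

    final AtomicBoolean closed = new AtomicBoolean(false);
    private final Collection<OperationSink> processors;

    TrackingExtension(Collection<OperationSink> processors) {
        this.processors = processors;
    }

    // The container asks the extension for per-segment writer processors.
    Collection<OperationSink> createWriterSegmentProcessors() {
        return Collections.unmodifiableCollection(this.processors);
    }

    @Override
    public void close() {
        // The test above asserts this flag after SegmentContainer.close().
        this.closed.set(true);
    }
}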
use of io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation in project pravega by pravega.
the class DurableLogTests method testRecoveryPartialOperations.
/**
 * Tests the ability of the DurableLog to properly recover from situations where operations were split across multiple
 * DataFrames, but were not persisted in their entirety. These operations should be ignored as they are incomplete
 * and were never acknowledged to the upstream callers.
 */
@Test
public void testRecoveryPartialOperations() {
    // Setup the first Durable Log and create the segment.
    @Cleanup
    ContainerSetup setup = new ContainerSetup(executorService());
    @Cleanup
    DurableLog dl1 = setup.createDurableLog();
    dl1.startAsync().awaitRunning();
    Assert.assertNotNull("Internal error: could not grab a pointer to the created TestDurableDataLog.", setup.dataLog.get());
    val segmentId = createStreamSegmentsWithOperations(1, dl1).stream().findFirst().orElse(-1L);

    // Part of this operation should fail.
    ErrorInjector<Exception> asyncErrorInjector = new ErrorInjector<>(count -> count == 1, () -> new DurableDataLogException("intentional"));
    setup.dataLog.get().setAppendErrorInjectors(null, asyncErrorInjector);
    val append1 = new StreamSegmentAppendOperation(segmentId, new ByteArraySegment(new byte[MAX_DATA_LOG_APPEND_SIZE]), null);
    AssertExtensions.assertSuppliedFutureThrows("Expected the operation to have failed.",
            () -> dl1.add(append1, OperationPriority.Normal, TIMEOUT),
            ex -> ex instanceof DurableDataLogException);
    AssertExtensions.assertThrows("Expected the DurableLog to have failed after failed operation.",
            dl1::awaitTerminated,
            ex -> ex instanceof IllegalStateException);
    dl1.close();
    setup.dataLog.get().setAppendErrorInjectors(null, null);

    // Setup the second Durable Log. Ensure the recovery succeeds and that we don't see that failed operation.
    @Cleanup
    val dl2 = setup.createDurableLog();
    dl2.startAsync().awaitRunning();
    val ops2 = dl2.read(10, TIMEOUT).join();
    Assert.assertTrue("Expected first operation to be a checkpoint.", !ops2.isEmpty() && ops2.poll() instanceof MetadataCheckpointOperation);
    Assert.assertTrue("Expected second operation to be a segment map.", !ops2.isEmpty() && ops2.poll() instanceof StreamSegmentMapOperation);
    Assert.assertTrue("Not expecting any other operations.", ops2.isEmpty());

    // Add a new operation. This one should succeed.
    val append2 = new StreamSegmentAppendOperation(segmentId, new ByteArraySegment(new byte[10]), null);
    dl2.add(append2, OperationPriority.Normal, TIMEOUT).join();
    dl2.stopAsync().awaitTerminated();
    dl2.close();

    // Setup the third Durable Log. Ensure the recovery succeeds and that we only see the operations we care about.
    @Cleanup
    val dl3 = setup.createDurableLog();
    dl3.startAsync().awaitRunning();
    val ops3 = dl3.read(10, TIMEOUT).join();
    Assert.assertTrue("Expected first operation to be a checkpoint.", !ops3.isEmpty() && ops3.poll() instanceof MetadataCheckpointOperation);
    Assert.assertTrue("Expected second operation to be a segment map.", !ops3.isEmpty() && ops3.poll() instanceof StreamSegmentMapOperation);
    Assert.assertTrue("Expected third operation to be an append.", !ops3.isEmpty() && ops3.poll() instanceof CachedStreamSegmentAppendOperation);
    Assert.assertTrue("Not expecting any other operations.", ops3.isEmpty());
    dl3.stopAsync().awaitTerminated();
    dl3.close();
}
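The mid-test failure is produced by the ErrorInjector handed to setAppendErrorInjectors: a predicate over an invocation counter selects which call throws. A stripped-down sketch of that pattern, using the hypothetical names CountingErrorInjector and maybeThrow rather than Pravega's test utility:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.IntPredicate;
import java.util.function.Supplier;

// Hypothetical, simplified version of count-triggered error injection.
final class CountingErrorInjector<E extends Exception> {

    private final AtomicInteger invocationCount = new AtomicInteger();
    private final IntPredicate trigger;
    private final Supplier<E> exceptionSupplier;

    CountingErrorInjector(IntPredicate trigger, Supplier<E> exceptionSupplier) {
        this.trigger = trigger;
        this.exceptionSupplier = exceptionSupplier;
    }

    // Call at the start of each instrumented operation; throws only for the
    // invocations selected by the trigger predicate (1-based in this sketch).
    void maybeThrow() throws E {
        int count = this.invocationCount.incrementAndGet();
        if (this.trigger.test(count)) {
            throw this.exceptionSupplier.get();
        }
    }
}

Wired into an append path, new CountingErrorInjector<>(c -> c == 1, () -> new DurableDataLogException("intentional")) would fail a single call and leave the rest untouched, which is what lets the test above produce a partially persisted, never-acknowledged operation.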
use of io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation in project pravega by pravega.
the class WriterTableProcessor method add.
// endregion

// region WriterSegmentProcessor Implementation

@Override
public void add(SegmentOperation operation) throws DataCorruptionException {
    Exceptions.checkNotClosed(this.closed.get(), this);
    Preconditions.checkArgument(operation.getStreamSegmentId() == this.connector.getMetadata().getId(),
            "Operation '%s' refers to a different Segment than this one (%s).", operation, this.connector.getMetadata().getId());
    Preconditions.checkArgument(operation.getSequenceNumber() != Operation.NO_SEQUENCE_NUMBER,
            "Operation '%s' does not have a Sequence Number assigned.", operation);
    if (this.connector.getMetadata().isDeleted() || !(operation instanceof CachedStreamSegmentAppendOperation)) {
        // Segment is either deleted or this is not an append operation. Nothing for us to do here.
        return;
    }
    CachedStreamSegmentAppendOperation append = (CachedStreamSegmentAppendOperation) operation;
    if (this.lastAddedOffset.get() >= 0) {
        // We have processed at least one operation so far. Verify operations are contiguous.
        if (this.lastAddedOffset.get() != append.getStreamSegmentOffset()) {
            throw new DataCorruptionException(String.format("Wrong offset for Operation '%s'. Expected: %s, actual: %d.",
                    operation, this.lastAddedOffset, append.getStreamSegmentOffset()));
        }
    } else {
        // This is the first operation we process. Verify that it does not begin beyond the last indexed
        // offset, so we resume from that offset and are not skipping any updates.
        if (this.aggregator.getLastIndexedOffset() < append.getStreamSegmentOffset()) {
            throw new DataCorruptionException(String.format("Operation '%s' begins after TABLE_INDEXED_OFFSET. Expected: %s, actual: %d.",
                    operation, this.aggregator.getLastIndexedOffset(), append.getStreamSegmentOffset()));
        }
    }
    if (append.getStreamSegmentOffset() >= this.aggregator.getLastIndexedOffset()) {
        // Operation has not been indexed yet; add it to the internal list so we can process it.
        // NOTE: appends that contain more than one TableEntry (for batch updates) will be indexed atomically (either
        // all Table Entries are indexed or none), so it is safe to compare this with the first offset of the append.
        this.aggregator.add(append);
        this.lastAddedOffset.set(append.getLastStreamSegmentOffset());
        log.debug("{}: Add {} (State={}).", this.traceObjectId, operation, this.aggregator);
    } else {
        log.debug("{}: Skipped {} (State={}).", this.traceObjectId, operation, this.aggregator);
    }
}
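The invariant add() protects is offset contiguity: the first accepted append may not begin past getLastIndexedOffset(), each subsequent append must begin exactly where the previous one ended, and anything starting below the last indexed offset is skipped as already indexed. A reduced sketch of just that bookkeeping, with the hypothetical names ContiguityTracker and accept and none of Pravega's metadata or closure checks:

// Hypothetical illustration of the contiguity and skip logic above; not Pravega's API.
final class ContiguityTracker {

    private long lastAddedOffset = -1;    // -1 means nothing has been accepted yet.
    private final long lastIndexedOffset; // Stands in for aggregator.getLastIndexedOffset().

    ContiguityTracker(long lastIndexedOffset) {
        this.lastIndexedOffset = lastIndexedOffset;
    }

    // Returns true if the append (starting at offset, of the given length) should be
    // aggregated, false if it is already covered by the index and can be skipped.
    boolean accept(long offset, int length) {
        if (this.lastAddedOffset >= 0) {
            // Subsequent appends must begin exactly where the previous one ended.
            if (this.lastAddedOffset != offset) {
                throw new IllegalStateException("Non-contiguous append at offset " + offset);
            }
        } else if (this.lastIndexedOffset < offset) {
            // The first append must not begin beyond the already-indexed prefix,
            // otherwise updates would be silently skipped.
            throw new IllegalStateException("Append begins after the last indexed offset.");
        }
        if (offset >= this.lastIndexedOffset) {
            this.lastAddedOffset = offset + length; // Equivalent of getLastStreamSegmentOffset().
            return true;
        }
        return false;
    }
}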