Use of io.pravega.segmentstore.server.SegmentOperation in project pravega by pravega.
In class StreamSegmentContainerTests, method testForSegmentPriority.
/**
* Tests {@link StreamSegmentContainer#forSegment(String, OperationPriority, Duration)}.
*/
@Test
public void testForSegmentPriority() throws Exception {
    val segmentName = "Test";
    @Cleanup val context = new TestContext(DEFAULT_CONFIG, NO_TRUNCATIONS_DURABLE_LOG_CONFIG, INFREQUENT_FLUSH_WRITER_CONFIG, null);
    val durableLog = new AtomicReference<OperationLog>();
    val durableLogFactory = new WatchableOperationLogFactory(context.operationLogFactory, durableLog::set);
    @Cleanup val container = new StreamSegmentContainer(CONTAINER_ID, DEFAULT_CONFIG, durableLogFactory, context.readIndexFactory, context.attributeIndexFactory, new NoOpWriterFactory(), context.storageFactory, context.getDefaultExtensions(), executorService());
    container.startAsync().awaitRunning();
    container.createStreamSegment(segmentName, SegmentType.STREAM_SEGMENT, null, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Create a few operations using forSegment with the desired priority.
    val s1 = container.forSegment(segmentName, OperationPriority.Critical, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    val futures = new ArrayList<CompletableFuture<Void>>();
    futures.add(Futures.toVoid(s1.append(new ByteArraySegment(new byte[1]), null, TIMEOUT)));
    futures.add(s1.updateAttributes(AttributeUpdateCollection.from(new AttributeUpdate(AttributeId.randomUUID(), AttributeUpdateType.Replace, 1)), TIMEOUT));
    futures.add(s1.truncate(1, TIMEOUT));
    futures.add(Futures.toVoid(s1.seal(TIMEOUT)));
    Futures.allOf(futures).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Wait for all operations to be added to the durable log, then fetch them all. We stop when we encounter the Seal we just added.
    val ops = readDurableLog(durableLog.get(), op -> op instanceof StreamSegmentSealOperation);

    // For the operations we do care about, verify they have the right priority.
    int count = 0;
    for (val op : ops) {
        if (op instanceof SegmentOperation && ((SegmentOperation) op).getStreamSegmentId() == s1.getSegmentId()) {
            count++;
            Assert.assertEquals("Unexpected priority for " + op, OperationPriority.Critical, op.getDesiredPriority());
        }
    }
    AssertExtensions.assertGreaterThan("Expected at least one operation to be verified.", 0, count);
}
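The verification loop above relies on the basic pattern this page documents: narrowing a generic Operation to a SegmentOperation before reading its segment id. Below is a minimal, self-contained sketch of that pattern; the SegmentOperationUtils class and the collectSegmentIds helper are illustrative assumptions, while only the Operation/SegmentOperation types and getStreamSegmentId() come from Pravega (package paths may differ across versions).

import io.pravega.segmentstore.server.SegmentOperation;
import io.pravega.segmentstore.server.logs.operations.Operation;

import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Illustrative helper: collects the ids of all segments touched by the given operations.
final class SegmentOperationUtils {
    private SegmentOperationUtils() {
    }

    static Set<Long> collectSegmentIds(List<Operation> operations) {
        Set<Long> segmentIds = new HashSet<>();
        for (Operation op : operations) {
            // Non-segment operations (e.g. checkpoints) carry no segment id and are skipped.
            if (op instanceof SegmentOperation) {
                segmentIds.add(((SegmentOperation) op).getStreamSegmentId());
            }
        }
        return segmentIds;
    }
}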
Use of io.pravega.segmentstore.server.SegmentOperation in project pravega by pravega.
In class MemoryStateUpdaterTests, method testProcess.
/**
* Tests the functionality of the process() method.
*/
@Test
public void testProcess() throws Exception {
    int segmentCount = 10;
    int operationCountPerType = 5;
    // Add to MTL + Add to ReadIndex (append; beginMerge).
    InMemoryLog opLog = new InMemoryLog();
    val readIndex = mock(ReadIndex.class);
    val triggerSegmentIds = new ArrayList<Long>();
    doAnswer(x -> {
        triggerSegmentIds.clear();
        triggerSegmentIds.addAll(x.getArgument(0));
        return null;
    }).when(readIndex).triggerFutureReads(anyCollection());
    val invocations = new ArrayList<InvocationOnMock>();
    doAnswer(invocations::add).when(readIndex).append(anyLong(), anyLong(), any());
    doAnswer(invocations::add).when(readIndex).beginMerge(anyLong(), anyLong(), anyLong());
    MemoryStateUpdater updater = new MemoryStateUpdater(opLog, readIndex);
    ArrayList<Operation> operations = populate(updater, segmentCount, operationCountPerType);

    // Verify they were properly processed.
    Queue<Operation> logIterator = opLog.poll(operations.size());
    int currentIndex = -1;
    val invocationIterator = invocations.iterator();
    while (!logIterator.isEmpty()) {
        currentIndex++;
        Operation expected = operations.get(currentIndex);
        Operation actual = logIterator.poll();
        if (expected instanceof StorageOperation) {
            val invokedMethod = invocationIterator.next();
            if (expected instanceof StreamSegmentAppendOperation) {
                Assert.assertTrue("StreamSegmentAppendOperation was not added as a CachedStreamSegmentAppendOperation to the Memory Log.", actual instanceof CachedStreamSegmentAppendOperation);
                StreamSegmentAppendOperation appendOp = (StreamSegmentAppendOperation) expected;
                Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was not added to the ReadIndex.", "append", invokedMethod.getMethod().getName());
                Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", appendOp.getStreamSegmentId(), (long) invokedMethod.getArgument(0));
                Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", appendOp.getStreamSegmentOffset(), (long) invokedMethod.getArgument(1));
                Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", appendOp.getData(), invokedMethod.getArgument(2));
            } else if (expected instanceof MergeSegmentOperation) {
                MergeSegmentOperation mergeOp = (MergeSegmentOperation) expected;
                Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was not added to the ReadIndex.", "beginMerge", invokedMethod.getMethod().getName());
                Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", mergeOp.getStreamSegmentId(), (long) invokedMethod.getArgument(0));
                Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", mergeOp.getStreamSegmentOffset(), (long) invokedMethod.getArgument(1));
                Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", mergeOp.getSourceSegmentId(), (long) invokedMethod.getArgument(2));
            }
        }
    }

    // Verify triggerFutureReads args.
    val expectedSegmentIds = operations.stream().filter(op -> op instanceof SegmentOperation).map(op -> ((SegmentOperation) op).getStreamSegmentId()).collect(Collectors.toSet());
    AssertExtensions.assertContainsSameElements("ReadIndex.triggerFutureReads() was called with the wrong set of StreamSegmentIds.", expectedSegmentIds, triggerSegmentIds);

    // Test DataCorruptionException.
    AssertExtensions.assertThrows("MemoryStateUpdater accepted an operation that was out of order.", () -> updater.process(new MergeSegmentOperation(1, 2)), ex -> ex instanceof DataCorruptionException);
}
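The stubbing at the top of this test is standard Mockito: doAnswer() captures the arguments of each mock invocation so they can be asserted later. The following small, self-contained sketch shows the same capture pattern in isolation; the Notifier interface is a hypothetical stand-in and is not part of Pravega or Mockito.

import static org.mockito.ArgumentMatchers.anyCollection;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

// Hypothetical collaborator used only for this illustration.
interface Notifier {
    void notifySegments(Collection<Long> segmentIds);
}

public class DoAnswerCaptureExample {
    public static void main(String[] args) {
        List<Long> captured = new ArrayList<>();
        Notifier notifier = mock(Notifier.class);
        // Record the argument of every call instead of letting the mock swallow it.
        doAnswer(invocation -> {
            captured.clear();
            captured.addAll(invocation.getArgument(0));
            return null;
        }).when(notifier).notifySegments(anyCollection());

        notifier.notifySegments(List.of(1L, 2L, 3L));
        System.out.println(captured); // [1, 2, 3]
    }
}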
Use of io.pravega.segmentstore.server.SegmentOperation in project pravega by pravega.
In class MemoryStateUpdater, method process.
/**
* Processes the given operations and applies them to the ReadIndex and InMemory OperationLog.
*
* @param operations An Iterator iterating over the operations to process (in sequence).
* @param callback A Consumer that will be invoked on EVERY {@link Operation} in the operations iterator, in the
* order returned from the iterator, regardless of whether the operation was processed or not.
* @throws ServiceHaltException If a serious, non-recoverable state was detected, such as unable to create a
* CachedStreamSegmentAppendOperation.
* @throws CacheFullException If any operation in the given iterator contains data that needs to be added to the
* {@link ReadIndex} but it could not be done due to the cache being full and unable
* to evict anything to make room for more.
*/
void process(Iterator<Operation> operations, Consumer<Operation> callback) throws ServiceHaltException, CacheFullException {
    HashSet<Long> segmentIds = new HashSet<>();
    Operation op = null;
    try {
        while (operations.hasNext()) {
            op = operations.next();
            process(op);
            callback.accept(op);
            if (op instanceof SegmentOperation) {
                // Record recent activity on stream segment, if applicable. This should be recorded for any kind
                // of Operation that touches a Segment, since when we issue 'triggerFutureReads' on the readIndex,
                // it should include 'sealed' StreamSegments too - any Future Reads waiting on that Offset will be cancelled.
                segmentIds.add(((SegmentOperation) op).getStreamSegmentId());
            }
        }
        op = null;
    } catch (Throwable ex) {
        // Invoke the callback on every remaining operation (including the failed one, which is no longer part of the iterator).
        if (op != null) {
            callback.accept(op);
        }
        operations.forEachRemaining(callback);
        throw ex;
    }
    if (!this.recoveryMode.get()) {
        // Trigger Future Reads on those segments which were touched by Appends or Seals.
        this.readIndex.triggerFutureReads(segmentIds);
    }
}
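The catch block above is what delivers the documented contract: the callback still sees every operation, including the one that failed and everything left in the iterator, before the exception is rethrown. Here is a small, self-contained sketch of that drain-on-failure pattern, with a hypothetical processOne step standing in for the per-operation processing done by process(Operation).

import java.util.Iterator;
import java.util.List;
import java.util.function.Consumer;

public class DrainOnFailureExample {
    // Processes each item and invokes the callback on it; on failure, still hands every
    // remaining item (including the failed one) to the callback before rethrowing.
    static void processAll(Iterator<Integer> items, Consumer<Integer> callback) {
        Integer current = null;
        try {
            while (items.hasNext()) {
                current = items.next();
                processOne(current);
                callback.accept(current);
            }
            current = null;
        } catch (RuntimeException ex) {
            if (current != null) {
                callback.accept(current);
            }
            items.forEachRemaining(callback);
            throw ex;
        }
    }

    // Hypothetical processing step; fails on negative values for demonstration.
    private static void processOne(Integer value) {
        if (value < 0) {
            throw new IllegalArgumentException("negative value: " + value);
        }
    }

    public static void main(String[] args) {
        try {
            processAll(List.of(1, 2, -3, 4).iterator(), v -> System.out.println("callback: " + v));
        } catch (RuntimeException ex) {
            System.out.println("failed: " + ex.getMessage());
        }
    }
}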
Use of io.pravega.segmentstore.server.SegmentOperation in project pravega by pravega.
In class ContainerMetadataUpdateTransaction, method preProcessOperation.
/**
* Pre-processes the given Operation. See OperationMetadataUpdater.preProcessOperation for more details on behavior.
*
* @param operation The operation to pre-process.
* @throws ContainerException If the given operation was rejected given the current state of the container metadata.
* @throws StreamSegmentException If the given operation was incompatible with the current state of the Segment.
* For example: StreamSegmentNotExistsException, StreamSegmentSealedException or
* StreamSegmentMergedException.
*/
void preProcessOperation(Operation operation) throws ContainerException, StreamSegmentException {
    checkNotSealed();
    if (operation instanceof SegmentOperation) {
        val segmentMetadata = getSegmentUpdateTransaction(((SegmentOperation) operation).getStreamSegmentId());
        if (segmentMetadata.isDeleted()) {
            throw new StreamSegmentNotExistsException(segmentMetadata.getName());
        }
        if (operation instanceof StreamSegmentAppendOperation) {
            segmentMetadata.preProcessOperation((StreamSegmentAppendOperation) operation);
        } else if (operation instanceof StreamSegmentSealOperation) {
            segmentMetadata.preProcessOperation((StreamSegmentSealOperation) operation);
        } else if (operation instanceof MergeSegmentOperation) {
            MergeSegmentOperation mbe = (MergeSegmentOperation) operation;
            SegmentMetadataUpdateTransaction sourceMetadata = getSegmentUpdateTransaction(mbe.getSourceSegmentId());
            sourceMetadata.preProcessAsSourceSegment(mbe);
            segmentMetadata.preProcessAsTargetSegment(mbe, sourceMetadata);
        } else if (operation instanceof UpdateAttributesOperation) {
            segmentMetadata.preProcessOperation((UpdateAttributesOperation) operation);
        } else if (operation instanceof StreamSegmentTruncateOperation) {
            segmentMetadata.preProcessOperation((StreamSegmentTruncateOperation) operation);
        } else if (operation instanceof DeleteSegmentOperation) {
            segmentMetadata.preProcessOperation((DeleteSegmentOperation) operation);
        }
    }
    if (operation instanceof MetadataCheckpointOperation) {
        // MetadataCheckpointOperations do not require preProcess and accept; they can be handled in a single stage.
        processMetadataOperation((MetadataCheckpointOperation) operation);
    } else if (operation instanceof StorageMetadataCheckpointOperation) {
        // StorageMetadataCheckpointOperations do not require preProcess and accept; they can be handled in a single stage.
        processMetadataOperation((StorageMetadataCheckpointOperation) operation);
    } else if (operation instanceof StreamSegmentMapOperation) {
        preProcessMetadataOperation((StreamSegmentMapOperation) operation);
    }
}
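For reference, on a Java 16+ toolchain the same dispatch could be written with pattern matching for instanceof, which removes the explicit casts. The fragment below is only an illustrative rewrite of the first half of the method above, not the actual Pravega implementation.

void preProcessOperation(Operation operation) throws ContainerException, StreamSegmentException {
    checkNotSealed();
    if (operation instanceof SegmentOperation segmentOp) {
        val segmentMetadata = getSegmentUpdateTransaction(segmentOp.getStreamSegmentId());
        if (segmentMetadata.isDeleted()) {
            throw new StreamSegmentNotExistsException(segmentMetadata.getName());
        }
        if (operation instanceof StreamSegmentAppendOperation append) {
            segmentMetadata.preProcessOperation(append);
        } else if (operation instanceof StreamSegmentSealOperation seal) {
            segmentMetadata.preProcessOperation(seal);
        } else if (operation instanceof MergeSegmentOperation merge) {
            SegmentMetadataUpdateTransaction sourceMetadata = getSegmentUpdateTransaction(merge.getSourceSegmentId());
            sourceMetadata.preProcessAsSourceSegment(merge);
            segmentMetadata.preProcessAsTargetSegment(merge, sourceMetadata);
        }
        // ...the remaining branches (attribute updates, truncate, delete) and the
        // checkpoint/map handling keep the same shape as the original method.
    }
}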
Use of io.pravega.segmentstore.server.SegmentOperation in project pravega by pravega.
In class StreamSegmentContainerTests, method testExtensions.
/**
* Tests the ability to register extensions.
*/
@Test
public void testExtensions() throws Exception {
    String segmentName = getSegmentName(123);
    ByteArraySegment data = getAppendData(segmentName, 0);

    // Configure extension.
    val operationProcessed = new CompletableFuture<SegmentOperation>();
    AtomicInteger count = new AtomicInteger();
    val writerProcessor = new TestWriterProcessor(op -> {
        if (op.getStreamSegmentId() != EXPECTED_METADATA_SEGMENT_ID) {
            // We need to exclude any appends that come from the MetadataStore as those do not concern us.
            count.incrementAndGet();
            if (!operationProcessed.isDone()) {
                operationProcessed.complete(op);
            }
        }
    });
    val extension = new AtomicReference<TestSegmentContainerExtension>();
    SegmentContainerFactory.CreateExtensions additionalExtensions = (container, executor) -> {
        Assert.assertTrue("Already created", extension.compareAndSet(null, new TestSegmentContainerExtension(Collections.singleton(writerProcessor))));
        return Collections.singletonMap(TestSegmentContainerExtension.class, extension.get());
    };
    @Cleanup val context = new TestContext(DEFAULT_CONFIG, additionalExtensions);
    context.container.startAsync().awaitRunning();

    // Verify getExtension().
    val p = context.container.getExtension(TestSegmentContainerExtension.class);
    Assert.assertEquals("Unexpected result from getExtension().", extension.get(), p);

    // Verify Writer Segment Processors are properly wired in.
    context.container.createStreamSegment(segmentName, getSegmentType(segmentName), null, TIMEOUT).join();
    context.container.append(segmentName, data, null, TIMEOUT).join();
    val rawOp = operationProcessed.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertTrue("Unexpected operation type.", rawOp instanceof CachedStreamSegmentAppendOperation);

    // Our operation has been transformed into a CachedStreamSegmentAppendOperation, which means it just points to
    // a location in the cache. We do not have access to that cache, so we can only verify its metadata.
    val appendOp = (CachedStreamSegmentAppendOperation) rawOp;
    Assert.assertEquals("Unexpected offset.", 0, appendOp.getStreamSegmentOffset());
    Assert.assertEquals("Unexpected data length.", data.getLength(), appendOp.getLength());
    Assert.assertNull("Unexpected attribute updates.", appendOp.getAttributeUpdates());

    // Verify extension is closed when the SegmentContainer is closed.
    context.container.close();
    Assert.assertTrue("Extension not closed.", extension.get().closed.get());
}
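The CreateExtensions lambda in this test follows a create-once-and-register-by-class pattern, which is what makes the later getExtension(TestSegmentContainerExtension.class) lookup resolve the same instance. The following self-contained sketch isolates that pattern; the ContainerExtension and MetricsExtension types are hypothetical stand-ins, not the Pravega interfaces used in the test.

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

// Illustrative stand-in for a closeable container extension.
interface ContainerExtension extends AutoCloseable {
    @Override
    void close();
}

final class MetricsExtension implements ContainerExtension {
    volatile boolean closed;

    @Override
    public void close() {
        this.closed = true;
    }
}

public class ExtensionRegistrationExample {
    public static void main(String[] args) {
        // Mirror of the CreateExtensions lambda: create exactly one instance and
        // register it under its class, so a getExtension(Class)-style lookup can find it later.
        AtomicReference<MetricsExtension> extension = new AtomicReference<>();
        if (!extension.compareAndSet(null, new MetricsExtension())) {
            throw new IllegalStateException("Already created");
        }
        Map<Class<? extends ContainerExtension>, ContainerExtension> registry =
                Collections.singletonMap(MetricsExtension.class, extension.get());

        // Lookup by class, as the test does via getExtension().
        MetricsExtension resolved = (MetricsExtension) registry.get(MetricsExtension.class);
        System.out.println("resolved same instance: " + (resolved == extension.get()));

        // Closing the container closes its extensions.
        registry.values().forEach(ContainerExtension::close);
        System.out.println("closed: " + resolved.closed);
    }
}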