Use of io.pravega.segmentstore.server.WriterSegmentProcessor in project pravega (by pravega):
the class AckCalculator, method getHighestCommittedSequenceNumber.
/**
 * Determines the largest Sequence Number that can be safely truncated from the Writer's Data Source. All operations
 * up to, and including the one for this Sequence Number have been successfully committed to External Storage.
 *
 * The Sequence Number we acknowledge has the property that all operations up to, and including it, have been
 * committed to Storage.
 *
 * This is computed by inspecting all the active SegmentAggregators and picking the Lowest Uncommitted
 * Sequence Number (LUSN) among all of those Aggregators that have any outstanding data. The LUSN for each aggregator
 * has the property that, within the context of that Aggregator alone, all Operations with a Sequence Number (SN)
 * smaller than LUSN have been committed to Storage. As such, the smallest of all LUSN values across
 * all the active SegmentAggregators gives us the highest SN that can be safely truncated out of the OperationLog.
 * Since LUSN still points to an uncommitted Operation, 1 is subtracted from it to obtain the highest SN
 * that can be truncated up to (and including).
 *
 * If we have no active Aggregators, then all operations that were passed to us have been committed, so we can
 * safely truncate up to LastReadSequenceNumber.
 *
 * As opposed from {@link #getLowestUncommittedSequenceNumber}, this method should be called for
 * {@link WriterSegmentProcessor} instances that deal with different Segments.
 *
 * @param processors The {@link WriterSegmentProcessor} to inspect for commit status.
 * @param <T> {@link WriterSegmentProcessor} type.
 * @return The Highest Committed Sequence Number.
 */
<T extends WriterSegmentProcessor> long getHighestCommittedSequenceNumber(Iterable<T> processors) {
long highestCommitted = Long.MAX_VALUE;
for (T processor : processors) {
if (processor.isClosed()) {
continue;
}
long lusn = processor.getLowestUncommittedSequenceNumber();
if (lusn >= 0) {
// LUSN points at the first uncommitted operation; everything strictly before it is committed,
// so the highest truncatable SN contributed by this processor is LUSN - 1.
highestCommitted = Math.min(highestCommitted, lusn - 1);
}
}
// Cap at LastReadSequenceNumber: it would only be exceeded if there are no aggregators or if they
// are all empty - which means everything read so far has been processed.
return Math.min(highestCommitted, this.state.getLastReadSequenceNumber());
}
Use of io.pravega.segmentstore.server.WriterSegmentProcessor in project pravega (by pravega):
the class StorageWriterTests, method testMultipleProcessors.
/**
 * Tests the StorageWriter with multiple Segment Processors.
 */
@Test
public void testMultipleProcessors() throws Exception {
final int processorsPerSegment = 2;
// Maps SegmentId -> the processors created for that segment, so we can verify them afterwards.
val processorsBySegment = new HashMap<Long, ArrayList<WriterSegmentProcessor>>();
WriterFactory.CreateProcessors factory = sm -> {
Assert.assertFalse("Processors already created for segment " + sm, processorsBySegment.containsKey(sm.getId()));
val created = new ArrayList<WriterSegmentProcessor>();
for (int count = 0; count < processorsPerSegment; count++) {
created.add(new TestWriterProcessor());
}
processorsBySegment.put(sm.getId(), created);
return created;
};
@Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, factory);
context.writer.startAsync();

// Create a bunch of segments and Transactions.
ArrayList<Long> segmentIds = createSegments(context);
HashMap<Long, ArrayList<Long>> transactionsBySegment = createTransactions(segmentIds, context);
ArrayList<Long> transactionIds = new ArrayList<>();
transactionsBySegment.values().forEach(transactionIds::addAll);

// Append data to both standalone segments and transactions, then seal and merge the transactions.
HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
appendDataBreadthFirst(segmentIds, segmentContents, context);
appendDataBreadthFirst(transactionIds, segmentContents, context);
sealSegments(transactionIds, context);
mergeTransactions(transactionIds, segmentContents, context);
sealSegments(segmentIds, context);

// Wait for the writer to complete its job.
metadataCheckpoint(context);
context.dataSource.waitFullyAcked().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

// Verify the processors. Keys are unused here, so iterate the values directly.
for (val segmentProcessors : processorsBySegment.values()) {
// Verify they received the same operations.
val reference = (TestWriterProcessor) segmentProcessors.get(0);
AssertExtensions.assertGreaterThan("Expected at least one operation.", 0, reference.operations.size());
for (int idx = 1; idx < segmentProcessors.size(); idx++) {
AssertExtensions.assertListEquals("Not all processors for the same segment received the same operations.", reference.operations, ((TestWriterProcessor) segmentProcessors.get(idx)).operations, Object::equals);
}

// Verify they have all been flushed.
for (val processor : segmentProcessors) {
Assert.assertEquals("Not all processors were flushed.", -1, processor.getLowestUncommittedSequenceNumber());
}
}

// Verify that the main Segment Aggregators were still able to do their jobs.
verifyFinalOutput(segmentContents, transactionIds, context);

// Closing the writer must close every processor it created.
context.writer.close();
for (val segmentProcessors : processorsBySegment.values()) {
for (val processor : segmentProcessors) {
Assert.assertTrue("Not all processors were closed.", processor.isClosed());
}
}
}
Aggregations