Use of io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation in project pravega by pravega.
The class SegmentAggregator, method reconcileAppendOperation.
/**
* Attempts to reconcile the given Append Operation. Since Append Operations can be partially flushed, reconciliation
* may be for the full operation or for a part of it.
*
* @param op The Operation (StreamSegmentAppendOperation or CachedStreamSegmentAppendOperation) to reconcile.
* @param storageInfo The current state of the Segment in Storage.
* @param timer Timer for the operation.
* @return A CompletableFuture containing a FlushResult with the number of bytes reconciled, or failed with a ReconciliationFailureException,
* if the operation cannot be reconciled, based on the in-memory metadata or the current state of the Segment in Storage.
*/
private CompletableFuture<FlushResult> reconcileAppendOperation(StorageOperation op, SegmentProperties storageInfo, TimeoutTimer timer) {
    Preconditions.checkArgument(op instanceof AggregatedAppendOperation, "Not given an append operation.");

    // Read data from Storage, and compare byte-by-byte.
    InputStream appendStream = this.dataSource.getAppendData(op.getStreamSegmentId(), op.getStreamSegmentOffset(), (int) op.getLength());
    if (appendStream == null) {
        return Futures.failedFuture(new ReconciliationFailureException(
                String.format("Unable to reconcile operation '%s' because no append data is associated with it.", op),
                this.metadata, storageInfo));
    }

    // Only read as much data as we need.
    long readLength = Math.min(op.getLastStreamSegmentOffset(), storageInfo.getLength()) - op.getStreamSegmentOffset();
    assert readLength > 0 : "Append Operation to be reconciled is beyond the Segment's StorageLength " + op;
    AtomicInteger bytesReadSoFar = new AtomicInteger();

    // Read all data from storage.
    byte[] storageData = new byte[(int) readLength];
    return Futures
            .loop(() -> bytesReadSoFar.get() < readLength,
                  () -> this.storage.read(this.handle.get(), op.getStreamSegmentOffset() + bytesReadSoFar.get(),
                          storageData, bytesReadSoFar.get(), (int) readLength - bytesReadSoFar.get(), timer.getRemaining()),
                  bytesRead -> {
                      assert bytesRead > 0 : String.format("Unable to make any read progress when reconciling operation '%s' after reading %s bytes.", op, bytesReadSoFar);
                      bytesReadSoFar.addAndGet(bytesRead);
                  },
                  this.executor)
            .thenApplyAsync(v -> {
                // Compare, byte-by-byte, the contents of the append.
                verifySame(appendStream, storageData, op, storageInfo);
                if (readLength >= op.getLength() && op.getLastStreamSegmentOffset() <= storageInfo.getLength()) {
                    // Operation has been completely validated; pop it off the list.
                    StorageOperation removedOp = this.operations.removeFirst();
                    assert op == removedOp : "Reconciled operation is not the same as removed operation";
                }
                return new FlushResult().withFlushedBytes(readLength);
            }, this.executor);
}
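The verifySame method invoked above is not part of this snippet; below is a minimal sketch of the kind of byte-by-byte check it performs. The method name and the IOException-based error reporting are assumptions for illustration; the actual implementation reports a mismatch via a ReconciliationFailureException carrying the Segment's metadata.

import java.io.IOException;
import java.io.InputStream;

// Illustrative only: compares the append payload (as an InputStream) against the bytes just
// read back from Storage, failing on the first differing byte or a premature end-of-stream.
static void verifyStreamMatches(InputStream appendStream, byte[] storageData) throws IOException {
    for (int i = 0; i < storageData.length; i++) {
        int fromAppend = appendStream.read();
        if (fromAppend < 0 || (byte) fromAppend != storageData[i]) {
            throw new IOException("Append data differs from Storage data at offset " + i + ".");
        }
    }
}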
Use of io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation in project pravega by pravega.
The class StorageWriterTests, method testCleanup.
/**
* Tests the ability of the StorageWriter to cleanup SegmentAggregators that have been deleted in Storage or are
* gone from the Metadata.
* 1. Creates 3 segments, and adds an append for each of them.
* 2. Marks segment 2 as deleted (in metadata) and evicts segment 3 from metadata (no deletion).
* 3. Runs one more Writer cycle (to clean up).
* 4. Reinstates the missing segment metadatas and adds appends for each of them, verifying that the Writer re-requests
* the metadata for those two.
*/
@Test
public void testCleanup() throws Exception {
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, 1) // This differs from DEFAULT_CONFIG.
            .with(WriterConfig.FLUSH_ATTRIBUTES_THRESHOLD, 1)
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .with(WriterConfig.MAX_READ_TIMEOUT_MILLIS, 250L)
            .with(WriterConfig.MAX_ITEMS_TO_READ_AT_ONCE, 100)
            .with(WriterConfig.ERROR_SLEEP_MILLIS, 0L)
            .build();
    @Cleanup
    TestContext context = new TestContext(config);
    context.writer.startAsync();

    // Create a bunch of segments and Transactions.
    final ArrayList<Long> segmentIds = createSegments(context);
    final UpdateableSegmentMetadata segment1 = context.metadata.getStreamSegmentMetadata(segmentIds.get(0));
    final UpdateableSegmentMetadata segment2 = context.metadata.getStreamSegmentMetadata(segmentIds.get(1));
    final UpdateableSegmentMetadata segment3 = context.metadata.getStreamSegmentMetadata(segmentIds.get(2));
    final byte[] data = new byte[1];
    Function<UpdateableSegmentMetadata, Operation> createAppend = segment -> {
        StreamSegmentAppendOperation append = new StreamSegmentAppendOperation(segment.getId(), new ByteArraySegment(data), null);
        append.setStreamSegmentOffset(segment.getLength());
        context.dataSource.recordAppend(append);
        segment.setLength(segment.getLength() + data.length);
        return new CachedStreamSegmentAppendOperation(append);
    };

    // Process an append for each segment, to make sure the writer has knowledge of those segments.
    context.dataSource.add(createAppend.apply(segment1));
    context.dataSource.add(createAppend.apply(segment2));
    context.dataSource.add(createAppend.apply(segment3));
    metadataCheckpoint(context);
    context.dataSource.waitFullyAcked().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Delete segment2 (markDeleted) and evict segment3 (by forcing the metadata to forget about it).
    long evictionCutoff = context.metadata.nextOperationSequenceNumber() + 1;
    context.metadata.getStreamSegmentId(segment1.getName(), true);
    context.metadata.getStreamSegmentId(segment2.getName(), true);
    segment2.markDeleted();
    Collection<Long> evictedSegments = evictSegments(evictionCutoff, context);

    // Make sure the right segment is evicted, and not the other two (there are other segments in this system which we don't care about).
    Assert.assertTrue("Expected segment was not evicted.", evictedSegments.contains(segment3.getId()));
    Assert.assertFalse("Unexpected segments were evicted.", evictedSegments.contains(segment1.getId()) && evictedSegments.contains(segment3.getId()));

    // Add one more append to Segment1 - this will force the writer to go on a full iteration and thus invoke cleanup.
    context.dataSource.add(createAppend.apply(segment1));
    metadataCheckpoint(context);
    context.dataSource.waitFullyAcked().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Get rid of Segment2 from the metadata.
    evictionCutoff = context.metadata.nextOperationSequenceNumber() + 1;
    context.metadata.getStreamSegmentId(segment1.getName(), true);
    evictedSegments = evictSegments(evictionCutoff, context);
    Assert.assertTrue("Expected segment was not evicted.", evictedSegments.contains(segment2.getId()));

    // Repopulate the metadata.
    val segment2Take2 = context.metadata.mapStreamSegmentId(segment2.getName(), segment2.getId());
    val segment3Take2 = context.metadata.mapStreamSegmentId(segment3.getName(), segment3.getId());
    segment2Take2.copyFrom(segment2);
    segment3Take2.copyFrom(segment3);

    // Add an append for each of the re-added segments and verify that the Writer re-requested the metadata, which
    // indicates it had to recreate their SegmentAggregators.
    HashSet<Long> requestedSegmentIds = new HashSet<>();
    context.dataSource.setSegmentMetadataRequested(requestedSegmentIds::add);
    context.dataSource.add(createAppend.apply(segment2Take2));
    context.dataSource.add(createAppend.apply(segment3Take2));
    metadataCheckpoint(context);
    context.dataSource.waitFullyAcked().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertTrue("The deleted segments did not have their metadata requested.",
            requestedSegmentIds.contains(segment2.getId()) && requestedSegmentIds.contains(segment3.getId()));
}
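The createAppend helper above shows the idiom this page documents: the payload-carrying StreamSegmentAppendOperation is recorded with the data source, and only a lightweight CachedStreamSegmentAppendOperation (offsets and lengths, no payload) travels onward. A distilled sketch of that step, where segmentId, currentLength and dataSource are placeholders standing in for the test fixture's values:

// Sketch only; segmentId, currentLength and dataSource are illustrative placeholders.
byte[] payload = new byte[]{1};
StreamSegmentAppendOperation append =
        new StreamSegmentAppendOperation(segmentId, new ByteArraySegment(payload), null);
append.setStreamSegmentOffset(currentLength);                       // Where the append will land in the segment.
dataSource.recordAppend(append);                                    // The payload stays with the data source.
Operation cached = new CachedStreamSegmentAppendOperation(append);  // Only the metadata flows onward.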
Use of io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation in project pravega by pravega.
The class MemoryStateUpdaterTests, method testProcess.
/**
* Tests the functionality of the process() method.
*/
@Test
public void testProcess() throws Exception {
    int segmentCount = 10;
    int operationCountPerType = 5;

    // Add to MTL + Add to ReadIndex (append; beginMerge).
    InMemoryLog opLog = new InMemoryLog();
    val readIndex = mock(ReadIndex.class);
    val triggerSegmentIds = new ArrayList<Long>();
    doAnswer(x -> {
        triggerSegmentIds.clear();
        triggerSegmentIds.addAll(x.getArgument(0));
        return null;
    }).when(readIndex).triggerFutureReads(anyCollection());
    val invocations = new ArrayList<InvocationOnMock>();
    doAnswer(invocations::add).when(readIndex).append(anyLong(), anyLong(), any());
    doAnswer(invocations::add).when(readIndex).beginMerge(anyLong(), anyLong(), anyLong());
    MemoryStateUpdater updater = new MemoryStateUpdater(opLog, readIndex);
    ArrayList<Operation> operations = populate(updater, segmentCount, operationCountPerType);

    // Verify they were properly processed.
    Queue<Operation> logIterator = opLog.poll(operations.size());
    int currentIndex = -1;
    val invocationIterator = invocations.iterator();
    while (!logIterator.isEmpty()) {
        currentIndex++;
        Operation expected = operations.get(currentIndex);
        Operation actual = logIterator.poll();
        if (expected instanceof StorageOperation) {
            val invokedMethod = invocationIterator.next();
            if (expected instanceof StreamSegmentAppendOperation) {
                Assert.assertTrue("StreamSegmentAppendOperation was not added as a CachedStreamSegmentAppendOperation to the Memory Log.",
                        actual instanceof CachedStreamSegmentAppendOperation);
                StreamSegmentAppendOperation appendOp = (StreamSegmentAppendOperation) expected;
                Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was not added to the ReadIndex.",
                        "append", invokedMethod.getMethod().getName());
                Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.",
                        appendOp.getStreamSegmentId(), (long) invokedMethod.getArgument(0));
                Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.",
                        appendOp.getStreamSegmentOffset(), (long) invokedMethod.getArgument(1));
                Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.",
                        appendOp.getData(), invokedMethod.getArgument(2));
            } else if (expected instanceof MergeSegmentOperation) {
                MergeSegmentOperation mergeOp = (MergeSegmentOperation) expected;
                Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was not added to the ReadIndex.",
                        "beginMerge", invokedMethod.getMethod().getName());
                Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.",
                        mergeOp.getStreamSegmentId(), (long) invokedMethod.getArgument(0));
                Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.",
                        mergeOp.getStreamSegmentOffset(), (long) invokedMethod.getArgument(1));
                Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.",
                        mergeOp.getSourceSegmentId(), (long) invokedMethod.getArgument(2));
            }
        }
    }

    // Verify triggerFutureReads args.
    val expectedSegmentIds = operations.stream()
            .filter(op -> op instanceof SegmentOperation)
            .map(op -> ((SegmentOperation) op).getStreamSegmentId())
            .collect(Collectors.toSet());
    AssertExtensions.assertContainsSameElements("ReadIndex.triggerFutureReads() was called with the wrong set of StreamSegmentIds.",
            expectedSegmentIds, triggerSegmentIds);

    // Test DataCorruptionException.
    AssertExtensions.assertThrows("MemoryStateUpdater accepted an operation that was out of order.",
            () -> updater.process(new MergeSegmentOperation(1, 2)),
            ex -> ex instanceof DataCorruptionException);
}
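The test above relies on Mockito's doAnswer to record every call to the mocked ReadIndex as an InvocationOnMock, so the calls can later be replayed in order and checked against the expected operations. A small, self-contained sketch of that capture pattern follows; SimpleIndex and the example values are illustrative, not Pravega types.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.ArrayList;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;

public class InvocationCaptureExample {
    // Stand-in for the mocked collaborator (here, a void method like ReadIndex.append).
    interface SimpleIndex {
        void append(long segmentId, long offset, Object data);
    }

    @Test
    public void capturesInvocationsInOrder() {
        SimpleIndex index = mock(SimpleIndex.class);
        List<InvocationOnMock> calls = new ArrayList<>();
        // Record each invocation; the (ignored) boolean return of List.add is fine for a void stub.
        doAnswer(calls::add).when(index).append(anyLong(), anyLong(), any());

        index.append(1L, 0L, "payload");

        InvocationOnMock call = calls.get(0);
        Assert.assertEquals("append", call.getMethod().getName());
        Assert.assertEquals(1L, (long) call.getArgument(0));
        Assert.assertEquals(0L, (long) call.getArgument(1));
        Assert.assertEquals("payload", call.getArgument(2));
    }
}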
Use of io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation in project pravega by pravega.
The class MemoryStateUpdater, method addToReadIndex.
/**
* Registers the given operation in the ReadIndex.
*
* @param operation The operation to register.
* @throws CacheFullException If the operation could not be added to the {@link ReadIndex} due to the cache being
* full and unable to evict anything to make room for more.
* @throws ServiceHaltException If any unexpected exception occurred that prevented the operation from being
* added to the {@link ReadIndex}. Unexpected exceptions are all exceptions other than those declared in this
* method or that indicate we are shutting down or that the segment has been deleted.
*/
private void addToReadIndex(StorageOperation operation) throws ServiceHaltException, CacheFullException {
    try {
        if (operation instanceof StreamSegmentAppendOperation) {
            // Record a StreamSegmentAppendOperation. Just in case, we also support this type of operation, but we need to
            // log a warning indicating so. This means we do not optimize memory properly, and we end up storing data
            // in two different places.
            StreamSegmentAppendOperation appendOperation = (StreamSegmentAppendOperation) operation;
            this.readIndex.append(appendOperation.getStreamSegmentId(), appendOperation.getStreamSegmentOffset(), appendOperation.getData());
        } else if (operation instanceof MergeSegmentOperation) {
            // Record a MergeSegmentOperation. We call beginMerge here, and the StorageWriter will call completeMerge.
            MergeSegmentOperation mergeOperation = (MergeSegmentOperation) operation;
            this.readIndex.beginMerge(mergeOperation.getStreamSegmentId(), mergeOperation.getStreamSegmentOffset(), mergeOperation.getSourceSegmentId());
        } else {
            assert !(operation instanceof CachedStreamSegmentAppendOperation) : "attempted to add a CachedStreamSegmentAppendOperation to the ReadIndex";
        }
    } catch (ObjectClosedException | StreamSegmentNotExistsException ex) {
        // The Segment is in the process of being deleted. We usually end up in here because a concurrent delete
        // request has updated the metadata while we were executing.
        log.warn("Not adding operation '{}' to ReadIndex because it refers to a deleted StreamSegment.", operation);
    } catch (CacheFullException ex) {
        // Record the operation that we couldn't add and re-throw the exception as we cannot do anything about it here.
        log.warn("Not adding operation '{}' to ReadIndex because the Cache is full.", operation);
        throw ex;
    } catch (Exception ex) {
        throw new ServiceHaltException(String.format("Unable to add operation '%s' to ReadIndex.", operation), ex);
    }
}
Use of io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation in project pravega by pravega.
The class MemoryStateUpdater, method process.
/**
* Processes the given operations and applies them to the ReadIndex and InMemory OperationLog.
*
* @param operations An Iterator iterating over the operations to process (in sequence).
* @param callback A Consumer that will be invoked on EVERY {@link Operation} in the operations iterator, in the
* order returned from the iterator, regardless of whether the operation was processed or not.
* @throws ServiceHaltException If a serious, non-recoverable state was detected, such as unable to create a
* CachedStreamSegmentAppendOperation.
* @throws CacheFullException If any operation in the given iterator contains data that needs to be added to the
* {@link ReadIndex} but it could not be done due to the cache being full and unable
* to evict anything to make room for more.
*/
void process(Iterator<Operation> operations, Consumer<Operation> callback) throws ServiceHaltException, CacheFullException {
    HashSet<Long> segmentIds = new HashSet<>();
    Operation op = null;
    try {
        while (operations.hasNext()) {
            op = operations.next();
            process(op);
            callback.accept(op);
            if (op instanceof SegmentOperation) {
                // Record recent activity on stream segment, if applicable. This should be recorded for any kind
                // of Operation that touches a Segment, since when we issue 'triggerFutureReads' on the readIndex,
                // it should include 'sealed' StreamSegments too - any Future Reads waiting on that Offset will be cancelled.
                segmentIds.add(((SegmentOperation) op).getStreamSegmentId());
            }
        }
        op = null;
    } catch (Throwable ex) {
        // Invoke the callback on every remaining operation (including the failed one, which is no longer part of the iterator).
        if (op != null) {
            callback.accept(op);
        }
        operations.forEachRemaining(callback);
        throw ex;
    }
    if (!this.recoveryMode.get()) {
        // Trigger Future Reads on those segments which were touched by Appends or Seals.
        this.readIndex.triggerFutureReads(segmentIds);
    }
}
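The catch block above is what backs the guarantee in the javadoc: the callback sees every Operation, including the one that failed and any that were never reached. A minimal, generic sketch of that contract (the class and method names below are illustrative, not Pravega APIs):

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.function.Consumer;

final class DrainOnFailureExample {

    // Processes items one by one; if processing an item fails, that item and all remaining
    // items are still handed to the callback before the exception is rethrown.
    static <T> void processAll(Iterator<T> items, Consumer<T> process, Consumer<T> callback) {
        T current = null;
        try {
            while (items.hasNext()) {
                current = items.next();
                process.accept(current);
                callback.accept(current);
            }
            current = null;
        } catch (RuntimeException ex) {
            if (current != null) {
                callback.accept(current);      // The item that failed still reaches the callback...
            }
            items.forEachRemaining(callback);  // ...as do all items that were never processed.
            throw ex;
        }
    }

    public static void main(String[] args) {
        List<String> items = Arrays.asList("a", "b", "c");
        try {
            processAll(items.iterator(),
                    s -> { if (s.equals("b")) { throw new IllegalStateException("failed on " + s); } },
                    s -> System.out.println("callback: " + s));
        } catch (IllegalStateException expected) {
            // "a", "b" and "c" were all passed to the callback even though "b" failed.
        }
    }
}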