Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
Class ContainerReadIndex, method exitRecoveryMode.
@Override
public void exitRecoveryMode(boolean successfulRecovery) throws DataCorruptionException {
    Exceptions.checkNotClosed(this.closed.get(), this);
    Preconditions.checkState(this.isRecoveryMode(), "Read Index is not in recovery mode.");
    synchronized (this.lock) {
        assert this.preRecoveryMetadata != null : "preRecoveryMetadata is null, which should only be the case when we are not in recovery mode";
        Preconditions.checkState(!this.preRecoveryMetadata.isRecoveryMode(), "Cannot take ReadIndex out of recovery: ContainerMetadata is still in recovery mode.");
        if (successfulRecovery) {
            // Validate that the metadata has been properly recovered and that we are still in sync with it.
            for (Map.Entry<Long, StreamSegmentReadIndex> e : this.readIndices.entrySet()) {
                SegmentMetadata metadata = this.preRecoveryMetadata.getStreamSegmentMetadata(e.getKey());
                if (metadata == null) {
                    throw new DataCorruptionException(String.format("ContainerMetadata has no knowledge of StreamSegment Id %s.", e.getKey()));
                }
                e.getValue().exitRecoveryMode(metadata);
            }
        } else {
            // Recovery was unsuccessful. Clear the contents of the ReadIndex to avoid further issues.
            clear();
        }
        this.metadata = this.preRecoveryMetadata;
        this.preRecoveryMetadata = null;
    }
    log.info("{} Exit RecoveryMode.", this.traceObjectId);
}
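The method above works against a pre-recovery snapshot of the ContainerMetadata that is swapped back in once recovery ends. Below is a minimal, self-contained sketch of that swap pattern; the RecoverableIndex class, its Metadata placeholder, and the method bodies are hypothetical illustrations of the enter/exit sequence, not Pravega's actual ReadIndex API.

// Hypothetical illustration of the "pre-recovery metadata swap" used by exitRecoveryMode() above.
public class RecoverableIndex {
    // Marker placeholder standing in for a container metadata type.
    public interface Metadata { }

    private final Object lock = new Object();
    private Metadata metadata;            // metadata the index is currently working against
    private Metadata preRecoveryMetadata; // the "real" metadata, held aside while recovery runs

    public void enterRecoveryMode(Metadata recoveryMetadata) {
        synchronized (this.lock) {
            if (this.preRecoveryMetadata != null) {
                throw new IllegalStateException("Already in recovery mode.");
            }
            // Keep the live metadata aside and operate against the recovery-specific one.
            this.preRecoveryMetadata = this.metadata;
            this.metadata = recoveryMetadata;
        }
    }

    public void exitRecoveryMode(boolean successfulRecovery) {
        synchronized (this.lock) {
            if (this.preRecoveryMetadata == null) {
                throw new IllegalStateException("Not in recovery mode.");
            }
            if (!successfulRecovery) {
                // Mirrors the clear() call above: discard state built during a failed recovery.
                clearContents();
            }
            // Swap the pre-recovery metadata back in, regardless of the outcome.
            this.metadata = this.preRecoveryMetadata;
            this.preRecoveryMetadata = null;
        }
    }

    private void clearContents() {
        // Placeholder: a real index would drop its cached entries here.
    }
}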
Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
Class SegmentAggregatorTests, method testMergeWithStorageErrors.
/**
 * Tests the flush() method with Append and MergeTransactionOperations in the presence of Storage errors.
 */
@Test
public void testMergeWithStorageErrors() throws Exception {
    // Storage Errors
    // This is the number of appends per Segment/Transaction - there will be a lot of appends here.
    final int appendCount = 100;
    final int failSyncEvery = 2;
    final int failAsyncEvery = 3;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup TestContext context = new TestContext(config);
    // Initialize all segments.
    context.segmentAggregator.initialize(TIMEOUT).join();
    for (SegmentAggregator a : context.transactionAggregators) {
        a.initialize(TIMEOUT).join();
    }
    // Store written data by segment - so we can check it later.
    HashMap<Long, ByteArrayOutputStream> dataBySegment = new HashMap<>();
    // Add a few appends to each Transaction aggregator and to the parent aggregator, and seal all Transactions.
    for (int i = 0; i < context.transactionAggregators.length; i++) {
        SegmentAggregator transactionAggregator = context.transactionAggregators[i];
        long transactionId = transactionAggregator.getMetadata().getId();
        ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
        dataBySegment.put(transactionId, writtenData);
        for (int appendId = 0; appendId < appendCount; appendId++) {
            StorageOperation appendOp = generateAppendAndUpdateMetadata(appendId, transactionId, context);
            transactionAggregator.add(appendOp);
            getAppendData(appendOp, writtenData, context);
        }
        transactionAggregator.add(generateSealAndUpdateMetadata(transactionId, context));
    }
    // Merge all the Transactions into the parent Segment.
    @Cleanup ByteArrayOutputStream parentData = new ByteArrayOutputStream();
    for (int transIndex = 0; transIndex < context.transactionAggregators.length; transIndex++) {
        // Merge this Transaction into the parent & record its data in the final parent data array.
        long transactionId = context.transactionAggregators[transIndex].getMetadata().getId();
        context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(transactionId, context));
        ByteArrayOutputStream transactionData = dataBySegment.get(transactionId);
        parentData.write(transactionData.toByteArray());
        transactionData.close();
    }
    // Have the writes fail every few attempts with a well-known exception.
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setConcatSyncErrorInjector(new ErrorInjector<>(count -> count % failSyncEvery == 0, exceptionSupplier));
    context.storage.setConcatAsyncErrorInjector(new ErrorInjector<>(count -> count % failAsyncEvery == 0, exceptionSupplier));
    // Flush all the Aggregators, while checking that the right errors get handled and can be recovered from.
    tryFlushAllSegments(context, () -> setException.set(null), setException::get);
    // Verify that all Transactions are now fully merged.
    for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
        SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
        Assert.assertTrue("Merged Transaction was not marked as deleted in metadata.", transactionMetadata.isDeleted());
        Assert.assertFalse("Merged Transaction still exists in storage.", context.storage.exists(transactionMetadata.getName(), TIMEOUT).join());
    }
    // Verify that, in the end, the contents of the parent are as expected.
    byte[] expectedData = parentData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed/merged to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
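The failure behavior in this test is driven by a counter-based error injector: every Nth invocation of the injected storage operation throws a supplied exception, and the test records the last injected exception so it can verify that flush() recovers from it. A small, self-contained sketch of such an injector follows; the CountingErrorInjector class and its method names are hypothetical and are not Pravega's ErrorInjector API.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.IntPredicate;
import java.util.function.Supplier;

// Hypothetical counter-based error injector: throws on invocations selected by the predicate.
public class CountingErrorInjector<E extends Exception> {
    private final AtomicInteger count = new AtomicInteger();
    private final IntPredicate countTrigger;
    private final Supplier<E> errorSupplier;

    public CountingErrorInjector(IntPredicate countTrigger, Supplier<E> errorSupplier) {
        this.countTrigger = countTrigger;
        this.errorSupplier = errorSupplier;
    }

    // Call at the start of the operation under test; throws whenever the invocation count matches the predicate.
    public void throwIfNecessary() throws E {
        int current = this.count.incrementAndGet();
        if (this.countTrigger.test(current)) {
            throw this.errorSupplier.get();
        }
    }
}

A storage stub wrapping its concat operation could call throwIfNecessary() before delegating, e.g. new CountingErrorInjector<>(c -> c % 2 == 0, () -> new IntentionalException("injected")), which mirrors the failSyncEvery/failAsyncEvery setup in the test above.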
Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
Class SegmentAggregatorTests, method testAddValidOperations.
// endregion
// region add()
/**
 * Tests the add() method with valid operations only.
 */
@Test
public void testAddValidOperations() throws Exception {
    final int appendCount = 20;
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    // We want to make sure we do not prematurely acknowledge anything.
    context.dataSource.setCompleteMergeCallback((target, source) -> Assert.fail("Not expecting any merger callbacks yet."));
    // We need one Transaction for this test (which we populate with data).
    SegmentAggregator transactionAggregator = context.transactionAggregators[0];
    SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
    // We also need an empty Transaction.
    SegmentAggregator emptyTransactionAggregator = context.transactionAggregators[1];
    SegmentMetadata emptyTransactionMetadata = emptyTransactionAggregator.getMetadata();
    context.segmentAggregator.initialize(TIMEOUT).join();
    transactionAggregator.initialize(TIMEOUT).join();
    emptyTransactionAggregator.initialize(TIMEOUT).join();
    // Seal the empty Transaction and add a MergeTransactionOperation to the parent (do this before everything else).
    emptyTransactionAggregator.add(generateSealAndUpdateMetadata(emptyTransactionMetadata.getId(), context));
    context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(emptyTransactionMetadata.getId(), context));
    // Verify that Appends with correct parameters work as expected.
    for (int i = 0; i < appendCount; i++) {
        context.segmentAggregator.add(generateAppendAndUpdateMetadata(i, SEGMENT_ID, context));
        transactionAggregator.add(generateAppendAndUpdateMetadata(i, transactionMetadata.getId(), context));
    }
    // Seal the Transaction and add a MergeSegmentOperation to the parent.
    transactionAggregator.add(generateSealAndUpdateMetadata(transactionMetadata.getId(), context));
    context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(transactionMetadata.getId(), context));
    // Add more appends to the parent, and truncate the Segment bit by bit.
    for (int i = 0; i < appendCount; i++) {
        context.segmentAggregator.add(generateAppendAndUpdateMetadata(i, SEGMENT_ID, context));
        if (i % 2 == 1) {
            // On every other Append, do a Truncate. This helps us check both Append aggregation and Segment truncation.
            context.segmentAggregator.add(generateTruncateAndUpdateMetadata(SEGMENT_ID, context));
        }
    }
    // Seal the parent, then truncate again.
    context.segmentAggregator.add(generateSealAndUpdateMetadata(SEGMENT_ID, context));
    context.segmentAggregator.add(generateTruncateAndUpdateMetadata(SEGMENT_ID, context));
    // This should have no effect and not throw any errors.
    context.segmentAggregator.add(new UpdateAttributesOperation(SEGMENT_ID, AttributeUpdateCollection.from(new AttributeUpdate(AttributeId.randomUUID(), AttributeUpdateType.Replace, 1))));
}
Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
Class ContainerKeyIndexTests, method testRegularSegmentThrottling.
/**
 * Tests that regular Segments get the right amount of credits.
 */
@Test
public void testRegularSegmentThrottling() {
    @Cleanup val context = new TestContext();
    @Cleanup ContainerKeyIndex.SegmentTracker segmentTracker = context.index.new SegmentTracker();
    DirectSegmentAccess mockSegment = Mockito.mock(DirectSegmentAccess.class);
    SegmentMetadata mockSegmentMetadata = Mockito.mock(SegmentMetadata.class);
    // Regular segment.
    SegmentType segmentType = SegmentType.builder().build();
    Mockito.when(mockSegmentMetadata.getType()).thenReturn(segmentType);
    Mockito.when(mockSegment.getInfo()).thenReturn(mockSegmentMetadata);
    Mockito.when(mockSegment.getSegmentId()).thenReturn(1L);
    int updateSize = TableExtensionConfig.MAX_UNINDEXED_LENGTH.getDefaultValue() - 1;
    segmentTracker.throttleIfNeeded(mockSegment, () -> CompletableFuture.completedFuture(null), updateSize).join();
    Assert.assertEquals(segmentTracker.getUnindexedSizeBytes(1L), TableExtensionConfig.MAX_UNINDEXED_LENGTH.getDefaultValue() - 1);
}
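The throttling check exercised here relies on a per-segment count of unindexed bytes: an update is admitted immediately while the outstanding total stays under a configured cap, and would otherwise have to wait for the indexer to catch up. A simplified, self-contained sketch of that bookkeeping follows; the UnindexedByteTracker class and its members are hypothetical and only illustrate the credit accounting, not ContainerKeyIndex.SegmentTracker itself.

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

// Hypothetical per-segment credit tracker: admits an update immediately if it fits under the cap.
public class UnindexedByteTracker {
    private final long maxUnindexedBytes;
    private final Map<Long, Long> unindexedBytesBySegment = new ConcurrentHashMap<>();

    public UnindexedByteTracker(long maxUnindexedBytes) {
        this.maxUnindexedBytes = maxUnindexedBytes;
    }

    public CompletableFuture<Void> throttleIfNeeded(long segmentId, Supplier<CompletableFuture<Void>> update, int updateSize) {
        long newTotal = this.unindexedBytesBySegment.merge(segmentId, (long) updateSize, Long::sum);
        // Under the cap: run the update right away (the case exercised by the test above).
        // A real implementation would park the update until the indexer drains credits; that path is omitted here.
        return newTotal <= this.maxUnindexedBytes
                ? update.get()
                : CompletableFuture.failedFuture(new IllegalStateException("Over cap; deferral not modeled in this sketch."));
    }

    public long getUnindexedSizeBytes(long segmentId) {
        return this.unindexedBytesBySegment.getOrDefault(segmentId, 0L);
    }
}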
Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
Class StreamSegmentContainerMetadataTests, method checkEvictedSegmentCandidates.
private void checkEvictedSegmentCandidates(Collection<SegmentMetadata> candidates, UpdateableContainerMetadata metadata, long expirationSeqNo, long truncatedSeqNo) {
    long cutoffSeqNo = Math.min(expirationSeqNo, truncatedSeqNo);
    HashSet<Long> candidateIds = new HashSet<>();
    for (SegmentMetadata candidate : candidates) {
        // Check that all segments in candidates are actually eligible for removal.
        boolean isEligible = shouldExpectRemoval(candidate.getId(), metadata, cutoffSeqNo, truncatedSeqNo);
        Assert.assertTrue("Unexpected eviction candidate in segment " + candidate.getId(), isEligible);
        // Check that all segments in candidates are not actually removed from the metadata.
        Assert.assertNotNull("ContainerMetadata no longer has metadata for eviction candidate segment " + candidate.getId(), metadata.getStreamSegmentMetadata(candidate.getId()));
        Assert.assertNotEquals("ContainerMetadata no longer has name mapping for eviction candidate segment " + candidate.getId(), ContainerMetadata.NO_STREAM_SEGMENT_ID, metadata.getStreamSegmentId(candidate.getName(), false));
        candidateIds.add(candidate.getId());
    }
    // Check that all segments remaining in the metadata are still eligible to remain there.
    for (long segmentId : metadata.getAllStreamSegmentIds()) {
        if (!candidateIds.contains(segmentId)) {
            boolean expectedRemoved = shouldExpectRemoval(segmentId, metadata, cutoffSeqNo, truncatedSeqNo);
            Assert.assertFalse("Unexpected non-eviction for segment " + segmentId, expectedRemoved);
        }
    }
}