Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
In class DurableLogTests, method testRecoveryWithMetadataCleanup.
/**
* Tests the following recovery scenario:
* 1. A Segment is created and recorded in the metadata with some optional operations executing on it.
* 2. The segment is evicted from the metadata.
* 3. The segment is reactivated (with a new metadata mapping) - possibly due to an append. No truncation since #2.
* 4. Recovery.
*/
@Test
public void testRecoveryWithMetadataCleanup() throws Exception {
    final long truncatedSeqNo = Integer.MAX_VALUE;

    // Setup a DurableLog and start it.
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(
            new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    long segmentId;

    // First DurableLog. We use this for generating data.
    val metadata1 = (StreamSegmentContainerMetadata) new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup
    CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    SegmentProperties originalSegmentInfo;
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Create the segment.
        val segmentIds = createStreamSegmentsWithOperations(1, metadata1, durableLog, storage);
        segmentId = segmentIds.stream().findFirst().orElse(-1L);

        // Evict the segment.
        val sm1 = metadata1.getStreamSegmentMetadata(segmentId);
        originalSegmentInfo = sm1.getSnapshot();

        // Simulate a truncation. This is needed in order to trigger a cleanup.
        metadata1.removeTruncationMarkers(truncatedSeqNo);
        val cleanedUpSegments = metadata1.cleanup(Collections.singleton(sm1), truncatedSeqNo);
        Assert.assertEquals("Unexpected number of segments evicted.", 1, cleanedUpSegments.size());

        // Map the segment again.
        val reMapOp = new StreamSegmentMapOperation(originalSegmentInfo);
        reMapOp.setStreamSegmentId(segmentId);
        durableLog.add(reMapOp, TIMEOUT).join();

        // Stop.
        durableLog.stopAsync().awaitTerminated();
    }

    // Recovery #1. This should work well.
    val metadata2 = (StreamSegmentContainerMetadata) new MetadataBuilder(CONTAINER_ID).build();
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Get segment info.
        val recoveredSegmentInfo = metadata1.getStreamSegmentMetadata(segmentId).getSnapshot();
        Assert.assertEquals("Unexpected length from recovered segment.", originalSegmentInfo.getLength(), recoveredSegmentInfo.getLength());

        // Now evict the segment again ...
        val sm = metadata2.getStreamSegmentMetadata(segmentId);

        // Simulate a truncation. This is needed in order to trigger a cleanup.
        metadata2.removeTruncationMarkers(truncatedSeqNo);
        val cleanedUpSegments = metadata2.cleanup(Collections.singleton(sm), truncatedSeqNo);
        Assert.assertEquals("Unexpected number of segments evicted.", 1, cleanedUpSegments.size());

        // ... and re-map it with a new Id. This is a perfectly valid operation, and we can't prevent it.
        durableLog.add(new StreamSegmentMapOperation(originalSegmentInfo), TIMEOUT).join();

        // Stop.
        durableLog.stopAsync().awaitTerminated();
    }

    // Recovery #2. This should fail due to the same segment mapped multiple times with different ids.
    val metadata3 = (StreamSegmentContainerMetadata) new MetadataBuilder(CONTAINER_ID).build();
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata3, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata3, dataLogFactory, readIndex, executorService())) {
        AssertExtensions.assertThrows(
                "Recovery did not fail with the expected exception in case of multi-mapping",
                () -> durableLog.startAsync().awaitRunning(),
                ex -> ex instanceof IllegalStateException
                        && ex.getCause() instanceof DataCorruptionException
                        && ex.getCause().getCause() instanceof MetadataUpdateException);
    }
}
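The final assertion checks a three-level cause chain inline (an IllegalStateException wrapping a DataCorruptionException wrapping a MetadataUpdateException). As a minimal, JDK-only sketch of the same predicate written reusably; the helper name matchesCauseChain is hypothetical and not part of Pravega or its test utilities:

// Hypothetical helper: walks a Throwable's cause chain and checks that it matches the
// expected sequence of exception types, mirroring the predicate used in the test above.
static boolean matchesCauseChain(Throwable ex, Class<?>... expectedTypes) {
    Throwable current = ex;
    for (Class<?> expected : expectedTypes) {
        if (current == null || !expected.isInstance(current)) {
            return false;
        }
        current = current.getCause();
    }
    return true;
}

// Usage equivalent to the inline predicate in the last assertion:
// ex -> matchesCauseChain(ex, IllegalStateException.class, DataCorruptionException.class, MetadataUpdateException.class)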
Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
In class ExtendedS3StorageTest, method testCreateIfNoneMatch.
// region If-none-match test
/**
* Tests the create() method with if-none-match set. Note that we currently
* do not run a real storage tier, so we cannot verify the behavior of the
* option against a real storage. Here instead, we are simply making sure
* that the new execution path does not break anything.
*/
@Test
public void testCreateIfNoneMatch() {
    val adapterConfig = ExtendedS3StorageConfig.builder()
            .with(ExtendedS3StorageConfig.BUCKET, setup.adapterConfig.getBucket())
            .with(ExtendedS3StorageConfig.ACCESS_KEY_ID, "x")
            .with(ExtendedS3StorageConfig.SECRET_KEY, "x")
            .with(ExtendedS3StorageConfig.ROOT, "test")
            .with(ExtendedS3StorageConfig.URI, setup.endpoint)
            .with(ExtendedS3StorageConfig.USENONEMATCH, true)
            .build();
    String segmentName = "foo_open";
    try (Storage s = createStorage(setup.client, adapterConfig, executorService())) {
        s.initialize(DEFAULT_EPOCH);
        s.create(segmentName, null).join();
        assertThrows("create() did not throw for existing StreamSegment.",
                s.create(segmentName, null),
                ex -> ex instanceof StreamSegmentExistsException);
    }
}
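As a usage sketch only: a caller that wants creation to be idempotent could swallow the "already exists" case while propagating every other failure. This assumes the two-argument Storage.create(name, timeout) shown above, the StreamSegmentExistsException the test relies on, and Pravega's Exceptions.unwrap; the helper createIfAbsent is hypothetical and not part of the codebase:

// Hypothetical helper: create the segment, treating "already exists" as success.
CompletableFuture<Void> createIfAbsent(Storage storage, String segmentName, Duration timeout) {
    return storage.create(segmentName, timeout)
            .handle((properties, ex) -> {
                if (ex != null && !(Exceptions.unwrap(ex) instanceof StreamSegmentExistsException)) {
                    // Not the "already exists" case: propagate the original failure.
                    throw new CompletionException(ex);
                }
                return null; // Created now, or it already existed.
            });
}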
Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
In class SegmentAggregator, method flushNormally.
/**
* Repeatedly flushes the contents of the Aggregator to the Storage as long as something immediate needs to be flushed,
* such as a Seal or Merge operation.
*
* @param timer Timer for the operation.
* @return A CompletableFuture that, when completed, will contain the result from the flush operation.
*/
private CompletableFuture<FlushResult> flushNormally(TimeoutTimer timer) {
    assert this.state.get() == AggregatorState.Writing : "flushNormally cannot be called if state == " + this.state;
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushNormally", this.operations.size());
    FlushResult result = new FlushResult();
    AtomicBoolean canContinue = new AtomicBoolean(true);
    return Futures.loop(
            canContinue::get,
            () -> flushOnce(timer),
            partialResult -> {
                canContinue.set(partialResult.getFlushedBytes() + partialResult.getMergedBytes() > 0);
                result.withFlushResult(partialResult);
            },
            this.executor)
            .thenApply(v -> {
                LoggerHelpers.traceLeave(log, this.traceObjectId, "flushNormally", traceId, result);
                return result;
            });
}
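Futures.loop here keeps calling flushOnce and accumulating its FlushResult until an iteration reports no flushed or merged bytes. A JDK-only sketch of the same loop-while-progress pattern, with illustrative names (this is not Pravega's Futures.loop implementation):

// Hypothetical helper: repeatedly invoke an async step, feed each partial result to an
// accumulator, and stop as soon as an iteration reports no progress.
static <R> CompletableFuture<Void> loopWhileProgress(
        Supplier<CompletableFuture<R>> asyncStep,
        Predicate<R> madeProgress,
        Consumer<R> accumulate,
        Executor executor) {
    return asyncStep.get().thenComposeAsync(partial -> {
        accumulate.accept(partial);
        return madeProgress.test(partial)
                ? loopWhileProgress(asyncStep, madeProgress, accumulate, executor)
                : CompletableFuture.<Void>completedFuture(null);
    }, executor);
}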
Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
In class SegmentAggregator, method initialize.
// endregion
// region Operations
/**
* Initializes the SegmentAggregator by pulling information from the given Storage.
*
* @param timeout Timeout for the operation.
* @return A CompletableFuture that, when completed, will indicate that the operation finished successfully. If any
* errors occurred during the operation, the Future will be completed with the appropriate exception.
*/
CompletableFuture<Void> initialize(Duration timeout) {
    Exceptions.checkNotClosed(isClosed(), this);
    Preconditions.checkState(this.state.get() == AggregatorState.NotInitialized, "SegmentAggregator has already been initialized.");
    assert this.handle.get() == null : "non-null handle but state == " + this.state.get();
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "initialize");
    return openWrite(this.metadata.getName(), this.handle, timeout)
            .thenAcceptAsync(segmentInfo -> {
                // Check & Update StorageLength in metadata.
                if (this.metadata.getStorageLength() != segmentInfo.getLength()) {
                    if (this.metadata.getStorageLength() >= 0) {
                        // Only log warning if the StorageLength has actually been initialized, but is different.
                        log.warn("{}: SegmentMetadata has a StorageLength ({}) that is different than the actual one ({}) - updating metadata.", this.traceObjectId, this.metadata.getStorageLength(), segmentInfo.getLength());
                    }
                    // It is very important to keep this value up-to-date and correct.
                    this.metadata.setStorageLength(segmentInfo.getLength());
                }

                // Check if the Storage segment is sealed, but it's not in metadata (this is 100% indicative of some data corruption happening).
                if (segmentInfo.isSealed()) {
                    if (!this.metadata.isSealed()) {
                        throw new CompletionException(new DataCorruptionException(String.format("Segment '%s' is sealed in Storage but not in the metadata.", this.metadata.getName())));
                    }
                    if (!this.metadata.isSealedInStorage()) {
                        this.metadata.markSealedInStorage();
                        log.warn("{}: Segment is sealed in Storage but metadata does not reflect that - updating metadata.", this.traceObjectId);
                    }
                }

                log.info("{}: Initialized. StorageLength = {}, Sealed = {}.", this.traceObjectId, segmentInfo.getLength(), segmentInfo.isSealed());
                LoggerHelpers.traceLeave(log, this.traceObjectId, "initialize", traceId);
                setState(AggregatorState.Writing);
            }, this.executor)
            .exceptionally(ex -> {
                ex = Exceptions.unwrap(ex);
                if (ex instanceof StreamSegmentNotExistsException) {
                    // Segment does not exist anymore. This is a real possibility during recovery, in the following cases:
                    // * We already processed a Segment Deletion but did not have a chance to checkpoint metadata
                    // * We processed a TransactionMergeOperation but did not have a chance to ack/truncate the DataSource
                    // Update metadata, just in case it is not already updated.
                    this.metadata.markDeleted();
                    log.warn("{}: Segment does not exist in Storage. Ignoring all further operations on it.", this.traceObjectId, ex);
                    setState(AggregatorState.Writing);
                    LoggerHelpers.traceLeave(log, this.traceObjectId, "initialize", traceId);
                } else {
                    // Other kind of error - re-throw.
                    throw new CompletionException(ex);
                }
                return null;
            });
}
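The happy-path branch boils down to one reconciliation rule: Storage is authoritative for StorageLength, and a segment sealed in Storage must already be sealed in the metadata. A condensed, hypothetical restatement of that rule (reconcile is not a real SegmentAggregator method; it only reuses the metadata and SegmentProperties accessors shown above):

// Hypothetical sketch of the invariant initialize() enforces against Storage state.
static void reconcile(UpdateableSegmentMetadata metadata, SegmentProperties storageInfo) throws DataCorruptionException {
    if (metadata.getStorageLength() != storageInfo.getLength()) {
        // Storage is the source of truth for StorageLength.
        metadata.setStorageLength(storageInfo.getLength());
    }
    if (storageInfo.isSealed()) {
        if (!metadata.isSealed()) {
            // Sealed in Storage but not in metadata: treated as data corruption, exactly as above.
            throw new DataCorruptionException(String.format("Segment '%s' is sealed in Storage but not in the metadata.", metadata.getName()));
        }
        if (!metadata.isSealedInStorage()) {
            metadata.markSealedInStorage();
        }
    }
}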
Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
In class SegmentAggregator, method mergeWith.
/**
* Merges the Transaction StreamSegment with given metadata into this one at the current offset.
*
* @param transactionMetadata The metadata of the Transaction StreamSegment to merge.
* @param mergeOp The MergeTransactionOperation that triggered this merge.
* @param timer Timer for the operation.
* @return A CompletableFuture that, when completed, will contain the number of bytes that were merged into this
* StreamSegment. If failed, the Future will contain the exception that caused it.
*/
private CompletableFuture<FlushResult> mergeWith(UpdateableSegmentMetadata transactionMetadata, MergeTransactionOperation mergeOp, TimeoutTimer timer) {
    if (transactionMetadata.isDeleted()) {
        return Futures.failedFuture(new DataCorruptionException(String.format("Attempted to merge with deleted Transaction segment '%s'.", transactionMetadata.getName())));
    }
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "mergeWith", transactionMetadata.getId(), transactionMetadata.getName(), transactionMetadata.isSealedInStorage());
    FlushResult result = new FlushResult();
    if (!transactionMetadata.isSealedInStorage() || transactionMetadata.getLength() > transactionMetadata.getStorageLength()) {
        // Nothing to do. Given Transaction is not eligible for merger yet.
        LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
        return CompletableFuture.completedFuture(result);
    }

    AtomicLong mergedLength = new AtomicLong();
    return this.storage
            .getStreamSegmentInfo(transactionMetadata.getName(), timer.getRemaining())
            .thenAcceptAsync(transProperties -> {
                // Check that the Storage agrees with our metadata (if not, we have a problem ...)
                if (transProperties.getLength() != transactionMetadata.getStorageLength()) {
                    throw new CompletionException(new DataCorruptionException(String.format(
                            "Transaction Segment '%s' cannot be merged into parent '%s' because its metadata disagrees with the Storage. Metadata.StorageLength=%d, Storage.StorageLength=%d",
                            transactionMetadata.getName(), this.metadata.getName(), transactionMetadata.getStorageLength(), transProperties.getLength())));
                }
                if (transProperties.getLength() != mergeOp.getLength()) {
                    throw new CompletionException(new DataCorruptionException(String.format(
                            "Transaction Segment '%s' cannot be merged into parent '%s' because the declared length in the operation disagrees with the Storage. Operation.Length=%d, Storage.StorageLength=%d",
                            transactionMetadata.getName(), this.metadata.getName(), mergeOp.getLength(), transProperties.getLength())));
                }
                mergedLength.set(transProperties.getLength());
            }, this.executor)
            .thenComposeAsync(v1 -> storage.concat(this.handle.get(), mergeOp.getStreamSegmentOffset(), transactionMetadata.getName(), timer.getRemaining()), this.executor)
            .thenComposeAsync(v2 -> storage.getStreamSegmentInfo(this.metadata.getName(), timer.getRemaining()), this.executor)
            .thenApplyAsync(segmentProperties -> {
                // We have processed a MergeTransactionOperation, pop the first operation off and decrement the counter.
                StorageOperation processedOperation = this.operations.removeFirst();
                assert processedOperation != null && processedOperation instanceof MergeTransactionOperation : "First outstanding operation was not a MergeTransactionOperation";
                assert ((MergeTransactionOperation) processedOperation).getTransactionSegmentId() == transactionMetadata.getId() : "First outstanding operation was a MergeTransactionOperation for the wrong Transaction id.";
                int newCount = this.mergeTransactionCount.decrementAndGet();
                assert newCount >= 0 : "Negative value for mergeTransactionCount";

                // Post-merger validation. Verify we are still in agreement with the storage.
                long expectedNewLength = this.metadata.getStorageLength() + mergedLength.get();
                if (segmentProperties.getLength() != expectedNewLength) {
                    throw new CompletionException(new DataCorruptionException(String.format(
                            "Transaction Segment '%s' was merged into parent '%s' but the parent segment has an unexpected StorageLength after the merger. Previous=%d, MergeLength=%d, Expected=%d, Actual=%d",
                            transactionMetadata.getName(), this.metadata.getName(), segmentProperties.getLength(), mergedLength.get(), expectedNewLength, segmentProperties.getLength())));
                }
                updateMetadata(segmentProperties);
                updateMetadataForTransactionPostMerger(transactionMetadata);
                this.lastFlush.set(this.timer.getElapsed());
                result.withMergedBytes(mergedLength.get());
                LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
                return result;
            }, this.executor)
            .exceptionally(ex -> {
                Throwable realEx = Exceptions.unwrap(ex);
                if (realEx instanceof BadOffsetException || realEx instanceof StreamSegmentNotExistsException) {
                    // We either attempted to write at an offset that already contained other data or the Transaction
                    // Segment no longer exists. This can happen for a number of reasons, but we do not have enough
                    // information here to determine why. We need to enter reconciliation mode, and hope for the best.
                    setState(AggregatorState.ReconciliationNeeded);
                }
                // Rethrow all exceptions.
                throw new CompletionException(ex);
            });
}
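Two of the checks in mergeWith can be read as standalone predicates. The sketch below is hypothetical (neither method exists in SegmentAggregator); it only restates the eligibility guard and the post-merge length validation using the accessors shown above:

// Hypothetical predicate mirroring the guard at the top of mergeWith: the Transaction must be
// sealed in Storage and fully flushed (nothing in the metadata beyond what Storage has).
static boolean isEligibleForMerge(UpdateableSegmentMetadata transactionMetadata) {
    return transactionMetadata.isSealedInStorage()
            && transactionMetadata.getLength() <= transactionMetadata.getStorageLength();
}

// Hypothetical predicate mirroring the post-merge validation: after the concat, the parent must
// have grown by exactly the merged Transaction's length.
static boolean mergeLengthConsistent(long parentStorageLengthBeforeMerge, long mergedTransactionLength, long parentLengthAfterMerge) {
    return parentLengthAfterMerge == parentStorageLengthBeforeMerge + mergedTransactionLength;
}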