Usage example of io.pravega.segmentstore.storage.SegmentHandle from the pravega project (pravega/pravega): class NoOpStorageUserDataWriteOnlyTests, method testSeal.
/**
 * Verifies that a segment opened for write can be sealed.
 * Opens a write handle, checks its identity and writability, then seals it.
 */
@Override
@Test
public void testSeal() throws Exception {
    final String segmentName = "sealSegment";
    try (Storage s = createStorage()) {
        s.initialize(DEFAULT_EPOCH);

        // A write handle must carry the segment's name and must not be read-only.
        SegmentHandle handle = s.openWrite(segmentName).join();
        assertEquals(segmentName, handle.getSegmentName());
        // assertFalse gives a clearer intent and failure message than assertEquals(false, ...).
        assertFalse(handle.isReadOnly());

        // Sealing through a valid write handle must complete without error.
        s.seal(handle, TIMEOUT).join();
    }
}
Usage example of io.pravega.segmentstore.storage.SegmentHandle from the pravega project (pravega/pravega): class HDFSStorageTest, method testFencing.
// region Fencing tests

/**
 * Tests fencing abilities. Two Storage instances are created with different owner epochs.
 * Part 1: Creation:
 * * The Segment is created via the first Storage instance:
 * ** The first instance must be able to execute all operations.
 * ** The second instance must be limited to read-only operations.
 * * The Segment is then opened via the second Storage instance:
 * ** The first instance must now be limited to read-only operations.
 * ** The second instance must be able to execute all operations.
 */
@Test
@Override
public void testFencing() throws Exception {
    final long firstEpoch = 1;
    final long secondEpoch = 2;
    final String segmentName = "segment";
    try (val firstStorage = createStorage();
         val secondStorage = createStorage()) {
        firstStorage.initialize(firstEpoch);
        secondStorage.initialize(secondEpoch);

        // Creating the segment through firstStorage makes it the current owner.
        firstStorage.create(segmentName, TIMEOUT).join();

        // While firstStorage owns the segment, it may perform every operation.
        val firstHandle = firstStorage.openWrite(segmentName).join();
        verifyWriteOperationsSucceed(firstHandle, firstStorage);
        verifyReadOnlyOperationsSucceed(firstHandle, firstStorage);

        // Opening for write through secondStorage (higher epoch) transfers ownership.
        val secondHandle = secondStorage.openWrite(segmentName).join();

        // The fenced-out owner is now restricted to read-only operations.
        verifyWriteOperationsFail(firstHandle, firstStorage);
        verifyReadOnlyOperationsSucceed(firstHandle, firstStorage);

        // The new owner may perform every operation.
        verifyReadOnlyOperationsSucceed(secondHandle, secondStorage);
        verifyWriteOperationsSucceed(secondHandle, secondStorage);

        // Seal and Delete (these must run last, otherwise the test cannot continue).
        verifyFinalWriteOperationsFail(firstHandle, firstStorage);
        verifyFinalWriteOperationsSucceed(secondHandle, secondStorage);
    }
}
Usage example of io.pravega.segmentstore.storage.SegmentHandle from the pravega project (pravega/pravega): class StorageWriterTests, method testWithStorageCorruptionErrors.
/**
 * Tests the StorageWriter in a scenario where the Storage component throws data-corruption
 * exceptions (i.e., badOffset, and the data remains corrupt even after reconciliation).
 */
@Test
public void testWithStorageCorruptionErrors() throws Exception {
    AtomicBoolean corruptionTriggered = new AtomicBoolean();
    Function<TestContext, ErrorInjector<Exception>> injectorFactory = context -> {
        byte[] garbage = "foo".getBytes();
        SegmentHandle targetHandle =
                InMemoryStorage.newHandle(context.metadata.getStreamSegmentMetadata(0).getName(), false);
        Supplier<Exception> faultSupplier = () -> {
            // Corrupt data. The internal append() is used to atomically write data at the end of
            // the segment. GetLength+Write would not work well because concurrent writes may modify
            // the data between requesting the length and attempting to write, which would make the
            // corruption attempt itself fail.
            // NOTE: this is a synchronous call, but append() is also a sync method. If append()
            // were ever made async, care must be taken not to block a thread while waiting on it.
            context.storage.append(targetHandle, new ByteArrayInputStream(garbage), garbage.length);
            // Surface some unrelated exception type to the caller.
            return new TimeoutException("Intentional");
        };
        // Fire exactly once: the first invocation flips the flag and injects the fault.
        return new ErrorInjector<>(c -> !corruptionTriggered.getAndSet(true), faultSupplier);
    };
    testWithStorageCriticalErrors(injectorFactory, ex -> ex instanceof ReconciliationFailureException);
}
Usage example of io.pravega.segmentstore.storage.SegmentHandle from the pravega project (pravega/pravega): class ChunkedSegmentStorage, method seal.
/**
 * Seals the given segment so that no further appends are accepted.
 * Idempotent: sealing an already-sealed segment completes successfully without a metadata update.
 *
 * @param handle  write handle for the segment to seal; must not be null or read-only.
 * @param timeout timeout for the operation (currently unused by this implementation).
 * @return a CompletableFuture that completes when the seal has been persisted.
 */
@Override
public CompletableFuture<Void> seal(SegmentHandle handle, Duration timeout) {
    checkInitialized();
    // BUGFIX: validate the handle BEFORE it is dereferenced. Previously this check sat inside
    // the lambda after traceEnter/log.debug had already called handle.getSegmentName() (and the
    // executeSerialized key argument dereferenced it even earlier), so it could never fire.
    Preconditions.checkNotNull(handle, "handle");
    return executeSerialized(() -> {
        // Remaining argument checks stay inside the serialized task so their failures are
        // delivered through the returned future, as before.
        String streamSegmentName = handle.getSegmentName();
        Preconditions.checkNotNull(streamSegmentName, "streamSegmentName");
        Preconditions.checkArgument(!handle.isReadOnly(), "handle must not be read only. Segment=%s", handle.getSegmentName());
        val traceId = LoggerHelpers.traceEnter(log, "seal", handle);
        Timer timer = new Timer();
        log.debug("{} seal - started segment={}.", logPrefix, handle.getSegmentName());
        return tryWith(metadataStore.beginTransaction(false, handle.getSegmentName()), txn -> txn.get(streamSegmentName).thenComposeAsync(storageMetadata -> {
            val segmentMetadata = (SegmentMetadata) storageMetadata;
            // Validate preconditions: the segment must exist and be owned by this instance.
            checkSegmentExists(streamSegmentName, segmentMetadata);
            checkOwnership(streamSegmentName, segmentMetadata);
            // Seal only if not already sealed (keeps the operation idempotent).
            if (!segmentMetadata.isSealed()) {
                segmentMetadata.setSealed(true);
                txn.update(segmentMetadata);
                return txn.commit();
            } else {
                return CompletableFuture.completedFuture(null);
            }
        }, executor).thenRunAsync(() -> {
            log.debug("{} seal - finished segment={} latency={}.", logPrefix, handle.getSegmentName(), timer.getElapsedMillis());
            LoggerHelpers.traceLeave(log, "seal", traceId, handle);
        }, executor), executor).exceptionally(ex -> {
            log.warn("{} seal - exception segment={} latency={}.", logPrefix, handle.getSegmentName(), timer.getElapsedMillis(), ex);
            // Translate/record the failure; handleException rethrows as appropriate.
            handleException(streamSegmentName, ex);
            return null;
        });
    }, handle.getSegmentName());
}
Usage example of io.pravega.segmentstore.storage.SegmentHandle from the pravega project (pravega/pravega): class ChunkedSegmentStorage, method create.
/**
 * Creates a new segment with the given rolling policy.
 * Fails with {@link StreamSegmentExistsException} (via the returned future) if metadata for the
 * segment already exists.
 *
 * @param streamSegmentName name of the segment to create.
 * @param rollingPolicy     rolling policy; a max length of 0 falls back to NO_ROLLING's max length.
 * @param timeout           timeout for the operation (currently unused by this implementation).
 * @return a CompletableFuture containing a write handle for the newly created segment.
 */
@Override
public CompletableFuture<SegmentHandle> create(String streamSegmentName, SegmentRollingPolicy rollingPolicy, Duration timeout) {
    checkInitialized();
    return executeSerialized(() -> {
        val traceId = LoggerHelpers.traceEnter(log, "create", streamSegmentName, rollingPolicy);
        val timer = new Timer();
        log.debug("{} create - started segment={}, rollingPolicy={}.", logPrefix, streamSegmentName, rollingPolicy);
        return tryWith(metadataStore.beginTransaction(false, streamSegmentName), txn -> {
            // Retrieve metadata and make sure the segment does not already exist.
            return txn.get(streamSegmentName).thenComposeAsync(storageMetadata -> {
                val oldSegmentMetadata = (SegmentMetadata) storageMetadata;
                if (null != oldSegmentMetadata) {
                    throw new CompletionException(new StreamSegmentExistsException(streamSegmentName));
                }
                // Create a new metadata record owned by this instance's epoch.
                val newSegmentMetadata = SegmentMetadata.builder().name(streamSegmentName).maxRollinglength(rollingPolicy.getMaxLength() == 0 ? SegmentRollingPolicy.NO_ROLLING.getMaxLength() : rollingPolicy.getMaxLength()).ownerEpoch(this.epoch).build();
                newSegmentMetadata.setActive(true);
                txn.create(newSegmentMetadata);
                // Commit, then report metrics and hand back a write handle.
                return txn.commit().thenApplyAsync(v -> {
                    val retValue = SegmentStorageHandle.writeHandle(streamSegmentName);
                    Duration elapsed = timer.getElapsed();
                    SLTS_CREATE_LATENCY.reportSuccessEvent(elapsed);
                    SLTS_CREATE_COUNT.inc();
                    log.debug("{} create - finished segment={}, rollingPolicy={}, latency={}.", logPrefix, streamSegmentName, rollingPolicy, elapsed.toMillis());
                    LoggerHelpers.traceLeave(log, "create", traceId, retValue);
                    return retValue;
                }, executor);
            }, executor);
        }, executor).handleAsync((v, e) -> {
            if (null != e) {
                // Log failures at warn for consistency with the other operations (e.g. seal),
                // which use warn on their exception paths; debug-level made failures easy to miss.
                log.warn("{} create - exception segment={}, rollingPolicy={}, latency={}.", logPrefix, streamSegmentName, rollingPolicy, timer.getElapsedMillis(), e);
                handleException(streamSegmentName, e);
            }
            return v;
        }, executor);
    }, streamSegmentName);
}
Aggregations