Use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.
Class RollingStorage, method unsealLastChunkIfNecessary:
private void unsealLastChunkIfNecessary(RollingSegmentHandle handle) throws StreamSegmentException {
    SegmentChunk last = handle.lastChunk();
    if (last == null || !last.isSealed()) {
        // Nothing to do.
        return;
    }

    SegmentHandle activeChunk = handle.getActiveChunkHandle();
    boolean needsHandleUpdate = activeChunk == null;
    if (needsHandleUpdate) {
        // We didn't have a pointer to the active chunk's Handle because the chunk was sealed before open-write.
        activeChunk = this.baseStorage.openWrite(last.getName());
    }

    try {
        this.baseStorage.unseal(activeChunk);
    } catch (UnsupportedOperationException e) {
        log.warn("Unable to unseal SegmentChunk '{}' since base storage does not support unsealing.", last);
        return;
    }

    last.markUnsealed();
    if (needsHandleUpdate) {
        activeChunk = this.baseStorage.openWrite(last.getName());
        handle.setActiveChunkHandle(activeChunk);
    }

    log.debug("Unsealed active SegmentChunk '{}' for '{}'.", activeChunk.getSegmentName(), handle.getSegmentName());
}
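The method above only touches the SegmentHandle contract through getSegmentName() and the handles returned by openWrite(). For reference, a minimal sketch of what a SegmentHandle implementation returned by a custom base storage could look like; getSegmentName() appears in the snippet above, while isReadOnly() is assumed here and may not match the actual interface exactly:

import io.pravega.segmentstore.storage.SegmentHandle;

// Minimal illustrative sketch only; not taken from the Pravega codebase.
final class SimpleSegmentHandle implements SegmentHandle {
    private final String segmentName;
    private final boolean readOnly; // isReadOnly() is an assumed part of the interface.

    SimpleSegmentHandle(String segmentName, boolean readOnly) {
        this.segmentName = segmentName;
        this.readOnly = readOnly;
    }

    @Override
    public String getSegmentName() {
        return this.segmentName;
    }

    @Override
    public boolean isReadOnly() {
        return this.readOnly;
    }
}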
Use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.
Class RollingStorage, method create:
@Override
public SegmentProperties create(String segmentName, SegmentRollingPolicy rollingPolicy) throws StreamSegmentException {
    Preconditions.checkNotNull(rollingPolicy, "rollingPolicy");
    String headerName = StreamSegmentNameUtils.getHeaderSegmentName(segmentName);
    long traceId = LoggerHelpers.traceEnter(log, "create", segmentName, rollingPolicy);

    // Fail if a plain (non-header) Segment with this name already exists in the base storage (i.e., one created
    // without going through RollingStorage, by writing directly to this baseStorage).
    if (this.baseStorage.exists(segmentName)) {
        throw new StreamSegmentExistsException(segmentName);
    }

    // Create the header file, and then serialize the contents to it.
    // If the header file already exists, then it's OK if it's empty (probably a remnant from a previously failed
    // attempt); in that case we ignore it and let the creation proceed.
    SegmentHandle headerHandle = null;
    try {
        try {
            this.baseStorage.create(headerName);
        } catch (StreamSegmentExistsException ex) {
            checkIfEmptyAndNotSealed(ex, headerName);
            log.debug("Empty Segment Header found for '{}'; treating as inexistent.", segmentName);
        }
        headerHandle = this.baseStorage.openWrite(headerName);
        serializeHandle(new RollingSegmentHandle(headerHandle, rollingPolicy, Collections.emptyList()));
    } catch (StreamSegmentExistsException ex) {
        throw ex;
    } catch (Exception ex) {
        if (!Exceptions.mustRethrow(ex) && headerHandle != null) {
            // If we did manage to create the Header Segment, clean it up; otherwise we'll leave behind an empty file.
            try {
                log.warn("Could not create Header Segment for '{}', rolling back.", segmentName, ex);
                this.baseStorage.delete(headerHandle);
            } catch (Exception ex2) {
                ex.addSuppressed(ex2);
            }
        }

        throw ex;
    }

    LoggerHelpers.traceLeave(log, "create", traceId, segmentName);
    return StreamSegmentInformation.builder().name(segmentName).build();
}
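The catch block above uses a rollback idiom worth noting: delete the partially created header, attach any secondary cleanup failure to the original exception via addSuppressed(), and rethrow the original. A minimal self-contained sketch of the same pattern, using java.nio.file as a stand-in rather than Pravega's base storage API:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

final class CreateWithRollbackExample {
    // Creates 'header', writes the initial contents, and rolls the file back if the write fails.
    static void createHeader(Path header, byte[] initialContents) throws IOException {
        Files.createFile(header);                  // analogous to baseStorage.create(headerName)
        try {
            Files.write(header, initialContents);  // analogous to serializeHandle(...)
        } catch (IOException ex) {
            try {
                Files.deleteIfExists(header);      // roll back so no empty header is left behind
            } catch (IOException ex2) {
                ex.addSuppressed(ex2);             // keep the cleanup failure attached to the original
            }
            throw ex;                              // always rethrow the original failure
        }
    }
}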
Use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.
Class InMemoryStorageTests, method testFencing:
@Test
@Override
public void testFencing() throws Exception {
    final String segment1 = "segment1";
    final String segment2 = "segment2";
    @Cleanup val baseStorage = new InMemoryStorage();
    @Cleanup val storage = new AsyncStorageWrapper(baseStorage, executorService());
    storage.initialize(DEFAULT_EPOCH);

    // Part 1: Create a segment and verify all operations are allowed.
    storage.create(segment1, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    SegmentHandle handle1 = storage.openWrite(segment1).join();
    verifyAllOperationsSucceed(handle1, storage);

    // Part 2: Change owner, verify segment operations are not allowed until a call to open() is made.
    baseStorage.changeOwner();
    verifyWriteOperationsFail(handle1, storage);
    handle1 = storage.openWrite(segment1).join();
    verifyAllOperationsSucceed(handle1, storage);

    // Part 3: Create new segment and verify all operations are allowed.
    storage.create(segment2, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    SegmentHandle handle2 = storage.openWrite(segment2).join();
    verifyAllOperationsSucceed(handle2, storage);

    // Cleanup.
    storage.delete(handle1, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    storage.delete(handle2, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
}
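verifyAllOperationsSucceed and verifyWriteOperationsFail are helpers from the shared storage test base and are not shown here. A hypothetical sketch of the failure check, assuming an async Storage API with write(handle, offset, data, length, timeout) and seal(handle, timeout), and assuming fenced-out calls surface StorageNotPrimaryException; the real helper may differ:

// Hypothetical sketch only; this would live inside the test class next to the test above.
private void verifyWriteOperationsFail(SegmentHandle handle, Storage storage) {
    byte[] payload = "data".getBytes();
    // Assumption: a fenced-out owner is rejected with StorageNotPrimaryException.
    AssertExtensions.assertThrows(
            "write() succeeded on a fenced-out handle.",
            () -> storage.write(handle, 0, new ByteArrayInputStream(payload), payload.length, TIMEOUT).join(),
            ex -> Exceptions.unwrap(ex) instanceof StorageNotPrimaryException);
    AssertExtensions.assertThrows(
            "seal() succeeded on a fenced-out handle.",
            () -> storage.seal(handle, TIMEOUT).join(),
            ex -> Exceptions.unwrap(ex) instanceof StorageNotPrimaryException);
}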
Use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.
Class StorageWriterTests, method testWithStorageCorruptionErrors:
/**
 * Tests the StorageWriter in a scenario where the Storage component throws data corruption exceptions
 * (i.e., bad offset, and even after reconciliation the data is still corrupt).
 */
@Test
public void testWithStorageCorruptionErrors() throws Exception {
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);

    // Create a bunch of segments and Transactions.
    ArrayList<Long> segmentIds = createSegments(context);

    // Append data.
    HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
    appendDataBreadthFirst(segmentIds, segmentContents, context);

    // Corrupt (one segment should suffice).
    byte[] corruptionData = "foo".getBytes();
    SegmentHandle corruptedSegmentHandle = InMemoryStorage.newHandle(context.metadata.getStreamSegmentMetadata(segmentIds.get(0)).getName(), false);
    Supplier<Exception> exceptionSupplier = () -> {
        // Corrupt data. We use an internal method (append) to atomically write data at the end of the segment.
        // GetLength+Write would not work well because there may be concurrent writes that modify the data between
        // requesting the length and attempting to write, thus causing the corruption to fail.
        // NOTE: this is a synchronous call, but append() is also a sync method. If append() would become async,
        // care must be taken not to block a thread while waiting for it.
        context.storage.append(corruptedSegmentHandle, new ByteArrayInputStream(corruptionData), corruptionData.length);

        // Return some other kind of exception.
        return new TimeoutException("Intentional");
    };

    // We only try to corrupt data once.
    AtomicBoolean corruptionHappened = new AtomicBoolean();
    context.storage.setWriteAsyncErrorInjector(new ErrorInjector<>(c -> !corruptionHappened.getAndSet(true), exceptionSupplier));

    AssertExtensions.assertThrows("StorageWriter did not fail when a fatal data corruption error occurred.", () -> {
        // The Corruption may happen early enough so the "awaitRunning" isn't complete yet. In that case,
        // the writer will never reach its 'Running' state. As such, we need to make sure at least one of these
        // will throw (either start or, if the failure happened after start, make sure it eventually fails and shuts down).
        context.writer.startAsync().awaitRunning();
        ServiceListeners.awaitShutdown(context.writer, TIMEOUT, true);
    }, ex -> ex instanceof IllegalStateException);

    ServiceListeners.awaitShutdown(context.writer, TIMEOUT, false);
    Assert.assertTrue("Unexpected failure cause for StorageWriter.", Exceptions.unwrap(context.writer.failureCause()) instanceof ReconciliationFailureException);
}
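The error injection above fires exactly once: the predicate c -> !corruptionHappened.getAndSet(true) flips an AtomicBoolean the first time it is evaluated. The same inject-once pattern in isolation, as a small self-contained class (not Pravega's ErrorInjector):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;

// Fires the supplied exception exactly once, then stays silent; mirrors the
// "c -> !corruptionHappened.getAndSet(true)" predicate used in the test above.
final class OneShotErrorInjector<E extends Exception> {
    private final AtomicBoolean fired = new AtomicBoolean();
    private final Supplier<E> exceptionSupplier;

    OneShotErrorInjector(Supplier<E> exceptionSupplier) {
        this.exceptionSupplier = exceptionSupplier;
    }

    void maybeThrow() throws E {
        if (!this.fired.getAndSet(true)) {
            throw this.exceptionSupplier.get();
        }
    }
}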
Use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.
Class HDFSStorageTest, method testFencing:
// region Fencing tests

/**
 * Tests fencing abilities. We create two different Storage objects with different owner ids.
 * Part 1: Creation:
 * * We create the Segment on Storage1:
 * ** We verify that Storage1 can execute all operations.
 * ** We verify that Storage2 can execute only read-only operations.
 * * We open the Segment on Storage2:
 * ** We verify that Storage1 can execute only read-only operations.
 * ** We verify that Storage2 can execute all operations.
 */
@Test
@Override
public void testFencing() throws Exception {
    final long epoch1 = 1;
    final long epoch2 = 2;
    final String segmentName = "segment";
    try (val storage1 = createStorage();
         val storage2 = createStorage()) {
        storage1.initialize(epoch1);
        storage2.initialize(epoch2);

        // Create segment in Storage1 (thus Storage1 owns it for now).
        storage1.create(segmentName, TIMEOUT).join();

        // Storage1 should be able to execute all operations.
        SegmentHandle handle1 = storage1.openWrite(segmentName).join();
        verifyWriteOperationsSucceed(handle1, storage1);
        verifyReadOnlyOperationsSucceed(handle1, storage1);

        // Open the segment in Storage2 (thus Storage2 owns it for now).
        SegmentHandle handle2 = storage2.openWrite(segmentName).join();

        // Storage1 should be able to execute only read-only operations.
        verifyWriteOperationsFail(handle1, storage1);
        verifyReadOnlyOperationsSucceed(handle1, storage1);

        // Storage2 should be able to execute all operations.
        verifyReadOnlyOperationsSucceed(handle2, storage2);
        verifyWriteOperationsSucceed(handle2, storage2);

        // Seal and Delete (these should be run last, otherwise we can't run our test).
        verifyFinalWriteOperationsFail(handle1, storage1);
        verifyFinalWriteOperationsSucceed(handle2, storage2);
    }
}
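As in the InMemoryStorage test, the verify* helpers come from the shared test base. A hypothetical sketch of the two "final operations" checks, assuming seal(handle, timeout) and delete(handle, timeout) on the async Storage API and StorageNotPrimaryException for fenced-out callers; the real helpers may differ:

// Hypothetical sketches only; these would live in the shared test base next to the tests above.
private void verifyFinalWriteOperationsSucceed(SegmentHandle handle, Storage storage) {
    storage.seal(handle, TIMEOUT).join();
    storage.delete(handle, TIMEOUT).join();
}

private void verifyFinalWriteOperationsFail(SegmentHandle handle, Storage storage) {
    // Assumption: seal() and delete() are both rejected for a fenced-out owner, so the
    // segment survives for the new owner's verifyFinalWriteOperationsSucceed call.
    AssertExtensions.assertThrows(
            "seal() succeeded on a fenced-out handle.",
            () -> storage.seal(handle, TIMEOUT).join(),
            ex -> Exceptions.unwrap(ex) instanceof StorageNotPrimaryException);
    AssertExtensions.assertThrows(
            "delete() succeeded on a fenced-out handle.",
            () -> storage.delete(handle, TIMEOUT).join(),
            ex -> Exceptions.unwrap(ex) instanceof StorageNotPrimaryException);
}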