Use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.
From class ChunkedSegmentStorage, method openRead:
@Override
public CompletableFuture<SegmentHandle> openRead(String streamSegmentName) {
    checkInitialized();
    return executeParallel(() -> {
        val traceId = LoggerHelpers.traceEnter(log, "openRead", streamSegmentName);
        val timer = new Timer();
        // Validate preconditions and return handle.
        Preconditions.checkNotNull(streamSegmentName, "streamSegmentName");
        log.debug("{} openRead - started segment={}.", logPrefix, streamSegmentName);
        return tryWith(metadataStore.beginTransaction(false, streamSegmentName),
                txn -> txn.get(streamSegmentName).thenComposeAsync(storageMetadata -> {
                    val segmentMetadata = (SegmentMetadata) storageMetadata;
                    checkSegmentExists(streamSegmentName, segmentMetadata);
                    segmentMetadata.checkInvariants();
                    // If this segment was created by an older segment store instance, claim ownership and adjust its length.
                    final CompletableFuture<Void> f;
                    if (segmentMetadata.getOwnerEpoch() < this.epoch) {
                        log.debug("{} openRead - Segment needs ownership change. segment={}.", logPrefix, segmentMetadata.getName());
                        // In case of a fail-over, the length recorded in metadata may lag behind the actual length in storage.
                        // This can happen with lazy commits that were not yet committed at the time of fail-over.
                        f = executeSerialized(() -> claimOwnership(txn, segmentMetadata), streamSegmentName);
                    } else {
                        f = CompletableFuture.completedFuture(null);
                    }
                    return f.thenApplyAsync(v -> {
                        val retValue = SegmentStorageHandle.readHandle(streamSegmentName);
                        log.debug("{} openRead - finished segment={} latency={}.", logPrefix, streamSegmentName, timer.getElapsedMillis());
                        LoggerHelpers.traceLeave(log, "openRead", traceId, retValue);
                        return retValue;
                    }, executor);
                }, executor), executor)
                .handleAsync((v, ex) -> {
                    if (null != ex) {
                        log.debug("{} openRead - exception segment={} latency={}.", logPrefix, streamSegmentName, timer.getElapsedMillis(), ex);
                        handleException(streamSegmentName, ex);
                    }
                    return v;
                }, executor);
    }, streamSegmentName);
}
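For orientation, here is a minimal caller-side sketch of the read path. The storage instance, the segment name "mySegment", and the TIMEOUT constant are assumed names; the chaining pattern itself mirrors the HDFSStorageTest snippets further down this page.

// Minimal caller sketch (assumed names): open a read handle, then read the first bytes.
byte[] buffer = new byte[1024];
int bytesRead = storage.openRead("mySegment")
        .thenCompose(handle -> storage.read(handle, 0, buffer, 0, buffer.length, TIMEOUT))
        .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);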
Use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.
From class ChunkedSegmentStorage, method openWrite:
@Override
public CompletableFuture<SegmentHandle> openWrite(String streamSegmentName) {
    checkInitialized();
    return executeSerialized(() -> {
        val traceId = LoggerHelpers.traceEnter(log, "openWrite", streamSegmentName);
        val timer = new Timer();
        Preconditions.checkNotNull(streamSegmentName, "streamSegmentName");
        log.debug("{} openWrite - started segment={}.", logPrefix, streamSegmentName);
        return tryWith(metadataStore.beginTransaction(false, streamSegmentName),
                txn -> txn.get(streamSegmentName).thenComposeAsync(storageMetadata -> {
                    val segmentMetadata = (SegmentMetadata) storageMetadata;
                    checkSegmentExists(streamSegmentName, segmentMetadata);
                    segmentMetadata.checkInvariants();
                    // If this segment was created by an older segment store instance, we need to start a fresh new chunk.
                    final CompletableFuture<Void> f;
                    if (segmentMetadata.getOwnerEpoch() < this.epoch) {
                        log.debug("{} openWrite - Segment needs ownership change - segment={}.", logPrefix, segmentMetadata.getName());
                        f = claimOwnership(txn, segmentMetadata);
                    } else {
                        f = CompletableFuture.completedFuture(null);
                    }
                    return f.thenApplyAsync(v -> {
                        // If the segment was claimed by a newer instance in the meantime, abort.
                        checkOwnership(streamSegmentName, segmentMetadata);
                        // This instance is the owner, return a handle.
                        val retValue = SegmentStorageHandle.writeHandle(streamSegmentName);
                        log.debug("{} openWrite - finished segment={} latency={}.", logPrefix, streamSegmentName, timer.getElapsedMillis());
                        LoggerHelpers.traceLeave(log, "openWrite", traceId, retValue);
                        return retValue;
                    }, executor);
                }, executor), executor)
                .handleAsync((v, ex) -> {
                    if (null != ex) {
                        log.debug("{} openWrite - exception segment={} latency={}.", logPrefix, streamSegmentName, timer.getElapsedMillis(), ex);
                        handleException(streamSegmentName, ex);
                    }
                    return v;
                }, executor);
    }, streamSegmentName);
}
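The write path is symmetric. A minimal sketch, again with assumed names (storage, "mySegment", TIMEOUT), following the create-then-openWrite chaining used in testZombieFencing below:

// Minimal caller sketch (assumed names): create, open for write, then append.
byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
SegmentHandle handle = storage.create("mySegment", TIMEOUT)
        .thenCompose(v -> storage.openWrite("mySegment"))
        .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
// Append at the current end of the segment (offset 0 for a freshly created segment).
storage.write(handle, 0, new ByteArrayInputStream(data), data.length, TIMEOUT).join();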
Use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.
From class HDFSStorageTest, method testZombieFencing:
// region Fencing tests

/**
 * A special test case of fencing that verifies the behavior of HDFSStorage in the presence of an instance that has
 * been fenced out. It verifies that any ongoing writes properly fail upon fencing. Specifically, we have a
 * fenced-out instance that keeps writing, and we verify that the writes fail once the ownership changes.
 * Note that HDFS's behavior in this case is that ongoing writes which execute before the rename complete successfully.
 */
@Test(timeout = 60000)
public void testZombieFencing() throws Exception {
    final long epochCount = 30;
    final int writeSize = 1000;
    final String segmentName = "Segment";
    @Cleanup
    val writtenData = new ByteBufferOutputStream();
    final Random rnd = new Random(0);
    int currentEpoch = 1;
    // Create initial adapter.
    val currentStorage = new AtomicReference<Storage>();
    currentStorage.set(createStorage());
    currentStorage.get().initialize(currentEpoch);
    // Create the Segment and open it for the first time.
    val currentHandle = new AtomicReference<SegmentHandle>(
            currentStorage.get().create(segmentName, TIMEOUT)
                    .thenCompose(v -> currentStorage.get().openWrite(segmentName))
                    .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
    // Run a number of epochs.
    while (currentEpoch <= epochCount) {
        val oldStorage = currentStorage.get();
        val handle = currentHandle.get();
        val writeBuffer = new byte[writeSize];
        val appends = Futures.loop(() -> true, () -> {
            rnd.nextBytes(writeBuffer);
            return oldStorage.write(handle, writtenData.size(), new ByteArrayInputStream(writeBuffer), writeBuffer.length, TIMEOUT)
                    .thenRun(() -> writtenData.write(writeBuffer));
        }, executorService());
        // Create a new Storage adapter with a new epoch and open-write the Segment, remembering its handle.
        val newStorage = createStorage();
        try {
            newStorage.initialize(++currentEpoch);
            currentHandle.set(newStorage.openWrite(segmentName).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
        } catch (Exception ex) {
            newStorage.close();
            throw ex;
        }
        currentStorage.set(newStorage);
        try {
            appends.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.fail("Continuous appends on older epoch Adapter did not fail.");
        } catch (Exception ex) {
            val cause = Exceptions.unwrap(ex);
            if (!(cause instanceof StorageNotPrimaryException
                    || cause instanceof StreamSegmentSealedException
                    || cause instanceof StreamSegmentNotExistsException)) {
                // We only expect the appends to fail because they were fenced out or the Segment was sealed.
                Assert.fail("Unexpected exception " + cause);
            }
        } finally {
            oldStorage.close();
        }
    }
    byte[] expectedData = writtenData.getData().getCopy();
    byte[] readData = new byte[expectedData.length];
    @Cleanup
    val readStorage = createStorage();
    readStorage.initialize(++currentEpoch);
    int bytesRead = readStorage.openRead(segmentName)
            .thenCompose(handle -> readStorage.read(handle, 0, readData, 0, readData.length, TIMEOUT))
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected number of bytes read.", readData.length, bytesRead);
    Assert.assertArrayEquals("Unexpected data read back.", expectedData, readData);
}
Use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.
From class HDFSStorageTest, method testFencing:
/**
 * Tests fencing abilities. We create two different Storage objects with different owner ids.
 * Part 1: Creation:
 * * We create the Segment on Storage1:
 * ** We verify that Storage1 can execute all operations.
 * ** We verify that Storage2 can execute only read-only operations.
 * * We open the Segment on Storage2:
 * ** We verify that Storage1 can execute only read-only operations.
 * ** We verify that Storage2 can execute all operations.
 */
@Test
@Override
public void testFencing() throws Exception {
    final long epoch1 = 1;
    final long epoch2 = 2;
    final String segmentName = "segment";
    try (val storage1 = createStorage();
         val storage2 = createStorage()) {
        storage1.initialize(epoch1);
        storage2.initialize(epoch2);
        // Create the segment in Storage1 (thus Storage1 owns it for now).
        storage1.create(segmentName, TIMEOUT).join();
        // Storage1 should be able to execute all operations.
        SegmentHandle handle1 = storage1.openWrite(segmentName).join();
        verifyWriteOperationsSucceed(handle1, storage1);
        verifyReadOnlyOperationsSucceed(handle1, storage1);
        // Open the segment in Storage2 (thus Storage2 owns it for now).
        SegmentHandle handle2 = storage2.openWrite(segmentName).join();
        // Storage1 should now be able to execute only read-only operations.
        verifyWriteOperationsFail(handle1, storage1);
        verifyConcatOperationsFail(handle1, storage1);
        verifyReadOnlyOperationsSucceed(handle1, storage1);
        // Storage2 should be able to execute all operations.
        verifyReadOnlyOperationsSucceed(handle2, storage2);
        verifyWriteOperationsSucceed(handle2, storage2);
        // Seal and Delete (these should be run last, otherwise we can't run our test).
        verifyFinalWriteOperationsFail(handle1, storage1);
        verifyFinalWriteOperationsSucceed(handle2, storage2);
    }
}
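The verify* helpers are defined elsewhere in the test class and are not shown on this page. As a rough illustration only (this body is a guess modeled on the exception handling in testZombieFencing above, not Pravega's actual helper), verifyWriteOperationsFail could be written as:

// Illustrative only: expect a write through a fenced-out handle to fail with StorageNotPrimaryException.
private void verifyWriteOperationsFail(SegmentHandle handle, Storage storage) {
    byte[] data = new byte[1];
    long offset = storage.getStreamSegmentInfo(handle.getSegmentName(), TIMEOUT).join().getLength();
    try {
        storage.write(handle, offset, new ByteArrayInputStream(data), data.length, TIMEOUT).join();
        Assert.fail("Write on a fenced-out handle did not fail.");
    } catch (Exception ex) {
        Assert.assertTrue("Unexpected exception: " + ex,
                Exceptions.unwrap(ex) instanceof StorageNotPrimaryException);
    }
}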
Use of io.pravega.segmentstore.storage.SegmentHandle in project pravega by pravega.
From class HDFSStorageTest, method testNormalRead:
/**
 * Tests a read scenario with no issues or failures.
 */
@Test
public void testNormalRead() throws Exception {
    // Write data.
    String segmentName = "foo_open";
    val rnd = new Random(0);
    try (Storage s = createStorage()) {
        s.initialize(DEFAULT_EPOCH);
        createSegment(segmentName, s);
        SegmentHandle handle = s.openWrite(segmentName).join();
        long expectedLength = 0;
        ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
        for (int i = 0; i < WRITE_COUNT; i++) {
            byte[] data = new byte[i + 1];
            rnd.nextBytes(data);
            s.write(handle, expectedLength, new ByteArrayInputStream(data), data.length, null).join();
            writtenData.write(data);
            expectedLength += data.length;
        }
        // Check written data via a Read Operation, from every offset from 0 to length/2.
        byte[] expectedData = writtenData.toByteArray();
        val readHandle = s.openRead(segmentName).join();
        for (int startOffset = 0; startOffset < expectedLength / 2; startOffset++) {
            int readLength = (int) (expectedLength - 2 * startOffset);
            byte[] actualData = new byte[readLength];
            int readBytes = s.read(readHandle, startOffset, actualData, 0, actualData.length, null).join();
            Assert.assertEquals("Unexpected number of bytes read with start offset " + startOffset, actualData.length, readBytes);
            AssertExtensions.assertArrayEquals("Unexpected data read back with start offset " + startOffset, expectedData, startOffset, actualData, 0, readLength);
        }
    }
}