Usage of io.pravega.segmentstore.contracts.StreamSegmentExistsException in the pravega/pravega project.
From the class RollingStorage, method create:
@Override
public SegmentProperties create(String segmentName, SegmentRollingPolicy rollingPolicy) throws StreamSegmentException {
    Preconditions.checkNotNull(rollingPolicy, "rollingPolicy");
    long traceId = LoggerHelpers.traceEnter(log, "create", segmentName, rollingPolicy);
    String headerName = StreamSegmentNameUtils.getHeaderSegmentName(segmentName);

    // A plain (headerless) Segment with this exact name may already exist in the base Storage
    // (e.g. one created before RollingStorage was layered on top of this baseStorage); do not overwrite it.
    if (this.baseStorage.exists(segmentName)) {
        throw new StreamSegmentExistsException(segmentName);
    }

    // Create the Header Segment, then serialize the initial handle into it. An already-existing
    // Header is tolerated only when it is empty and not sealed (a leftover from a previously
    // failed create attempt); in that case we reuse it and let the creation proceed.
    SegmentHandle handle = null;
    try {
        try {
            this.baseStorage.create(headerName);
        } catch (StreamSegmentExistsException ex) {
            checkIfEmptyAndNotSealed(ex, headerName);
            log.debug("Empty Segment Header found for '{}'; treating as inexistent.", segmentName);
        }

        handle = this.baseStorage.openWrite(headerName);
        serializeHandle(new RollingSegmentHandle(handle, rollingPolicy, Collections.emptyList()));
    } catch (StreamSegmentExistsException ex) {
        // A non-empty Header means the Segment genuinely exists; propagate as-is (no cleanup needed).
        throw ex;
    } catch (Exception ex) {
        if (!Exceptions.mustRethrow(ex) && handle != null) {
            // Best-effort rollback so we do not leave an empty Header file behind.
            try {
                log.warn("Could not create Header Segment for '{}', rolling back.", segmentName, ex);
                this.baseStorage.delete(handle);
            } catch (Exception ex2) {
                ex.addSuppressed(ex2);
            }
        }

        throw ex;
    }

    LoggerHelpers.traceLeave(log, "create", traceId, segmentName);
    return StreamSegmentInformation.builder().name(segmentName).build();
}
Usage of io.pravega.segmentstore.contracts.StreamSegmentExistsException in the pravega/pravega project.
From the class RollingStorageTests, method testBackwardsCompatibility:
/**
 * Tests the ability to handle Segment files with no header, which simulates a scenario where we add RollingStorage
 * to a Storage adapter that did not previously handle files this way.
 */
@Test
public void testBackwardsCompatibility() throws Exception {
// NOTE(review): name reads like a typo for "NonHeaderSegment", but the exact value is immaterial to the test.
final String segmentName = "SonHeaderSegment";
@Cleanup val baseStorage = new InMemoryStorage();
@Cleanup val s = new RollingStorage(baseStorage, DEFAULT_ROLLING_POLICY);
s.initialize(1);
// Create a plain Segment in the Base Storage; this will not have any headers or any special file layout.
baseStorage.create(segmentName);
// Verify create() with existing non-Header Segment: it must fail, keep the original file, and not leave a Header behind.
AssertExtensions.assertThrows("create() allowed creating a new Segment which already existed.", () -> s.create(segmentName), ex -> ex instanceof StreamSegmentExistsException);
Assert.assertTrue("Non-Header Segment does not exist after failed create() attempt.", baseStorage.exists(segmentName));
Assert.assertFalse("A header was left behind (after create).", baseStorage.exists(StreamSegmentNameUtils.getHeaderSegmentName(segmentName)));
// Verify exists().
Assert.assertTrue("Unexpected result from exists() when called on a non-header Segment.", s.exists(segmentName));
// Verify openWrite(), write() and seal(). Verify no rolling even if we exceed default rolling policy.
val writeHandle = s.openWrite(segmentName);
val os = new ByteArrayOutputStream();
populate(s, writeHandle, os);
s.seal(writeHandle);
byte[] writtenData = os.toByteArray();
Assert.assertFalse("A header was left behind (after write).", baseStorage.exists(StreamSegmentNameUtils.getHeaderSegmentName(segmentName)));
// Verify getInfo(): a headerless Segment's info must come straight from the base Storage and match it exactly.
val baseInfo = baseStorage.getStreamSegmentInfo(segmentName);
val rollingInfo = s.getStreamSegmentInfo(segmentName);
Assert.assertTrue("Segment not sealed.", baseInfo.isSealed());
Assert.assertEquals("Unexpected Segment length.", writtenData.length, baseInfo.getLength());
Assert.assertEquals("GetInfo.Name mismatch between base and rolling.", baseInfo.getName(), rollingInfo.getName());
Assert.assertEquals("GetInfo.Length mismatch between base and rolling.", baseInfo.getLength(), rollingInfo.getLength());
Assert.assertEquals("GetInfo.Sealed mismatch between base and rolling.", baseInfo.isSealed(), rollingInfo.isSealed());
// Verify openRead() and read().
val readHandle = s.openRead(segmentName);
checkWrittenData(writtenData, readHandle, s);
// Verify that truncate() is a no-op: the data must remain fully readable afterwards.
for (long truncateOffset = 0; truncateOffset < writtenData.length; truncateOffset += 10) {
s.truncate(writeHandle, truncateOffset);
}
checkWrittenData(writtenData, readHandle, s);
// Verify concat() with Source & Target non-Header Segments.
final String nonHeaderName = "NonHeaderSegment";
baseStorage.create(nonHeaderName);
val nonHeaderHandle = s.openWrite(nonHeaderName);
s.concat(nonHeaderHandle, 0, segmentName);
Assert.assertFalse("NonHeader source still exists after concat to NonHeader Segment.", s.exists(segmentName));
checkWrittenData(writtenData, s.openRead(nonHeaderName), s);
// Verify concat() with Source as non-Header Segment, but Target is a Header Segment.
final String withHeaderName = "WithHeader";
s.create(withHeaderName, DEFAULT_ROLLING_POLICY);
s.seal(nonHeaderHandle);
val withHeaderHandle = s.openWrite(withHeaderName);
s.concat(withHeaderHandle, 0, nonHeaderName);
Assert.assertFalse("NonHeader source still exists after concat to Header Segment.", s.exists(nonHeaderName));
val h1 = (RollingSegmentHandle) s.openRead(withHeaderName);
checkWrittenData(writtenData, h1, s);
// The target keeps its own rolling policy after absorbing a headerless source.
Assert.assertEquals("Unexpected MaxLength after concat.", DEFAULT_ROLLING_POLICY.getMaxLength(), h1.getRollingPolicy().getMaxLength());
// Verify concat() with Source as Header Segment, but Target as a non-Header Segment.
// We reuse this Segment Name since it should have been gone by now.
baseStorage.create(nonHeaderName);
// Need to create a few SegmentChunks to force a Header concat.
populate(s, withHeaderHandle, os);
s.seal(withHeaderHandle);
s.concat(s.openWrite(nonHeaderName), 0, withHeaderName);
Assert.assertFalse("NonHeader source still exists after concat to Header Segment.", s.exists(withHeaderName));
val h2 = (RollingSegmentHandle) s.openRead(nonHeaderName);
checkWrittenData(writtenData, h2, s);
// A previously headerless target is expected to end up with the NO_ROLLING policy.
Assert.assertEquals("Unexpected MaxLength after concat into non-header segment.", SegmentRollingPolicy.NO_ROLLING.getMaxLength(), h2.getRollingPolicy().getMaxLength());
// Verify delete(): both the rolling view and the underlying base file must be gone.
baseStorage.create(segmentName);
populate(s, s.openWrite(segmentName), new ByteArrayOutputStream());
s.delete(s.openWrite(segmentName));
Assert.assertFalse("Segment still exists after deletion.", s.exists(segmentName));
Assert.assertFalse("Segment still exists after deletion.", baseStorage.exists(segmentName));
}
Usage of io.pravega.segmentstore.contracts.StreamSegmentExistsException in the pravega/pravega project.
From the class ExtendedS3Storage, method doCreate:
/**
 * Creates a new Segment as an empty S3 object, failing if an object with this name already exists.
 * Note: the existence check and the subsequent putObject are not atomic; see the comment below on
 * the If-None-Match header for how put-if-absent semantics are (optionally) enforced.
 *
 * @param streamSegmentName The name of the Segment to create.
 * @return A SegmentProperties describing the newly created (empty) Segment.
 * @throws StreamSegmentExistsException If an object with this name already exists in the bucket.
 */
private SegmentProperties doCreate(String streamSegmentName) throws StreamSegmentExistsException {
    long traceId = LoggerHelpers.traceEnter(log, "create", streamSegmentName);
    if (!client.listObjects(config.getBucket(), config.getRoot() + streamSegmentName).getObjects().isEmpty()) {
        throw new StreamSegmentExistsException(streamSegmentName);
    }

    // Create the object with no content and restrict access to the configured account.
    PutObjectRequest request = new PutObjectRequest(config.getBucket(), config.getRoot() + streamSegmentName, null);
    AccessControlList acl = new AccessControlList();
    acl.addGrants(new Grant(new CanonicalUser(config.getAccessKey(), config.getAccessKey()), READ_WRITE_PERMISSION));
    request.setAcl(acl);

    /* Default behavior of putObject is to overwrite an existing object. This behavior can cause data loss.
     * Here is one of the scenarios in which data loss is observed:
     * 1. Host A owns the container and gets a create operation. It has not executed the putObject operation yet.
     * 2. Ownership changes and host B becomes the owner of the container. It picks up putObject from the queue, executes it.
     * 3. Host B gets a write operation which executes successfully.
     * 4. Now host A schedules the putObject. This will overwrite the write by host B.
     *
     * The solution for this issue is to implement put-if-absent behavior by using Set-If-None-Match header as described here:
     * http://www.emc.com/techpubs/api/ecs/v3-0-0-0/S3ObjectOperations_createOrUpdateObject_7916bd6f789d0ae0ff39961c0e660d00_ba672412ac371bb6cf4e69291344510e_detail.htm
     * But this does not work. Currently all the calls to putObject API fail if made with request.setIfNoneMatch("*").
     * once the issue with extended S3 API is fixed, addition of this one line will ensure put-if-absent semantics.
     * See: https://github.com/pravega/pravega/issues/1564
     *
     * This issue is fixed in some versions of extended S3 implementation. The following code sets the IfNoneMatch
     * flag based on configuration.
     */
    if (config.isUseNoneMatch()) {
        request.setIfNoneMatch("*");
    }

    client.putObject(request);
    LoggerHelpers.traceLeave(log, "create", traceId);
    return doGetStreamSegmentInfo(streamSegmentName);
}
Usage of io.pravega.segmentstore.contracts.StreamSegmentExistsException in the pravega/pravega project.
From the class BookKeeperAdapter, method createStream:
@Override
public CompletableFuture<Void> createStream(String logName, Duration timeout) {
    ensureRunning();

    // Atomically reserve an internal id for this log name; fail fast if the name is already taken.
    final int id;
    synchronized (this.internalIds) {
        if (this.internalIds.containsKey(logName)) {
            return Futures.failedFuture(new StreamSegmentExistsException(logName));
        }

        id = this.internalIds.size();
        this.internalIds.put(logName, id);
    }

    // Asynchronously create and initialize the underlying DurableDataLog; on any failure,
    // roll back the registration so a later createStream() with the same name can succeed.
    return CompletableFuture.runAsync(() -> {
        DurableDataLog dataLog = null;
        boolean initialized = false;
        try {
            dataLog = this.logFactory.createDurableDataLog(id);
            this.logs.put(logName, dataLog);
            dataLog.initialize(timeout);
            initialized = true;
        } catch (DurableDataLogException ex) {
            throw new CompletionException(ex);
        } finally {
            if (!initialized) {
                this.logs.remove(logName);
                synchronized (this.internalIds) {
                    this.internalIds.remove(logName);
                }

                if (dataLog != null) {
                    dataLog.close();
                }
            }
        }
    }, this.executor);
}
Usage of io.pravega.segmentstore.contracts.StreamSegmentExistsException in the pravega/pravega project.
From the class ExtendedS3StorageTest, method testCreateIfNoneMatch:
// region If-none-match test
/**
 * Tests the create() method with if-none-match set. Note that we currently
 * do not run a real storage tier, so we cannot verify the behavior of the
 * option against a real storage. Here instead, we are simply making sure
 * that the new execution path does not break anything.
 */
@Test
public void testCreateIfNoneMatch() {
    // Build a config identical to the default test config, except with USENONEMATCH enabled.
    val config = ExtendedS3StorageConfig.builder()
            .with(ExtendedS3StorageConfig.BUCKET, setup.adapterConfig.getBucket())
            .with(ExtendedS3StorageConfig.ACCESS_KEY_ID, "x")
            .with(ExtendedS3StorageConfig.SECRET_KEY, "x")
            .with(ExtendedS3StorageConfig.ROOT, "test")
            .with(ExtendedS3StorageConfig.URI, setup.endpoint)
            .with(ExtendedS3StorageConfig.USENONEMATCH, true)
            .build();
    String segmentName = "foo_open";
    try (Storage s = createStorage(setup.client, config, executorService())) {
        s.initialize(DEFAULT_EPOCH);

        // The first create() must succeed; a second create() for the same name must fail.
        s.create(segmentName, null).join();
        assertThrows("create() did not throw for existing StreamSegment.", s.create(segmentName, null), ex -> ex instanceof StreamSegmentExistsException);
    }
}
Aggregations