Use of org.apache.jackrabbit.core.data.DataStoreException in the project jackrabbit-oak by Apache.
From the class S3Backend, method addMetadataRecord.
/**
 * Uploads the given file to S3 as a metadata record stored under the
 * meta-prefixed key derived from {@code name}.
 *
 * @param input the local file whose contents become the metadata record
 * @param name  the logical record name; prefixed via {@link #addMetaKeyPrefix}
 * @throws DataStoreException if the upload is interrupted before completion
 */
@Override
public void addMetadataRecord(File input, String name) throws DataStoreException {
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        // Use this class's loader while the AWS SDK runs so it can resolve
        // its own resources regardless of the caller's context loader.
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        Upload upload = tmx.upload(s3ReqDecorator.decorate(
                new PutObjectRequest(bucket, addMetaKeyPrefix(name), input)));
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        // Pass args individually (not wrapped in Object[]) so SLF4J fills the
        // placeholder with the file and logs the exception's stack trace.
        LOG.error("Exception in uploading metadata file {}", input, e);
        throw new DataStoreException("Error in uploading metadata file", e);
    } finally {
        // Always restore the caller's original context class loader.
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
Use of org.apache.jackrabbit.core.data.DataStoreException in the project jackrabbit-oak by Apache.
From the class S3Backend, method addMetadataRecord (InputStream overload).
/**
 * Uploads the given stream to S3 as a metadata record stored under the
 * meta-prefixed key derived from {@code name}.
 *
 * <p>NOTE(review): the {@link ObjectMetadata} carries no content length, so the
 * SDK may buffer the whole stream in memory to compute it — confirm callers
 * only pass small metadata payloads here.
 *
 * @param input the stream whose contents become the metadata record
 * @param name  the logical record name; prefixed via {@link #addMetaKeyPrefix}
 * @throws DataStoreException if the upload is interrupted before completion
 */
public void addMetadataRecord(final InputStream input, final String name) throws DataStoreException {
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        // Use this class's loader while the AWS SDK runs so it can resolve
        // its own resources regardless of the caller's context loader.
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        Upload upload = tmx.upload(s3ReqDecorator.decorate(
                new PutObjectRequest(bucket, addMetaKeyPrefix(name), input, new ObjectMetadata())));
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        LOG.error("Error in uploading", e);
        throw new DataStoreException("Error in uploading", e);
    } finally {
        // Always restore the caller's original context class loader.
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
Use of org.apache.jackrabbit.core.data.DataStoreException in the project jackrabbit-oak by Apache.
From the class S3Backend, method init.
/**
 * Initialize S3Backend. It creates AmazonS3Client and TransferManager from
 * aws.properties. It creates the S3 bucket if it doesn't already exist in S3.
 *
 * @param store   the data store this backend serves
 * @param homeDir the repository home directory
 * @param config  path to the properties file; {@code null} falls back to
 *                {@link Utils#DEFAULT_CONFIG_FILE}
 * @throws DataStoreException if the configuration file cannot be read
 */
@Override
public void init(CachingDataStore store, String homeDir, String config) throws DataStoreException {
    // Properties injected programmatically take precedence over the
    // file-based configuration.
    Properties initProps;
    if (this.properties == null) {
        String source = (config == null) ? Utils.DEFAULT_CONFIG_FILE : config;
        try {
            initProps = Utils.readConfig(source);
        } catch (IOException e) {
            throw new DataStoreException("Could not initialize S3 from " + source, e);
        }
        // Remember the loaded configuration for later re-use.
        this.properties = initProps;
    } else {
        initProps = this.properties;
    }
    init(store, homeDir, initProps);
}
Use of org.apache.jackrabbit.core.data.DataStoreException in the project jackrabbit-oak by Apache.
From the class S3Backend, method deleteRecord.
/**
 * Deletes the S3 object backing the given identifier.
 *
 * @param identifier the record to delete
 * @throws DataStoreException if the S3 delete call fails
 */
@Override
public void deleteRecord(DataIdentifier identifier) throws DataStoreException {
    long startTime = System.currentTimeMillis();
    String key = getKeyName(identifier);
    ClassLoader previousLoader = Thread.currentThread().getContextClassLoader();
    try {
        // Use this class's loader while the AWS SDK runs so it can resolve
        // its own resources regardless of the caller's context loader.
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        s3service.deleteObject(bucket, key);
        LOG.debug("Identifier [{}] deleted. It took [{}]ms.",
                identifier, System.currentTimeMillis() - startTime);
    } catch (AmazonServiceException e) {
        throw new DataStoreException("Could not delete dataIdentifier " + identifier, e);
    } finally {
        // Always restore the caller's original context class loader.
        if (previousLoader != null) {
            Thread.currentThread().setContextClassLoader(previousLoader);
        }
    }
}
Use of org.apache.jackrabbit.core.data.DataStoreException in the project jackrabbit-oak by Apache.
From the class UploadStagingCacheTest, method testAddUploadException.
/**
 * Stages a file whose first upload attempt fails, verifies the entry is
 * retained in the staging cache, then retries the upload and verifies the
 * entry is purged from staging and persisted by the uploader.
 * @throws Exception
 */
@Test
public void testAddUploadException() throws Exception {
// Counter flips the uploader from failing (0) to succeeding (>0).
final AtomicInteger count = new AtomicInteger(0);
// Uploader that throws on the first write attempt only.
TestStagingUploader secondTimeUploader = new TestStagingUploader(folder.newFolder()) {
@Override
public void write(String id, File f) throws DataStoreException {
if (count.get() == 0) {
throw new DataStoreException("Error in writing blob");
}
super.write(id, f);
}
};
// initialize staging cache using the mocked uploader
init(2, secondTimeUploader, null);
// Add load
List<ListenableFuture<Integer>> futures = put(folder);
//start — release the upload task and its callback (latches come from the
// test fixture; see init/put in the enclosing class)
taskLatch.countDown();
callbackLatch.countDown();
waitFinish(futures);
// assert file retrieved from staging cache — upload failed, so the entry
// must still be served from staging
File ret = stagingCache.getIfPresent(ID_PREFIX + 0);
assertTrue(Files.equal(copyToFile(randomStream(0, 4 * 1024), folder.newFile()), ret));
assertEquals(1, stagingCache.getStats().getLoadCount());
assertEquals(1, stagingCache.getStats().getLoadSuccessCount());
assertCacheStats(stagingCache, 1, 4 * 1024, 1, 1);
// Retry upload and wait for finish
// (count > 0 now, so the mocked uploader will succeed)
count.incrementAndGet();
ScheduledFuture<?> scheduledFuture = removeExecutor.schedule(stagingCache.new RetryJob(), 0, TimeUnit.MILLISECONDS);
scheduledFuture.get();
afterExecuteLatch.await();
// Now uploaded — staging entry gone, content readable from the uploader
ret = stagingCache.getIfPresent(ID_PREFIX + 0);
assertNull(ret);
assertTrue(Files.equal(copyToFile(randomStream(0, 4 * 1024), folder.newFile()), secondTimeUploader.read(ID_PREFIX + 0)));
}
Aggregations