Use of org.apache.jackrabbit.core.data.DataStoreException in project jackrabbit-oak by apache.
From class S3DataStoreFactory, method asCloseable:
private static Closeable asCloseable(final CachingDataStore store, final File tempHomeDir) {
    return new Closeable() {
        @Override
        public void close() throws IOException {
            try {
                // Block until all asynchronous uploads have drained before closing the store.
                while (!store.getPendingUploads().isEmpty()) {
                    log.info("Waiting for following uploads to finish: " + store.getPendingUploads());
                    Thread.sleep(1000);
                }
                store.close();
                FileUtils.deleteDirectory(tempHomeDir);
            } catch (DataStoreException e) {
                throw new IOException(e);
            } catch (InterruptedException e) {
                // Restore the interrupt flag before translating to IOException.
                Thread.currentThread().interrupt();
                throw new IOException(e);
            }
        }
    };
}
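The same pattern, packaging an arbitrary cleanup sequence as a Closeable so try-with-resources can drive it, can be sketched in isolation. The class name and the temp-file cleanup below are hypothetical stand-ins for the store shutdown above, not jackrabbit-oak code:

import java.io.Closeable;
import java.io.File;
import java.io.IOException;

public class CloseableWrapperDemo {
    public static void main(String[] args) throws IOException {
        final File tempHomeDir = File.createTempFile("ds-home", null);
        Closeable cleanup = new Closeable() {
            @Override
            public void close() throws IOException {
                // In S3DataStoreFactory this is where pending uploads are drained,
                // the store is closed, and the temporary home directory is deleted.
                if (!tempHomeDir.delete()) {
                    throw new IOException("Could not delete " + tempHomeDir);
                }
            }
        };
        try (Closeable c = cleanup) {
            System.out.println("work happens while the resource is open");
        } // close() runs here, even if the body throws
    }
}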
Use of org.apache.jackrabbit.core.data.DataStoreException in project jackrabbit-oak by apache.
From class SafeDataStoreBlobStore, method getStream:
@Override
protected InputStream getStream(String blobId) throws IOException {
    try {
        DataRecord record = getDataRecord(blobId);
        if (record == null) {
            log.warn("No blob found for id [{}]", blobId);
            return new ByteArrayInputStream(new byte[0]);
        }
        // Reuse the record fetched above instead of looking it up a second time.
        InputStream in = record.getStream();
        if (!(in instanceof BufferedInputStream)) {
            in = new BufferedInputStream(in);
        }
        return StatsCollectingStreams.wrap(stats, blobId, in);
    } catch (DataStoreException e) {
        throw new IOException(e);
    }
}
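The instanceof check above avoids double-buffering a stream that is already buffered. A minimal, self-contained sketch of that idiom, with illustrative names only:

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class BufferingDemo {
    // Mirrors the idiom above: only wrap when the stream is not already
    // buffered, so an extra copy per read is avoided.
    static InputStream buffered(InputStream in) {
        return (in instanceof BufferedInputStream) ? in : new BufferedInputStream(in);
    }

    public static void main(String[] args) throws IOException {
        InputStream raw = new ByteArrayInputStream("hello".getBytes("UTF-8"));
        try (InputStream in = buffered(buffered(raw))) { // second call adds no wrapper
            System.out.println(in.read()); // prints 104, the byte value of 'h'
        }
    }
}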
Use of org.apache.jackrabbit.core.data.DataStoreException in project jackrabbit-oak by apache.
From class AbstractSharedCachingDataStore, method addRecord:
@Override
public DataRecord addRecord(InputStream inputStream, BlobOptions blobOptions) throws DataStoreException {
    Stopwatch watch = Stopwatch.createStarted();
    try {
        TransientFileFactory fileFactory = TransientFileFactory.getInstance();
        File tmpFile = fileFactory.createTransientFile("upload", null, tmp);
        // Copy the stream to the temporary file and calculate the
        // stream length and the message digest of the stream.
        MessageDigest digest = MessageDigest.getInstance(DIGEST);
        OutputStream output = new DigestOutputStream(new FileOutputStream(tmpFile), digest);
        long length = 0;
        try {
            length = IOUtils.copyLarge(inputStream, output);
        } finally {
            output.close();
        }
        DataIdentifier identifier = new DataIdentifier(encodeHexString(digest.digest()));
        LOG.debug("SHA-256 of [{}], length =[{}] took [{}] ms ", identifier, length, watch.elapsed(TimeUnit.MILLISECONDS));
        // Write directly to the backend when the upload is synchronous,
        // or when the staging cache cannot accept the file.
        if (blobOptions.getUpload() == SYNCHRONOUS || !cache.stage(identifier.toString(), tmpFile)) {
            backend.write(identifier, tmpFile);
            LOG.info("Added blob [{}] to backend", identifier);
            // Offer the file to the download cache so subsequent reads stay local.
            cache.getDownloadCache().put(identifier.toString(), tmpFile);
        }
        return getRecordIfStored(identifier);
    } catch (Exception e) {
        LOG.error("Error in adding record", e);
        throw new DataStoreException("Error in adding record", e);
    }
}
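The record identifier is content-addressed: it is the hex-encoded digest of the bytes as they are copied out of the input stream. A standalone sketch of that digest-while-copying technique using only the JDK (assumes Java 11+ for OutputStream.nullOutputStream(); all names are illustrative, not jackrabbit-oak API):

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.DigestOutputStream;
import java.security.MessageDigest;

public class DigestWhileCopyingDemo {
    public static void main(String[] args) throws Exception {
        InputStream input = new ByteArrayInputStream("some blob content".getBytes("UTF-8"));
        MessageDigest digest = MessageDigest.getInstance("SHA-256");
        // Discard the bytes; only the length and the digest are of interest here.
        OutputStream sink = new DigestOutputStream(OutputStream.nullOutputStream(), digest);
        long length = 0;
        byte[] buf = new byte[8192];
        int n;
        while ((n = input.read(buf)) != -1) {
            sink.write(buf, 0, n); // DigestOutputStream updates the digest as it writes
            length += n;
        }
        sink.close();
        StringBuilder hex = new StringBuilder();
        for (byte b : digest.digest()) {
            hex.append(String.format("%02x", b));
        }
        System.out.println("length=" + length + " id=" + hex);
    }
}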
Use of org.apache.jackrabbit.core.data.DataStoreException in project jackrabbit-oak by apache.
From class S3Backend, method write:
/**
 * Uploads the file to Amazon S3. If the file size is greater than 5 MB,
 * this method uploads it in parts over parallel concurrent connections.
 */
@Override
public void write(DataIdentifier identifier, File file) throws DataStoreException {
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        // Check whether the same record already exists; 404/403 means
        // the key is absent and is treated as "not found".
        try {
            objectMetaData = s3service.getObjectMetadata(bucket, key);
        } catch (AmazonServiceException ase) {
            if (!(ase.getStatusCode() == 404 || ase.getStatusCode() == 403)) {
                throw ase;
            }
        }
        if (objectMetaData != null) {
            long l = objectMetaData.getContentLength();
            if (l != file.length()) {
                throw new DataStoreException("Collision: " + key + " new length: " + file.length() + " old length: " + l);
            }
            LOG.debug("[{}] exists, lastmodified = [{}]", key, objectMetaData.getLastModified().getTime());
            // Copy the object onto itself to refresh its lastModified timestamp.
            CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
            copReq.setNewObjectMetadata(objectMetaData);
            Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
            try {
                copy.waitForCopyResult();
                LOG.debug("lastModified of [{}] updated successfully.", identifier);
            } catch (Exception e2) {
                throw new DataStoreException("Could not update lastModified of " + key, e2);
            }
        } else {
            try {
                // Start a (multipart, parallel) upload using the AWS SDK TransferManager.
                Upload up = tmx.upload(s3ReqDecorator.decorate(new PutObjectRequest(bucket, key, file)));
                // Wait for the upload to finish.
                up.waitForUploadResult();
                LOG.debug("synchronous upload of [{}] completed.", identifier);
            } catch (Exception e2) {
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("write of [{}], length=[{}], in [{}]ms", identifier, file.length(), (System.currentTimeMillis() - start));
}
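For reference, a hedged sketch of the same TransferManager-based upload outside of S3Backend. The bucket, key, and file values are placeholders, and the client setup is an assumption rather than how jackrabbit-oak wires it (the project injects its own decorated client):

import java.io.File;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

public class TransferManagerUploadDemo {
    public static void main(String[] args) throws Exception {
        // Placeholder values; real code would take these from configuration.
        String bucket = "my-bucket";
        String key = "my-key";
        File file = new File("/tmp/blob.bin");

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        TransferManager tmx = TransferManagerBuilder.standard().withS3Client(s3).build();
        try {
            // As in S3Backend.write: submit the upload, then block until it finishes.
            // TransferManager splits large files into parts uploaded in parallel.
            Upload up = tmx.upload(new PutObjectRequest(bucket, key, file));
            up.waitForUploadResult();
        } finally {
            tmx.shutdownNow(false); // shut down the transfer threads, keep the client alive
        }
    }
}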
Use of org.apache.jackrabbit.core.data.DataStoreException in project jackrabbit-oak by apache.
From class S3Backend, method read:
@Override
public InputStream read(DataIdentifier identifier) throws DataStoreException {
    long start = System.currentTimeMillis();
    String key = getKeyName(identifier);
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        S3Object object = s3service.getObject(bucket, key);
        InputStream in = object.getObjectContent();
        LOG.debug("[{}] read took [{}]ms", identifier, (System.currentTimeMillis() - start));
        return in;
    } catch (AmazonServiceException e) {
        throw new DataStoreException("Object not found: " + key, e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
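Since read returns the raw object-content stream, the caller must close it, or the underlying HTTP connection stays held. A hedged standalone sketch of that read path; the bucket and key are placeholders and the default client is an assumption (jackrabbit-oak configures its own):

import java.io.InputStream;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.S3Object;

public class S3ReadDemo {
    public static void main(String[] args) throws Exception {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        try (S3Object object = s3.getObject("my-bucket", "my-key");
             InputStream in = object.getObjectContent()) {
            // S3Backend.read hands this stream to its caller; closing it
            // releases the HTTP connection back to the pool.
            byte[] buf = new byte[8192];
            long total = 0;
            int n;
            while ((n = in.read(buf)) != -1) {
                total += n;
            }
            System.out.println("read " + total + " bytes");
        } catch (AmazonServiceException e) {
            // S3Backend wraps this in DataStoreException("Object not found: " + key, e).
            System.err.println("S3 error: " + e.getErrorCode());
        }
    }
}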