Use of com.amazonaws.services.s3.model.ObjectMetadata in project jackrabbit by apache.
Class S3Backend, method getLastModified:
@Override
public long getLastModified(DataIdentifier identifier) throws DataStoreException {
    long start = System.currentTimeMillis();
    String key = getKeyName(identifier);
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectMetadata object = s3service.getObjectMetadata(bucket, key);
        long lastModified = object.getLastModified().getTime();
        LOG.debug("Identifier [{}]'s lastModified = [{}] took [{}]ms.", new Object[] { identifier, lastModified, (System.currentTimeMillis() - start) });
        return lastModified;
    } catch (AmazonServiceException e) {
        if (e.getStatusCode() == 404 || e.getStatusCode() == 403) {
            LOG.info("getLastModified:Identifier [{}] not found. Took [{}] ms.", identifier, (System.currentTimeMillis() - start));
        }
        throw new DataStoreException(e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
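For orientation, stripped of the class-loader swap, logging, and timing, the SDK interaction this method wraps reduces to a single HEAD request. A minimal standalone sketch, assuming an already-configured AmazonS3 client and placeholder bucket and key names:

private long readLastModified(AmazonS3 s3, String bucket, String key) {
    // getObjectMetadata issues a HEAD request, so no object data is downloaded;
    // a missing key surfaces as an AmazonServiceException with status code 404.
    ObjectMetadata meta = s3.getObjectMetadata(bucket, key);
    return meta.getLastModified().getTime();
}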
Use of com.amazonaws.services.s3.model.ObjectMetadata in project jackrabbit by apache.
Class S3Backend, method write:
private void write(DataIdentifier identifier, File file, boolean asyncUpload, AsyncUploadCallback callback) throws DataStoreException {
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        // check if the same record already exists
        try {
            objectMetaData = s3service.getObjectMetadata(bucket, key);
        } catch (AmazonServiceException ase) {
            if (!(ase.getStatusCode() == 404 || ase.getStatusCode() == 403)) {
                throw ase;
            }
        }
        if (objectMetaData != null) {
            long l = objectMetaData.getContentLength();
            if (l != file.length()) {
                throw new DataStoreException("Collision: " + key + " new length: " + file.length() + " old length: " + l);
            }
            LOG.debug("[{}]'s exists, lastmodified = [{}]", key, objectMetaData.getLastModified().getTime());
            CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
            copReq.setNewObjectMetadata(objectMetaData);
            Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
            try {
                copy.waitForCopyResult();
                LOG.debug("lastModified of [{}] updated successfully.", identifier);
                if (callback != null) {
                    callback.onSuccess(new AsyncUploadResult(identifier, file));
                }
            } catch (Exception e2) {
                AsyncUploadResult asyncUpRes = new AsyncUploadResult(identifier, file);
                asyncUpRes.setException(e2);
                if (callback != null) {
                    callback.onAbort(asyncUpRes);
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
        if (objectMetaData == null) {
            try {
                // start multipart parallel upload using amazon sdk
                Upload up = tmx.upload(s3ReqDecorator.decorate(new PutObjectRequest(bucket, key, file)));
                // wait for upload to finish
                if (asyncUpload) {
                    up.addProgressListener(new S3UploadProgressListener(up, identifier, file, callback));
                    LOG.debug("added upload progress listener to identifier [{}]", identifier);
                } else {
                    up.waitForUploadResult();
                    LOG.debug("synchronous upload to identifier [{}] completed.", identifier);
                    if (callback != null) {
                        callback.onSuccess(new AsyncUploadResult(identifier, file));
                    }
                }
            } catch (Exception e2) {
                AsyncUploadResult asyncUpRes = new AsyncUploadResult(identifier, file);
                asyncUpRes.setException(e2);
                if (callback != null) {
                    callback.onAbort(asyncUpRes);
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("write of [{}], length=[{}], in async mode [{}], in [{}]ms", new Object[] { identifier, file.length(), asyncUpload, (System.currentTimeMillis() - start) });
}
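When the record already exists, the branch above refreshes its lastModified by copying the object onto itself with its existing metadata, which causes the copy request to carry a REPLACE metadata directive. A minimal sketch of that self-copy trick, assuming a configured AmazonS3 client and TransferManager with placeholder names; the request decorator and callback plumbing from the method above are omitted:

private void touchLastModified(AmazonS3 s3, TransferManager transferManager, String bucket, String key) throws InterruptedException {
    // Copying an object onto itself with new metadata updates its Last-Modified
    // timestamp without re-transferring the data.
    ObjectMetadata existing = s3.getObjectMetadata(bucket, key);
    CopyObjectRequest touch = new CopyObjectRequest(bucket, key, bucket, key);
    touch.setNewObjectMetadata(existing);
    Copy copy = transferManager.copy(touch);
    copy.waitForCopyResult();   // blocks until the server-side copy completes
}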
Use of com.amazonaws.services.s3.model.ObjectMetadata in project jackrabbit by apache.
Class S3Backend, method getLength:
@Override
public long getLength(DataIdentifier identifier) throws DataStoreException {
    long start = System.currentTimeMillis();
    String key = getKeyName(identifier);
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectMetadata object = s3service.getObjectMetadata(bucket, key);
        long length = object.getContentLength();
        LOG.debug("Identifier [{}]'s length = [{}] took [{}]ms.", new Object[] { identifier, length, (System.currentTimeMillis() - start) });
        return length;
    } catch (AmazonServiceException e) {
        throw new DataStoreException("Could not get length of dataIdentifier " + identifier, e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
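Here getObjectMetadata doubles as an existence probe, since a missing key throws with status 404, the case the write method above catches. In newer 1.11.x versions of the SDK, AmazonS3#doesObjectExist wraps the same HEAD request and returns a boolean instead; a hedged sketch with placeholder names:

private long readLengthIfPresent(AmazonS3 s3, String bucket, String key) {
    // doesObjectExist returns false on 404 rather than throwing AmazonServiceException.
    if (!s3.doesObjectExist(bucket, key)) {
        return -1;   // sentinel for "no such object" in this sketch
    }
    return s3.getObjectMetadata(bucket, key).getContentLength();
}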
Use of com.amazonaws.services.s3.model.ObjectMetadata in project exhibitor by soabase.
Class S3ConfigProvider, method storeConfig:
@Override
public LoadedInstanceConfig storeConfig(ConfigCollection config, long compareVersion) throws Exception {
    {
        ObjectMetadata metadata = getConfigMetadata();
        if (metadata != null) {
            Date lastModified = metadata.getLastModified();
            if (lastModified.getTime() != compareVersion) {
                // apparently there's no atomic way to do this with S3 so this will have to do
                return null;
            }
        }
    }
    PropertyBasedInstanceConfig propertyBasedInstanceConfig = new PropertyBasedInstanceConfig(config);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    propertyBasedInstanceConfig.getProperties().store(out, "Auto-generated by Exhibitor " + hostname);
    byte[] bytes = out.toByteArray();
    ObjectMetadata metadata = S3Utils.simpleUploadFile(s3Client, bytes, arguments.getBucket(), arguments.getKey());
    return new LoadedInstanceConfig(propertyBasedInstanceConfig, metadata.getLastModified().getTime());
}
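The lastModified timestamp acts as an optimistic version token: the method re-reads it, returns null if another writer got there first, and only then uploads, accepting the race the inline comment mentions. A minimal sketch of that read-compare-write pattern using only plain SDK calls, with placeholder bucket and key names rather than Exhibitor's own helpers:

private boolean writeIfUnchanged(AmazonS3 s3, String bucket, String key, byte[] bytes, long expectedVersion) {
    // Compare the current lastModified against the version the caller read earlier.
    ObjectMetadata current = s3.getObjectMetadata(bucket, key);
    if (current.getLastModified().getTime() != expectedVersion) {
        return false;   // a concurrent writer won; mirrors storeConfig returning null
    }
    // Note: the check and the put are not atomic, so a writer can still slip in
    // between them; this is the same limitation the comment above acknowledges.
    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentLength(bytes.length);
    s3.putObject(new PutObjectRequest(bucket, key, new ByteArrayInputStream(bytes), meta));
    return true;
}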
Use of com.amazonaws.services.s3.model.ObjectMetadata in project exhibitor by soabase.
Class S3PseudoLock, method createFile:
@Override
protected void createFile(String key, byte[] contents) throws Exception {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(contents.length);
    PutObjectRequest request = new PutObjectRequest(bucket, key, new ByteArrayInputStream(contents), metadata);
    client.putObject(request);
}
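Setting the content length before streaming matters: without it the SDK cannot know the payload size and will buffer the whole stream in memory to compute Content-Length, typically logging a warning. A short sketch of the same upload with a content type added as well, assuming an AmazonS3 client and placeholder bucket and key names:

private void putSmallObject(AmazonS3 s3, String bucket, String key, byte[] contents) {
    ObjectMetadata metadata = new ObjectMetadata();
    // Supplying the length lets the SDK stream the bytes without buffering them first.
    metadata.setContentLength(contents.length);
    metadata.setContentType("text/plain");   // optional; createFile above does not set it
    s3.putObject(new PutObjectRequest(bucket, key, new ByteArrayInputStream(contents), metadata));
}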