Use of com.amazonaws.services.s3.model.ObjectMetadata in project jackrabbit-oak by apache.
The class S3Backend, method write().
/**
 * Uploads the file to Amazon S3. If the file size is greater than 5 MB,
 * this method uses parallel concurrent connections to upload.
 */
@Override
public void write(DataIdentifier identifier, File file) throws DataStoreException {
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        // check if the same record already exists
        try {
            objectMetaData = s3service.getObjectMetadata(bucket, key);
        } catch (AmazonServiceException ase) {
            if (!(ase.getStatusCode() == 404 || ase.getStatusCode() == 403)) {
                throw ase;
            }
        }
        if (objectMetaData != null) {
            long l = objectMetaData.getContentLength();
            if (l != file.length()) {
                throw new DataStoreException("Collision: " + key + " new length: " + file.length() + " old length: " + l);
            }
            LOG.debug("[{}] exists, lastmodified = [{}]", key, objectMetaData.getLastModified().getTime());
            // copy the object onto itself to refresh its lastModified timestamp
            CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
            copReq.setNewObjectMetadata(objectMetaData);
            Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
            try {
                copy.waitForCopyResult();
                LOG.debug("lastModified of [{}] updated successfully.", identifier);
            } catch (Exception e2) {
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
        if (objectMetaData == null) {
            try {
                // start multipart parallel upload using amazon sdk
                Upload up = tmx.upload(s3ReqDecorator.decorate(new PutObjectRequest(bucket, key, file)));
                // wait for upload to finish
                up.waitForUploadResult();
                LOG.debug("synchronous upload to identifier [{}] completed.", identifier);
            } catch (Exception e2) {
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("write of [{}], length=[{}], in [{}]ms", identifier, file.length(), (System.currentTimeMillis() - start));
}
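The Javadoc above says that uploads larger than 5 MB go over parallel concurrent connections; that behaviour comes from the AWS SDK TransferManager held in tmx, which splits large puts into multipart uploads. The following is a minimal sketch of how such a TransferManager could be configured; the 5 MB threshold, part size, and thread-pool size are illustrative assumptions, not the values jackrabbit-oak actually uses.

import java.util.concurrent.Executors;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerConfiguration;

class TransferManagerFactory {
    // Hypothetical helper; threshold, part size, and pool size are illustrative only.
    static TransferManager createTransferManager(AmazonS3 s3service) {
        // A fixed thread pool drives the parallel part uploads.
        TransferManager tmx = new TransferManager(s3service, Executors.newFixedThreadPool(10));
        TransferManagerConfiguration config = new TransferManagerConfiguration();
        // Puts above this size are split into a multipart upload with parts sent in parallel.
        config.setMultipartUploadThreshold(5 * 1024 * 1024);
        // Size of each individual part.
        config.setMinimumUploadPartSize(5 * 1024 * 1024);
        tmx.setConfiguration(config);
        return tmx;
    }
}

With thresholds like these, write() above effectively falls back to a single PUT for small binaries and switches to parallel part uploads for anything larger.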
Use of com.amazonaws.services.s3.model.ObjectMetadata in project jackrabbit-oak by apache.
The class S3Backend, method addMetadataRecord().
public void addMetadataRecord(final InputStream input, final String name) throws DataStoreException {
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        Upload upload = tmx.upload(s3ReqDecorator.decorate(
                new PutObjectRequest(bucket, addMetaKeyPrefix(name), input, new ObjectMetadata())));
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        LOG.error("Error in uploading", e);
        throw new DataStoreException("Error in uploading", e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
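Note that the PutObjectRequest above is built with an empty ObjectMetadata, so no content length accompanies the stream and the SDK may buffer part or all of it in memory to determine the size before uploading. A minimal caller sketch, assuming the payload fits in memory; the record name, payload, and the backend variable are hypothetical, and S3Backend and DataStoreException come from the surrounding Oak code:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

class MetadataRecordExample {
    // Hypothetical caller; "backend" stands for an initialized S3Backend instance.
    static void storeRepositoryId(S3Backend backend) throws DataStoreException {
        byte[] payload = "8aae0a31-f403-4d19".getBytes(StandardCharsets.UTF_8);
        // addMetadataRecord prepends the backend's metadata key prefix to this name.
        backend.addMetadataRecord(new ByteArrayInputStream(payload), "repository-id");
    }
}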
Use of com.amazonaws.services.s3.model.ObjectMetadata in project jackrabbit-oak by apache.
The class S3Backend, method getMetadataRecord().
public DataRecord getMetadataRecord(String name) {
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectMetadata meta = s3service.getObjectMetadata(bucket, addMetaKeyPrefix(name));
        return new S3DataRecord(s3service, bucket, name, meta.getLastModified().getTime(), meta.getContentLength(), true);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
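Unlike write() above, getMetadataRecord() does not catch AmazonServiceException, so asking for a record that does not exist surfaces the SDK's 404 error (the same status code handled in the write snippet). A minimal caller-side sketch, assuming missing records should map to null; the helper class and method names are hypothetical, and DataRecord and S3Backend come from the snippets above:

import com.amazonaws.AmazonServiceException;

class MetadataLookupExample {
    // Hypothetical helper: returns null instead of propagating a 404 from S3.
    static DataRecord getMetadataRecordOrNull(S3Backend backend, String name) {
        try {
            return backend.getMetadataRecord(name);
        } catch (AmazonServiceException ase) {
            if (ase.getStatusCode() == 404) {
                return null;
            }
            throw ase;
        }
    }
}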
Use of com.amazonaws.services.s3.model.ObjectMetadata in project jackrabbit-oak by apache.
The class S3RequestDecorator, method decorate().
/**
 * Sets the server-side encryption algorithm on a {@link CopyObjectRequest}.
 */
public CopyObjectRequest decorate(CopyObjectRequest request) {
    switch (getDataEncryption()) {
        case SSE_S3:
            ObjectMetadata metadata = request.getNewObjectMetadata() == null
                    ? new ObjectMetadata()
                    : request.getNewObjectMetadata();
            metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            request.setNewObjectMetadata(metadata);
            break;
        case NONE:
            break;
    }
    return request;
}
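The write() and addMetadataRecord() snippets above pass a PutObjectRequest through the same decorator, so S3RequestDecorator presumably carries an analogous overload for uploads. A minimal sketch of what that decoration looks like for the SSE_S3 case, assuming the same getDataEncryption() switch; it is illustrative, not the project's exact code:

// Sketch only: mirrors the CopyObjectRequest variant above for PutObjectRequest.
public PutObjectRequest decorate(PutObjectRequest request) {
    switch (getDataEncryption()) {
        case SSE_S3:
            // Reuse any metadata already attached to the request, otherwise start fresh.
            ObjectMetadata metadata = request.getMetadata() == null
                    ? new ObjectMetadata()
                    : request.getMetadata();
            metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            request.setMetadata(metadata);
            break;
        case NONE:
            break;
    }
    return request;
}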
Use of com.amazonaws.services.s3.model.ObjectMetadata in project cloudstack by apache.
The class S3TemplateDownloader, method download().
@Override
public long download(boolean resume, DownloadCompleteCallback callback) {
    if (!status.equals(Status.NOT_STARTED)) {
        // Only start downloading if we haven't started yet.
        LOGGER.debug("Template download is already started, not starting again. Template: " + downloadUrl);
        return 0;
    }
    int responseCode;
    if ((responseCode = HTTPUtils.executeMethod(httpClient, getMethod)) == -1) {
        errorString = "Exception while executing HttpMethod " + getMethod.getName() + " on URL " + downloadUrl;
        LOGGER.warn(errorString);
        status = Status.UNRECOVERABLE_ERROR;
        return 0;
    }
    if (!HTTPUtils.verifyResponseCode(responseCode)) {
        errorString = "Response code for GetMethod of " + downloadUrl + " is incorrect, responseCode: " + responseCode;
        LOGGER.warn(errorString);
        status = Status.UNRECOVERABLE_ERROR;
        return 0;
    }
    // Headers
    Header contentLengthHeader = getMethod.getResponseHeader("Content-Length");
    Header contentTypeHeader = getMethod.getResponseHeader("Content-Type");
    // Check the Content-Length header.
    if (contentLengthHeader == null) {
        errorString = "The Content-Length header of " + downloadUrl + " isn't supplied";
        LOGGER.warn(errorString);
        status = Status.UNRECOVERABLE_ERROR;
        return 0;
    } else {
        // The Content-Length header is supplied, parse its value.
        remoteSize = Long.parseLong(contentLengthHeader.getValue());
    }
    if (remoteSize > maxTemplateSizeInByte) {
        errorString = "Remote size is too large for template " + downloadUrl + " remote size is " + remoteSize + " max allowed is " + maxTemplateSizeInByte;
        LOGGER.warn(errorString);
        status = Status.UNRECOVERABLE_ERROR;
        return 0;
    }
    InputStream inputStream;
    try {
        inputStream = new BufferedInputStream(getMethod.getResponseBodyAsStream());
    } catch (IOException e) {
        errorString = "Exception occurred while opening InputStream for template " + downloadUrl;
        LOGGER.warn(errorString);
        status = Status.UNRECOVERABLE_ERROR;
        return 0;
    }
    LOGGER.info("Starting download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " and size " + remoteSize + " bytes");
    // Time the upload starts.
    final Date start = new Date();
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(remoteSize);
    if (contentTypeHeader != null && contentTypeHeader.getValue() != null) {
        objectMetadata.setContentType(contentTypeHeader.getValue());
    }
    // Create the PutObjectRequest.
    PutObjectRequest putObjectRequest = new PutObjectRequest(s3TO.getBucketName(), s3Key, inputStream, objectMetadata);
    // If reduced redundancy is enabled, set it.
    if (s3TO.getEnableRRS()) {
        putObjectRequest.withStorageClass(StorageClass.ReducedRedundancy);
    }
    Upload upload = S3Utils.putObject(s3TO, putObjectRequest);
    upload.addProgressListener(new ProgressListener() {
        @Override
        public void progressChanged(ProgressEvent progressEvent) {
            // Record the amount of bytes transferred.
            totalBytes += progressEvent.getBytesTransferred();
            LOGGER.trace("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + totalBytes + " in " + ((new Date().getTime() - start.getTime()) / 1000) + " seconds");
            if (progressEvent.getEventType() == ProgressEventType.TRANSFER_STARTED_EVENT) {
                status = Status.IN_PROGRESS;
            } else if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
                status = Status.DOWNLOAD_FINISHED;
            } else if (progressEvent.getEventType() == ProgressEventType.TRANSFER_CANCELED_EVENT) {
                status = Status.ABORTED;
            } else if (progressEvent.getEventType() == ProgressEventType.TRANSFER_FAILED_EVENT) {
                status = Status.UNRECOVERABLE_ERROR;
            }
        }
    });
    try {
        // Wait for the upload to complete.
        upload.waitForCompletion();
    } catch (InterruptedException e) {
        // Interruption while waiting for the upload to complete.
        LOGGER.warn("Interruption occurred while waiting for upload of " + downloadUrl + " to complete");
    }
    downloadTime = new Date().getTime() - start.getTime();
    if (status == Status.DOWNLOAD_FINISHED) {
        LOGGER.info("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + totalBytes + " in " + (downloadTime / 1000) + " seconds, completed successfully!");
    } else {
        LOGGER.warn("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + totalBytes + " in " + (downloadTime / 1000) + " seconds, completed with status " + status.toString());
    }
    // Release the connection held by the GetMethod; the response stream is no longer needed.
    getMethod.releaseConnection();
    // Notify the callback, if one was supplied.
    if (callback != null) {
        callback.downloadComplete(status);
    }
    return totalBytes;
}
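For context, a minimal caller sketch for the method above. The S3TemplateDownloader constructor arguments are project specific and omitted here, the logging in the callback body is an assumption, and only the downloadComplete(Status) shape is taken from the call at the end of download():

class TemplateDownloadExample {
    // Hypothetical caller; "downloader" is an already constructed S3TemplateDownloader.
    static long downloadTemplate(S3TemplateDownloader downloader) {
        return downloader.download(false, new DownloadCompleteCallback() {
            @Override
            public void downloadComplete(Status finalStatus) {
                // Fires after the upload to S3 finishes, is aborted, or fails.
                System.out.println("Template transfer completed with status " + finalStatus);
            }
        });
    }
}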