use of com.amazonaws.services.s3.model.Bucket in project deeplearning4j by deeplearning4j.
the class S3Uploader method multiPartUpload.
/**
 * Multipart upload for big files.
 * @param file the file to upload
 * @param bucketName the name of the bucket to upload to
 */
public void multiPartUpload(File file, String bucketName) {
    AmazonS3 client = new AmazonS3Client(creds);
    bucketName = ensureValidBucketName(bucketName);
    List<Bucket> buckets = client.listBuckets();
    for (Bucket b : buckets) {
        if (b.getName().equals(bucketName)) {
            doMultiPart(client, bucketName, file);
            return;
        }
    }
    // bucket didn't exist: create it, then upload
    client.createBucket(bucketName);
    doMultiPart(client, bucketName, file);
}
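The doMultiPart helper is not shown on this page. A minimal sketch of what such a helper could look like, assuming the SDK's TransferManager is used (it switches to multipart upload automatically for large files); the body below is illustrative, not deeplearning4j's actual implementation:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import java.io.File;

// Illustrative sketch only: upload with automatic multipart handling.
private void doMultiPart(AmazonS3 client, String bucketName, File file) {
    TransferManager tm = TransferManagerBuilder.standard().withS3Client(client).build();
    try {
        // TransferManager transparently performs a multipart upload for large files.
        Upload upload = tm.upload(bucketName, file.getName(), file);
        upload.waitForCompletion();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted while uploading " + file, e);
    } finally {
        // Shut down the TransferManager without closing the shared client.
        tm.shutdownNow(false);
    }
}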
use of com.amazonaws.services.s3.model.Bucket in project elasticsearch by elastic.
the class TestAmazonS3 method putObject.
@Override
public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata)
        throws AmazonClientException, AmazonServiceException {
    if (shouldFail(bucketName, key, writeFailureRate)) {
        // Consume a random prefix of the stream before failing, so the simulated
        // failure can occur mid-upload rather than only at the start.
        long length = metadata.getContentLength();
        long partToRead = (long) (length * randomDouble());
        byte[] buffer = new byte[1024];
        for (long cur = 0; cur < partToRead; cur += buffer.length) {
            try {
                input.read(buffer, 0, (int) Math.min(buffer.length, partToRead - cur));
            } catch (IOException ex) {
                throw new ElasticsearchException("cannot read input stream", ex);
            }
        }
        logger.info("--> random write failure on putObject method: throwing an exception for [bucket={}, key={}]", bucketName, key);
        AmazonS3Exception ex = new AmazonS3Exception("Random S3 exception");
        ex.setStatusCode(400);
        ex.setErrorCode("RequestTimeout");
        throw ex;
    } else {
        return super.putObject(bucketName, key, input, metadata);
    }
}
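The shouldFail helper is also not shown. A plausible sketch, assuming failures are decided per bucket/key pair against the configured rate; the hash-based seeding below is an assumption (made so repeated calls for the same object fail consistently), not the actual elasticsearch test code:

import java.util.Random;

// Hypothetical sketch of the shouldFail helper referenced above.
private boolean shouldFail(String bucketName, String key, double failureRate) {
    if (failureRate <= 0.0) {
        return false;
    }
    // Seed from the object's identity so the decision is stable per object.
    Random random = new Random((bucketName + "/" + key).hashCode());
    return random.nextDouble() < failureRate;
}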
use of com.amazonaws.services.s3.model.Bucket in project elasticsearch by elastic.
the class S3BlobStoreTests method testInitCannedACL.
public void testInitCannedACL() throws IOException {
    String[] aclList = new String[] {
        "private", "public-read", "public-read-write", "authenticated-read",
        "log-delivery-write", "bucket-owner-read", "bucket-owner-full-control" };
    // a null or empty ACL should default to private
    assertThat(S3BlobStore.initCannedACL(null), equalTo(CannedAccessControlList.Private));
    assertThat(S3BlobStore.initCannedACL(""), equalTo(CannedAccessControlList.Private));
    // it should init cannedACL correctly
    for (String aclString : aclList) {
        CannedAccessControlList acl = S3BlobStore.initCannedACL(aclString);
        assertThat(acl.toString(), equalTo(aclString));
    }
    // it should accept all AWS cannedACLs
    for (CannedAccessControlList awsList : CannedAccessControlList.values()) {
        CannedAccessControlList acl = S3BlobStore.initCannedACL(awsList.toString());
        assertThat(acl, equalTo(awsList));
    }
}
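For context, a minimal initCannedACL implementation consistent with these assertions might look as follows; this is a sketch inferred from the test, not necessarily the actual S3BlobStore code:

import com.amazonaws.services.s3.model.CannedAccessControlList;

// Sketch: map the configured ACL string onto the AWS enum, defaulting to Private.
public static CannedAccessControlList initCannedACL(String cannedACL) {
    if (cannedACL == null || cannedACL.isEmpty()) {
        return CannedAccessControlList.Private;
    }
    for (CannedAccessControlList acl : CannedAccessControlList.values()) {
        if (acl.toString().equals(cannedACL)) {
            return acl;
        }
    }
    throw new IllegalArgumentException("cannedACL is not valid: [" + cannedACL + "]");
}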
use of com.amazonaws.services.s3.model.Bucket in project hadoop by apache.
the class S3AFileSystem method getObjectMetadata.
/**
 * Request object metadata; increments counters in the process.
 * @param key object key
 * @return the metadata
 */
protected ObjectMetadata getObjectMetadata(String key) {
    incrementStatistic(OBJECT_METADATA_REQUESTS);
    GetObjectMetadataRequest request = new GetObjectMetadataRequest(bucket, key);
    // When SSE-C is enabled, the customer-provided key must also be supplied
    // on metadata requests.
    if (S3AEncryptionMethods.SSE_C.equals(serverSideEncryptionAlgorithm)
            && StringUtils.isNotBlank(getServerSideEncryptionKey(getConf()))) {
        request.setSSECustomerKey(generateSSECustomerKey());
    }
    ObjectMetadata meta = s3.getObjectMetadata(request);
    incrementReadOperations();
    return meta;
}
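generateSSECustomerKey is not reproduced here. A minimal sketch, assuming it simply wraps the configured base64-encoded key in the SDK's SSECustomerKey holder; the actual S3AFileSystem helper may differ:

import com.amazonaws.services.s3.model.SSECustomerKey;

// Sketch: build the SSE-C key object from the configured base64-encoded key.
private SSECustomerKey generateSSECustomerKey() {
    String base64Key = getServerSideEncryptionKey(getConf());
    return new SSECustomerKey(base64Key);
}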
use of com.amazonaws.services.s3.model.Bucket in project hadoop by apache.
the class S3AFileSystem method copyFile.
/**
 * Copy a single object in the bucket via a COPY operation.
 * @param srcKey source object path
 * @param dstKey destination object path
 * @param size object size
 * @throws AmazonClientException on failures inside the AWS SDK
 * @throws InterruptedIOException the operation was interrupted
 * @throws IOException other IO problems
 */
private void copyFile(String srcKey, String dstKey, long size)
        throws IOException, InterruptedIOException, AmazonClientException {
    LOG.debug("copyFile {} -> {} ", srcKey, dstKey);
    try {
        ObjectMetadata srcom = getObjectMetadata(srcKey);
        ObjectMetadata dstom = cloneObjectMetadata(srcom);
        setOptionalObjectMetadata(dstom);
        CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
        setOptionalCopyObjectRequestParameters(copyObjectRequest);
        copyObjectRequest.setCannedAccessControlList(cannedACL);
        copyObjectRequest.setNewObjectMetadata(dstom);
        ProgressListener progressListener = new ProgressListener() {
            @Override
            public void progressChanged(ProgressEvent progressEvent) {
                switch (progressEvent.getEventType()) {
                    case TRANSFER_PART_COMPLETED_EVENT:
                        incrementWriteOperations();
                        break;
                    default:
                        break;
                }
            }
        };
        Copy copy = transfers.copy(copyObjectRequest);
        copy.addProgressListener(progressListener);
        try {
            copy.waitForCopyResult();
            incrementWriteOperations();
            instrumentation.filesCopied(1, size);
        } catch (InterruptedException e) {
            throw new InterruptedIOException("Interrupted copying " + srcKey + " to " + dstKey + ", cancelling");
        }
    } catch (AmazonClientException e) {
        throw translateException("copyFile(" + srcKey + ", " + dstKey + ")", srcKey, e);
    }
}
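setOptionalCopyObjectRequestParameters is not shown either. A hedged sketch, assuming its job is to propagate the SSE-C customer key to both sides of the copy (S3 requires the source key to read the object and the destination key to re-encrypt it); the actual Hadoop implementation may differ:

import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.SSECustomerKey;

// Sketch: when SSE-C is in use, attach the customer key to both source and destination.
private void setOptionalCopyObjectRequestParameters(CopyObjectRequest request) {
    if (S3AEncryptionMethods.SSE_C.equals(serverSideEncryptionAlgorithm)
            && StringUtils.isNotBlank(getServerSideEncryptionKey(getConf()))) {
        SSECustomerKey key = generateSSECustomerKey();
        request.setSourceSSECustomerKey(key);
        request.setDestinationSSECustomerKey(key);
    }
}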