use of com.amazonaws.services.s3.model.PutObjectRequest in project extension-s3 by lucee.
the class S3 method write.
public void write(String bucketName, String objectName, File file, Object acl, String region) throws IOException {
    bucketName = improveBucketName(bucketName);
    objectName = improveObjectName(objectName, false);
    flushExists(bucketName, objectName);
    AmazonS3Client client = getAmazonS3(bucketName, region);
    if (file.length() >= maxSize) {
        try {
            // Collect an ETag for each uploaded part; the complete-upload request
            // needs the full list to assemble the final object.
            List<PartETag> partETags = new ArrayList<PartETag>();
            long contentLength = file.length();
            // Set part size to 100 MB.
            long partSize = 100 * 1024 * 1024;
            // Initiate the multipart upload.
            InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, objectName);
            InitiateMultipartUploadResult initResponse = client.initiateMultipartUpload(initRequest);
            // Upload the file parts.
            long filePosition = 0;
            for (int i = 1; filePosition < contentLength; i++) {
                // The last part may be smaller than 100 MB; shrink the part size as needed.
                partSize = Math.min(partSize, (contentLength - filePosition));
                // Create the request to upload a part.
                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName).withKey(objectName)
                        .withUploadId(initResponse.getUploadId()).withPartNumber(i).withFileOffset(filePosition).withFile(file).withPartSize(partSize);
                // TODO set ACL
                // Upload the part and add the response's ETag to the list.
                UploadPartResult uploadResult = client.uploadPart(uploadRequest);
                partETags.add(uploadResult.getPartETag());
                filePosition += partSize;
            }
            // Complete the multipart upload.
            CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, objectName, initResponse.getUploadId(), partETags);
            client.completeMultipartUpload(compRequest);
            if (acl != null) {
                setACL(client, bucketName, objectName, acl);
            }
        }
        catch (AmazonServiceException ase) {
            if (ase.getErrorCode().equals("NoSuchBucket")) {
                createDirectory(bucketName, acl, region);
                write(bucketName, objectName, file, acl, region);
                return;
            }
            throw toS3Exception(ase);
        }
        finally {
            client.release();
        }
    }
    else {
        // Small enough for a single PUT: upload the file directly.
        PutObjectRequest por = new PutObjectRequest(bucketName, objectName, file);
        if (acl != null) setACL(por, acl);
        try {
            client.putObject(por);
            flushExists(bucketName, objectName);
        }
        catch (AmazonServiceException ase) {
            // e.g. error-code:EntityTooLarge;ProposedSize:5800000000;MaxSizeAllowed:5368709120
            if (ase.getErrorCode().equals("EntityTooLarge")) {
                S3Exception s3e = toS3Exception(ase);
                if (s3e.getProposedSize() != 0 && s3e.getProposedSize() < maxSize) {
                    maxSize = s3e.getProposedSize();
                    write(bucketName, objectName, file, acl, region);
                    return;
                }
                throw s3e;
            }
            if (ase.getErrorCode().equals("NoSuchBucket")) {
                createDirectory(bucketName, acl, region);
                write(bucketName, objectName, file, acl, region);
                return;
            }
            throw toS3Exception(ase);
        }
        finally {
            client.release();
        }
    }
}
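The loop above tracks part ETags by hand. For comparison, SDK v1 also ships TransferManager, which performs the same splitting, uploading, and assembly automatically; a minimal sketch, assuming default credentials and placeholder bucket, key, and file names:

import java.io.File;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

public class TransferManagerSketch {
    public static void main(String[] args) throws InterruptedException {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        // TransferManager switches to multipart automatically above its configured
        // threshold and tracks part ETags internally.
        TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3).build();
        // "my-bucket" and the paths are placeholders.
        Upload upload = tm.upload("my-bucket", "backups/archive.bin", new File("/tmp/archive.bin"));
        upload.waitForCompletion(); // blocks until every part is uploaded and assembled
        tm.shutdownNow(false); // false: leave the injected client open
    }
}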
use of com.amazonaws.services.s3.model.PutObjectRequest in project extension-s3 by lucee.
the class S3 method write.
/**
 * @param bucketName target bucket; it gets created if it does not already exist
 * @param objectName object to create
 * @param data object content
 * @param mimeType mime type of the object content; may be null
 * @param charset charset used to encode the data; when null, the platform default is used
 * @param acl access control list
 * @param region if the bucket does not already exist, it gets created in that region; when not
 *            defined, the default region set in the constructor is used
 * @throws IOException
 */
public void write(String bucketName, String objectName, String data, String mimeType, Charset charset, Object acl, String region) throws IOException {
    bucketName = improveBucketName(bucketName);
    objectName = improveObjectName(objectName, false);
    flushExists(bucketName, objectName);
    AmazonS3Client client = getAmazonS3(bucketName, region);
    String ct = toContentType(mimeType, charset, null);
    byte[] bytes = charset == null ? data.getBytes() : data.getBytes(charset);
    // unlikely to ever happen, so no dedicated code path: spill oversized strings
    // to a temp file and reuse the multipart-capable file-based write
    if (bytes.length > maxSize) {
        File tmp = File.createTempFile("writeString-", ".txt");
        try {
            Util.copy(new ByteArrayInputStream(bytes), new FileOutputStream(tmp), true, true);
            write(bucketName, objectName, tmp, acl, region);
            return;
        }
        finally {
            tmp.delete();
        }
    }
    else {
        ObjectMetadata md = new ObjectMetadata();
        if (ct != null) md.setContentType(ct);
        md.setLastModified(new Date());
        md.setContentLength(bytes.length);
        PutObjectRequest por = new PutObjectRequest(bucketName, objectName, new ByteArrayInputStream(bytes), md);
        if (acl != null) setACL(por, acl);
        try {
            // send the put request to S3
            try {
                client.putObject(por);
                flushExists(bucketName, objectName);
            }
            catch (AmazonServiceException ase) {
                if (ase.getErrorCode().equals("EntityTooLarge")) {
                    S3Exception s3e = toS3Exception(ase);
                    if (s3e.getProposedSize() != 0 && s3e.getProposedSize() < maxSize) {
                        maxSize = s3e.getProposedSize();
                        write(bucketName, objectName, data, mimeType, charset, acl, region);
                        return;
                    }
                    throw s3e;
                }
                if (ase.getErrorCode().equals("NoSuchBucket")) {
                    createDirectory(bucketName, acl, region);
                    write(bucketName, objectName, data, mimeType, charset, acl, region);
                    return;
                }
                throw toS3Exception(ase);
            }
        }
        catch (AmazonServiceException se) {
            throw toS3Exception(se);
        }
        finally {
            client.release();
        }
    }
}
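Setting the content length before putObject matters here: when SDK v1 receives an InputStream of unknown length, it buffers the whole stream in memory to compute one and logs a warning. A standalone sketch of the same pattern, with placeholder bucket and key:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class PutStringSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        byte[] bytes = "hello s3".getBytes(StandardCharsets.UTF_8);
        ObjectMetadata md = new ObjectMetadata();
        md.setContentType("text/plain; charset=UTF-8");
        // Without an explicit length the SDK must buffer the stream in memory
        // before it can sign and send the request.
        md.setContentLength(bytes.length);
        s3.putObject(new PutObjectRequest("my-bucket", "notes/hello.txt", new ByteArrayInputStream(bytes), md));
    }
}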
use of com.amazonaws.services.s3.model.PutObjectRequest in project extension-s3 by lucee.
the class S3 method write.
public void write(String bucketName, String objectName, Resource res, Object acl, String region) throws IOException {
    if (res instanceof File) {
        write(bucketName, objectName, (File) res, acl, region);
        return;
    }
    String ct = CFMLEngineFactory.getInstance().getResourceUtil().getMimeType(res, null);
    try {
        bucketName = improveBucketName(bucketName);
        objectName = improveObjectName(objectName, false);
        flushExists(bucketName, objectName);
        AmazonS3Client client = getAmazonS3(bucketName, region);
        // resources too big for a single PUT are copied to a temp file and routed
        // through the multipart-capable file-based write
        if (res.length() > maxSize) {
            File tmp = File.createTempFile("writeResource-", ".txt");
            try {
                Util.copy(res.getInputStream(), new FileOutputStream(tmp), true, true);
                write(bucketName, objectName, tmp, acl, region);
                return;
            }
            finally {
                tmp.delete();
            }
        }
        else {
            InputStream is = null;
            ObjectMetadata md = new ObjectMetadata();
            if (ct != null) md.setContentType(ct);
            md.setLastModified(new Date());
            md.setContentLength(res.length());
            try {
                PutObjectRequest por = new PutObjectRequest(bucketName, objectName, is = res.getInputStream(), md);
                if (acl != null) setACL(por, acl);
                client.putObject(por);
                flushExists(bucketName, objectName);
            }
            catch (AmazonServiceException ase) {
                if (ase.getErrorCode().equals("EntityTooLarge")) {
                    S3Exception s3e = toS3Exception(ase);
                    if (s3e.getProposedSize() != 0 && s3e.getProposedSize() < maxSize) {
                        maxSize = s3e.getProposedSize();
                        write(bucketName, objectName, res, acl, region);
                        return;
                    }
                    throw s3e;
                }
                if (ase.getErrorCode().equals("NoSuchBucket")) {
                    createDirectory(bucketName, acl, region);
                    write(bucketName, objectName, res, acl, region);
                    return;
                }
                throw toS3Exception(ase);
            }
            finally {
                Util.closeEL(is);
                client.release();
            }
        }
    }
    catch (AmazonServiceException se) {
        throw toS3Exception(se);
    }
}
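Outside Lucee's utility classes, the Util.closeEL cleanup in the finally block maps onto plain try-with-resources. A sketch under the assumption that the stream length is known up front; the helper and its name are illustrative:

import java.io.IOException;
import java.io.InputStream;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public final class StreamPutSketch {
    // Uploads an already-measured stream; the length must be known up front so
    // the SDK does not buffer the stream in memory.
    static void put(AmazonS3 s3, String bucket, String key, InputStream in, long length) throws IOException {
        try (InputStream is = in) { // closed automatically, like Util.closeEL(is) above
            ObjectMetadata md = new ObjectMetadata();
            md.setContentLength(length);
            s3.putObject(new PutObjectRequest(bucket, key, is, md));
        }
    }
}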
use of com.amazonaws.services.s3.model.PutObjectRequest in project Project-Decode by cheery72.
the class UserServiceImpl method AwsFile.
private String AwsFile(MultipartFile file) {
    String fileName = createFileName(file.getOriginalFilename());
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(file.getSize());
    objectMetadata.setContentType(file.getContentType());
    try (InputStream inputStream = file.getInputStream()) {
        amazonS3.putObject(new PutObjectRequest(bucket, fileName, inputStream, objectMetadata).withCannedAcl(CannedAccessControlList.PublicRead));
    }
    catch (IOException e) {
        throw new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR, "File upload failed.");
    }
    return String.format(url + "/%s", fileName);
}
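CannedAccessControlList.PublicRead makes every uploaded object world-readable. If that is not wanted, SDK v1 can hand out time-limited presigned GET URLs instead; a sketch reusing the same (assumed) amazonS3 and bucket fields, with an illustrative method name:

import java.net.URL;
import java.util.Date;

// Hypothetical companion to AwsFile; assumes the same amazonS3 and bucket fields.
private URL presignedDownloadUrl(String fileName) {
    // The URL stays valid for 15 minutes, then S3 rejects it.
    Date expiration = new Date(System.currentTimeMillis() + 15 * 60 * 1000L);
    return amazonS3.generatePresignedUrl(bucket, fileName, expiration);
}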
use of software.amazon.awssdk.services.s3.model.PutObjectRequest in project kaldb by slackhq.
the class S3BlobFs method mkdir.
@Override
public boolean mkdir(URI uri) throws IOException {
    LOG.info("mkdir {}", uri);
    try {
        Preconditions.checkNotNull(uri, "uri is null");
        String path = normalizeToDirectoryPrefix(uri);
        // The bucket root "directory" always exists and cannot be created.
        if (path.equals(DELIMITER)) {
            return true;
        }
        // S3 has no real directories: write a zero-byte object whose key ends with
        // the delimiter so the prefix shows up as a folder.
        PutObjectRequest putObjectRequest = PutObjectRequest.builder().bucket(uri.getHost()).key(path).build();
        PutObjectResponse putObjectResponse = s3Client.putObject(putObjectRequest, RequestBody.fromBytes(new byte[0]));
        return putObjectResponse.sdkHttpResponse().isSuccessful();
    }
    catch (Throwable t) {
        throw new IOException(t);
    }
}
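Since mkdir only writes a zero-byte marker object, a matching existence check simply asks whether any key lives under the prefix. A hypothetical companion in the same SDK v2 style, not part of S3BlobFs:

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;

final class DirExistsSketch {
    // A prefix "exists" if at least one key lives under it, including the
    // zero-byte marker written by mkdir.
    static boolean dirExists(S3Client s3Client, String bucket, String dirPrefix) {
        ListObjectsV2Request req = ListObjectsV2Request.builder()
                .bucket(bucket)
                .prefix(dirPrefix)
                .maxKeys(1)
                .build();
        ListObjectsV2Response resp = s3Client.listObjectsV2(req);
        return resp.keyCount() > 0;
    }
}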