Usage of org.lucee.extension.resource.s3.pool.AmazonS3Client in the lucee/extension-s3 project:
class S3, method write().
/**
 * Writes a local file to S3 as the given object. Files of {@code maxSize} bytes or more
 * are sent as a multipart upload (100 MB parts); smaller files use a single PutObject
 * call. When the target bucket is missing ("NoSuchBucket"), the bucket is created and
 * the write is retried once via recursion.
 *
 * @param bucketName target bucket; normalized via improveBucketName
 * @param objectName target object key; normalized via improveObjectName
 * @param file       local file to upload
 * @param acl        optional access control settings, applied after upload when non-null
 * @param region     region used to resolve the client; may be null
 * @throws IOException when the S3 service reports an error (wrapped via toS3Exception)
 */
public void write(String bucketName, String objectName, File file, Object acl, String region) throws IOException {
	bucketName = improveBucketName(bucketName);
	objectName = improveObjectName(objectName, false);
	flushExists(bucketName, objectName); // drop any stale cache entry before writing
	AmazonS3Client client = getAmazonS3(bucketName, region);
	if (file.length() >= maxSize) {
		try {
			// Collect the ETag of each uploaded part; the complete-request needs the full list.
			List<PartETag> partETags = new ArrayList<PartETag>();
			long contentLength = file.length();
			// Part size: 100 MB (the last part may be smaller).
			long partSize = 100 * 1024 * 1024;
			// Initiate the multipart upload.
			InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, objectName);
			InitiateMultipartUploadResult initResponse = client.initiateMultipartUpload(initRequest);
			// Upload the file parts.
			long filePosition = 0;
			for (int i = 1; filePosition < contentLength; i++) {
				// The last part can be shorter than 100 MB; clamp to the remaining bytes.
				partSize = Math.min(partSize, contentLength - filePosition);
				// Create the request to upload a part.
				UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName).withKey(objectName).withUploadId(initResponse.getUploadId())
						.withPartNumber(i).withFileOffset(filePosition).withFile(file).withPartSize(partSize);
				// TODO set ACL (currently applied only after completion, see below)
				// Upload the part and add the response's ETag to our list.
				UploadPartResult uploadResult = client.uploadPart(uploadRequest);
				partETags.add(uploadResult.getPartETag());
				filePosition += partSize;
			}
			// Complete the multipart upload.
			CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, objectName, initResponse.getUploadId(), partETags);
			client.completeMultipartUpload(compRequest);
			if (acl != null) {
				setACL(client, bucketName, objectName, acl);
			}
			// refresh the existence cache on success, mirroring the single-part branch below
			flushExists(bucketName, objectName);
			// NOTE(review): on failure the multipart upload is never aborted, so already
			// uploaded parts may linger (and be billed) until a lifecycle rule removes
			// them — consider AbortMultipartUploadRequest in the catch block.
		}
		catch (AmazonServiceException ase) {
			if (ase.getErrorCode().equals("NoSuchBucket")) {
				// bucket is missing: create it, then retry the whole write once
				createDirectory(bucketName, acl, region);
				write(bucketName, objectName, file, acl, region);
				return;
			}
			else throw toS3Exception(ase);
		}
		finally {
			client.release();
		}
	}
	else {
		// single-call upload for files below the multipart threshold
		PutObjectRequest por = new PutObjectRequest(bucketName, objectName, file);
		if (acl != null) setACL(por, acl);
		try {
			client.putObject(por);
			flushExists(bucketName, objectName);
		}
		catch (AmazonServiceException ase) {
			// example payload: error-code:EntityTooLarge;ProposedSize:5800000000;MaxSizeAllowed:5368709120
			if (ase.getErrorCode().equals("EntityTooLarge")) {
				S3Exception s3e = toS3Exception(ase);
				if (s3e.getProposedSize() != 0 && s3e.getProposedSize() < maxSize) {
					// the service told us the real limit: lower the threshold and
					// retry, which now takes the multipart path above
					maxSize = s3e.getProposedSize();
					write(bucketName, objectName, file, acl, region);
					return;
				}
				throw s3e;
			}
			if (ase.getErrorCode().equals("NoSuchBucket")) {
				// bucket is missing: create it, then retry the whole write once
				createDirectory(bucketName, acl, region);
				write(bucketName, objectName, file, acl, region);
				return;
			}
			else throw toS3Exception(ase);
		}
		finally {
			client.release();
		}
	}
}
Usage of org.lucee.extension.resource.s3.pool.AmazonS3Client in the lucee/extension-s3 project:
class S3, method listObjectsAsQuery().
/**
 * Lists every object in the given bucket as a CFML Query with the columns
 * objectName, size, lastModified and owner, paging through truncated listings
 * until all object summaries have been added.
 *
 * @param bucketName the bucket to list
 * @return a Query named "buckets" with one row per object
 * @throws S3Exception   when the S3 service reports an error
 * @throws PageException when building the Query fails
 */
public Query listObjectsAsQuery(String bucketName) throws S3Exception, PageException {
	AmazonS3Client client = getAmazonS3(bucketName, null);
	try {
		CFMLEngine eng = CFMLEngineFactory.getInstance();
		Creation creator = eng.getCreationUtil();
		final Key objectName = creator.createKey("objectName");
		final Key size = creator.createKey("size");
		final Key lastModified = creator.createKey("lastModified");
		// was createKey("lastModified") — a copy-paste duplicate that mislabeled
		// the owner column; it is now labeled correctly
		final Key owner = creator.createKey("owner");
		Query qry = creator.createQuery(new Key[] { objectName, size, lastModified, owner }, 0, "buckets");
		ObjectListing objects = client.listObjects(bucketName);
		if (objects != null && objects.getObjectSummaries() != null) {
			int row;
			while (true) {
				for (S3ObjectSummary summary : objects.getObjectSummaries()) {
					row = qry.addRow();
					qry.setAt(objectName, row, summary.getKey());
					qry.setAt(lastModified, row, summary.getLastModified());
					qry.setAt(size, row, summary.getSize());
					// NOTE(review): getOwner() can be null when the listing is made
					// without owner info — TODO confirm whether a guard is needed
					qry.setAt(owner, row, summary.getOwner().getDisplayName());
				}
				// truncated listing: fetch the next page, otherwise we are done
				if (!objects.isTruncated()) break;
				objects = client.listNextBatchOfObjects(objects);
			}
		}
		return qry;
	}
	catch (AmazonServiceException ase) {
		throw toS3Exception(ase);
	}
	finally {
		client.release();
	}
}
Usage of org.lucee.extension.resource.s3.pool.AmazonS3Client in the lucee/extension-s3 project:
class S3, method get().
/**
 * Returns meta info for a single object (file or pseudo directory), or null when it
 * does not exist. Results — including misses (as NotExisting) — are stored in the
 * "exists" cache for cacheTimeout milliseconds, and every object summary returned by
 * the listing is cached opportunistically as well.
 *
 * @param bucketName bucket to look in; normalized via improveBucketName
 * @param objectName object key; when empty, falls back to bucket-level info
 * @return the object's S3Info, or null when the object does not exist
 * @throws S3Exception when the S3 service reports an error
 */
public S3Info get(String bucketName, final String objectName) throws S3Exception {
	if (Util.isEmpty(objectName)) {
		// no object key given: return bucket-level info instead
		return get(bucketName);
	}
	bucketName = improveBucketName(bucketName);
	String nameFile = improveObjectName(objectName, false); // without trailing slash
	String nameDir = improveObjectName(objectName, true); // with trailing slash
	// cache lookup, keyed by the normalized file name
	S3Info info = cacheTimeout <= 0 ? null : exists.get(toKey(bucketName, nameFile));
	if (info != null && info.validUntil() >= System.currentTimeMillis()) {
		if (info instanceof NotExisting)
			return null;
		return info;
	}
	info = null;
	AmazonS3Client client = getAmazonS3(bucketName, null);
	try {
		long validUntil = System.currentTimeMillis() + cacheTimeout;
		ObjectListing objects = null;
		try {
			ListObjectsRequest lor = new ListObjectsRequest();
			lor.setBucketName(bucketName);
			lor.setPrefix(nameFile);
			lor.setMaxKeys(100);
			objects = client.listObjects(lor);
		}
		catch (Exception e) {
			// a listing failure is treated like "not found" below; just log it
			if (log != null)
				log.error("s3", e);
			else
				e.printStackTrace();
		}
		if (objects == null || objects.getObjectSummaries() == null || objects.getObjectSummaries().size() == 0) {
			// cache the miss, keyed by nameFile so the lookup above can actually hit it
			// (was keyed by the raw objectName, which never matched the read key)
			exists.put(toKey(bucketName, nameFile), new NotExisting(bucketName, objectName, validUntil, log));
			return null;
		}
		String targetName;
		S3ObjectSummary stoObj = null;
		for (S3ObjectSummary summary : objects.getObjectSummaries()) {
			targetName = summary.getKey();
			// exact match: the file itself or an explicit directory object
			if (nameFile.equals(targetName) || nameDir.equals(targetName)) {
				exists.put(toKey(bucketName, nameFile), info = new StorageObjectWrapper(this, stoObj = summary, validUntil, log));
			}
			// pseudo directory: some object exists below nameDir
			if (nameDir.length() < targetName.length() && targetName.startsWith(nameDir)) {
				exists.put(toKey(bucketName, nameFile), info = new ParentObject(this, bucketName, nameDir, validUntil, log));
			}
			// opportunistically cache every other summary the listing returned
			if (!(stoObj != null && stoObj.equals(summary))) {
				exists.put(toKey(summary.getBucketName(), summary.getKey()), new StorageObjectWrapper(this, summary, validUntil, log));
			}
		}
		// mark all parent prefixes as existing pseudo directories; hoisted out of the
		// loop above because it only depends on nameFile, not on the summaries
		// TODO handle that also a file with that name can exist at the same time
		String parent = nameFile;
		int index;
		while ((index = parent.lastIndexOf('/')) != -1) {
			parent = parent.substring(0, index);
			exists.put(toKey(bucketName, parent), new ParentObject(this, bucketName, parent, validUntil, log));
		}
		if (info == null) {
			// nothing matched exactly: not returned, but cached so the next lookup
			// knows the object does not exist (keyed by nameFile, see above)
			exists.put(toKey(bucketName, nameFile), new NotExisting(bucketName, objectName, validUntil, log));
		}
		return info;
	}
	catch (AmazonServiceException ase) {
		throw toS3Exception(ase);
	}
	finally {
		client.release();
	}
}
Usage of org.lucee.extension.resource.s3.pool.AmazonS3Client in the lucee/extension-s3 project:
class S3, method addAccessControlList().
/**
 * Adds the given grants to the existing ACL of an object (existing grants are kept).
 *
 * @param bucketName bucket holding the object; normalized via improveBucketName
 * @param objectName key of the object; normalized via improveObjectName
 * @param objACL     grants to add, converted via AccessControlListUtil.toGrantAndPermissions
 * @throws S3Exception   when the S3 service reports an error
 * @throws PageException when converting objACL fails
 */
public void addAccessControlList(String bucketName, String objectName, Object objACL) throws S3Exception, PageException {
	// normalize the names BEFORE resolving the client, so the client is picked for
	// the correct bucket — every other method in this class does it in this order
	bucketName = improveBucketName(bucketName);
	objectName = improveObjectName(objectName);
	AmazonS3Client client = getAmazonS3(bucketName, null);
	try {
		AccessControlList acl = getACL(client, bucketName, objectName);
		acl.grantAllPermissions(AccessControlListUtil.toGrantAndPermissions(objACL));
		client.setObjectAcl(bucketName, objectName, acl);
		// is it necessary to set it for bucket as well?
	}
	catch (AmazonServiceException se) {
		throw toS3Exception(se);
	}
	finally {
		client.release();
	}
}
Usage of org.lucee.extension.resource.s3.pool.AmazonS3Client in the lucee/extension-s3 project:
class S3, method getBucketRegion().
/**
 * Resolves the region of a bucket, consulting the bucketRegions cache first.
 * Returns null when the bucket does not exist, when a previous lookup failed
 * (cached under the ERROR sentinel), or when loadIfNecessary is false and
 * nothing is cached yet.
 *
 * @param bucketName      the bucket to resolve; normalized via improveBucketName
 * @param loadIfNecessary when true, ask the service on a cache miss
 * @return the bucket's region, or null (see above)
 * @throws S3Exception when resolving the client fails
 */
public Regions getBucketRegion(String bucketName, boolean loadIfNecessary) throws S3Exception {
	bucketName = improveBucketName(bucketName);
	// cache hit? ERROR marks a lookup that already failed (e.g. AccessDenied)
	Object cached = bucketRegions.get(bucketName);
	if (cached != null) {
		return cached == ERROR ? null : (Regions) cached;
	}
	if (!loadIfNecessary) {
		return null;
	}
	Regions region = null;
	AmazonS3Client client = getAmazonS3(null, null);
	try {
		region = toRegions(client.getBucketLocation(bucketName));
		bucketRegions.put(bucketName, region);
	}
	catch (AmazonServiceException ase) {
		if (ase.getErrorCode().equals("NoSuchBucket")) {
			// unknown bucket: nothing to cache, simply report "no region"
			return null;
		}
		if (log != null)
			log.error("s3", "failed to load region", ase);
		else
			ase.printStackTrace();
		// remember the failure (could be AccessDenied) so we do not retry every call
		bucketRegions.put(bucketName, ERROR);
		return null;
	}
	finally {
		client.release();
	}
	return region;
}
End of aggregated usage examples.