Search in sources :

Example 1 with AmazonS3Client

use of org.lucee.extension.resource.s3.pool.AmazonS3Client in project extension-s3 by lucee.

From the class S3, method write:

/**
 * Writes the given file to S3 under bucketName/objectName. Files at or above
 * {@code maxSize} are sent via multipart upload (100 MB parts); smaller files
 * use a single PutObject call. If the bucket does not exist it is created and
 * the write is retried once.
 *
 * @param bucketName target bucket (normalized via improveBucketName)
 * @param objectName target key (normalized via improveObjectName)
 * @param file       local file to upload
 * @param acl        optional ACL to apply (null to skip)
 * @param region     region used to resolve the client and create the bucket
 * @throws IOException wrapping any S3 failure (via toS3Exception)
 */
public void write(String bucketName, String objectName, File file, Object acl, String region) throws IOException {
    bucketName = improveBucketName(bucketName);
    objectName = improveObjectName(objectName, false);
    flushExists(bucketName, objectName);
    AmazonS3Client client = getAmazonS3(bucketName, region);
    if (file.length() >= maxSize) {
        try {
            // ETags collected for each uploaded part; the complete-request needs the full list.
            List<PartETag> partETags = new ArrayList<PartETag>();
            long contentLength = file.length();
            // Part size 100 MB; the last part may be smaller.
            long partSize = 100L * 1024 * 1024;
            // Initiate the multipart upload.
            InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, objectName);
            InitiateMultipartUploadResult initResponse = client.initiateMultipartUpload(initRequest);
            try {
                // Upload the file parts.
                long filePosition = 0;
                for (int i = 1; filePosition < contentLength; i++) {
                    // The last part can be shorter than 100 MB; shrink as needed.
                    partSize = Math.min(partSize, (contentLength - filePosition));
                    // TODO set ACL on the part requests as well
                    UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName).withKey(objectName).withUploadId(initResponse.getUploadId()).withPartNumber(i).withFileOffset(filePosition).withFile(file).withPartSize(partSize);
                    UploadPartResult uploadResult = client.uploadPart(uploadRequest);
                    partETags.add(uploadResult.getPartETag());
                    filePosition += partSize;
                }
                // Complete the multipart upload.
                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, objectName, initResponse.getUploadId(), partETags);
                client.completeMultipartUpload(compRequest);
            }
            catch (AmazonServiceException ase) {
                // Abort the upload so the already-transferred parts do not keep
                // accruing storage on S3 (incomplete uploads are billed until aborted).
                try {
                    client.abortMultipartUpload(new com.amazonaws.services.s3.model.AbortMultipartUploadRequest(bucketName, objectName, initResponse.getUploadId()));
                }
                catch (Exception ignored) {
                    // best effort only; the original failure below is the relevant one
                }
                throw ase;
            }
            if (acl != null) {
                setACL(client, bucketName, objectName, acl);
            }
        }
        catch (AmazonServiceException ase) {
            if (ase.getErrorCode().equals("NoSuchBucket")) {
                // bucket missing: create it and retry the whole write once
                createDirectory(bucketName, acl, region);
                write(bucketName, objectName, file, acl, region);
                return;
            }
            throw toS3Exception(ase);
        }
        finally {
            client.release();
        }
    }
    else {
        // single-request upload for files below maxSize
        PutObjectRequest por = new PutObjectRequest(bucketName, objectName, file);
        if (acl != null)
            setACL(por, acl);
        try {
            client.putObject(por);
            flushExists(bucketName, objectName);
        }
        catch (AmazonServiceException ase) {
            // e.g. error-code:EntityTooLarge;ProposedSize:5800000000;MaxSizeAllowed:5368709120
            if (ase.getErrorCode().equals("EntityTooLarge")) {
                S3Exception s3e = toS3Exception(ase);
                if (s3e.getProposedSize() != 0 && s3e.getProposedSize() < maxSize) {
                    // lower the multipart threshold to what S3 reported and retry;
                    // the retry will then take the multipart path
                    maxSize = s3e.getProposedSize();
                    write(bucketName, objectName, file, acl, region);
                    return;
                }
                throw s3e;
            }
            if (ase.getErrorCode().equals("NoSuchBucket")) {
                createDirectory(bucketName, acl, region);
                write(bucketName, objectName, file, acl, region);
                return;
            }
            throw toS3Exception(ase);
        }
        finally {
            client.release();
        }
    }
}
Also used : InitiateMultipartUploadResult(com.amazonaws.services.s3.model.InitiateMultipartUploadResult) AmazonS3Exception(com.amazonaws.services.s3.model.AmazonS3Exception) ArrayList(java.util.ArrayList) InitiateMultipartUploadRequest(com.amazonaws.services.s3.model.InitiateMultipartUploadRequest) UploadPartRequest(com.amazonaws.services.s3.model.UploadPartRequest) PartETag(com.amazonaws.services.s3.model.PartETag) UploadPartResult(com.amazonaws.services.s3.model.UploadPartResult) AmazonS3Client(org.lucee.extension.resource.s3.pool.AmazonS3Client) AmazonServiceException(com.amazonaws.AmazonServiceException) PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest) CompleteMultipartUploadRequest(com.amazonaws.services.s3.model.CompleteMultipartUploadRequest)

Example 2 with AmazonS3Client

use of org.lucee.extension.resource.s3.pool.AmazonS3Client in project extension-s3 by lucee.

From the class S3, method listObjectsAsQuery:

/**
 * Lists all objects in the given bucket as a Lucee Query with the columns
 * objectName, size, lastModified and owner, following listing pagination
 * until the result is no longer truncated.
 *
 * @param bucketName bucket to list
 * @return query with one row per object
 * @throws S3Exception   when the S3 request fails
 * @throws PageException when building the query fails
 */
public Query listObjectsAsQuery(String bucketName) throws S3Exception, PageException {
    AmazonS3Client client = getAmazonS3(bucketName, null);
    try {
        CFMLEngine eng = CFMLEngineFactory.getInstance();
        Creation creator = eng.getCreationUtil();
        final Key objectName = creator.createKey("objectName");
        final Key size = creator.createKey("size");
        final Key lastModified = creator.createKey("lastModified");
        // was createKey("lastModified") — copy-paste bug that made two columns
        // share one key, so the owner column never carried its own data
        final Key owner = creator.createKey("owner");
        Query qry = creator.createQuery(new Key[] { objectName, size, lastModified, owner }, 0, "buckets");
        ObjectListing objects = client.listObjects(bucketName);
        if (objects != null && objects.getObjectSummaries() != null) {
            int row;
            while (true) {
                for (S3ObjectSummary summary : objects.getObjectSummaries()) {
                    row = qry.addRow();
                    qry.setAt(objectName, row, summary.getKey());
                    qry.setAt(lastModified, row, summary.getLastModified());
                    qry.setAt(size, row, summary.getSize());
                    // getOwner() may be null when the caller lacks permission to read it
                    qry.setAt(owner, row, summary.getOwner() == null ? null : summary.getOwner().getDisplayName());
                }
                if (!objects.isTruncated())
                    break;
                // fetch the next page of the listing
                objects = client.listNextBatchOfObjects(objects);
            }
        }
        return qry;
    } catch (AmazonServiceException ase) {
        throw toS3Exception(ase);
    } finally {
        client.release();
    }
}
Also used : AmazonS3Client(org.lucee.extension.resource.s3.pool.AmazonS3Client) Creation(lucee.runtime.util.Creation) Query(lucee.runtime.type.Query) ArrayList(java.util.ArrayList) AmazonServiceException(com.amazonaws.AmazonServiceException) ObjectListing(com.amazonaws.services.s3.model.ObjectListing) S3ObjectSummary(com.amazonaws.services.s3.model.S3ObjectSummary) CFMLEngine(lucee.loader.engine.CFMLEngine) S3Object(com.amazonaws.services.s3.model.S3Object) Key(lucee.runtime.type.Collection.Key)

Example 3 with AmazonS3Client

use of org.lucee.extension.resource.s3.pool.AmazonS3Client in project extension-s3 by lucee.

From the class S3, method get:

/**
 * Returns info for bucketName/objectName, consulting the {@code exists} cache
 * first and otherwise listing S3 with the name as prefix. A prefix listing also
 * lets a "pseudo directory" (a key below nameDir) count as existing. Side
 * effect: every listed sibling, all parents of the name, and negative results
 * are written into the cache with the same validUntil.
 *
 * @param bucketName bucket (normalized via improveBucketName)
 * @param objectName key; when empty, delegates to get(bucketName)
 * @return the info, or null when the object does not exist
 * @throws S3Exception wrapping any S3 failure
 */
public S3Info get(String bucketName, final String objectName) throws S3Exception {
    if (Util.isEmpty(objectName)) {
        return get(bucketName);
    }
    bucketName = improveBucketName(bucketName);
    String nameFile = improveObjectName(objectName, false);
    String nameDir = improveObjectName(objectName, true);
    // cache lookup is keyed by the improved file name
    S3Info info = cacheTimeout <= 0 ? null : exists.get(toKey(bucketName, nameFile));
    if (info != null && info.validUntil() >= System.currentTimeMillis()) {
        if (info instanceof NotExisting)
            return null;
        return info;
    }
    info = null;
    AmazonS3Client client = getAmazonS3(bucketName, null);
    try {
        long validUntil = System.currentTimeMillis() + cacheTimeout;
        ObjectListing objects = null;
        try {
            ListObjectsRequest lor = new ListObjectsRequest();
            lor.setBucketName(bucketName);
            lor.setPrefix(nameFile);
            lor.setMaxKeys(100);
            objects = client.listObjects(lor);
        } catch (Exception e) {
            // listing failure is treated as "nothing found" below; log and continue
            if (log != null)
                log.error("s3", e);
            else
                e.printStackTrace();
        }
        if (objects == null || objects.getObjectSummaries() == null || objects.getObjectSummaries().size() == 0) {
            // not returned, only cached; keyed by nameFile so the lookup above can
            // actually hit it (was keyed by the raw objectName, which never matched)
            exists.put(toKey(bucketName, nameFile), new NotExisting(bucketName, objectName, validUntil, log));
            return null;
        }
        String targetName;
        S3ObjectSummary stoObj = null;
        for (S3ObjectSummary summary : objects.getObjectSummaries()) {
            // direct match (the file itself or its directory marker)
            targetName = summary.getKey();
            if (nameFile.equals(targetName) || nameDir.equals(targetName)) {
                exists.put(toKey(bucketName, nameFile), info = new StorageObjectWrapper(this, stoObj = summary, validUntil, log));
            }
            // pseudo directory: any key strictly below nameDir implies the directory exists
            // NOTE(review): this can overwrite a direct-match info set above — confirm intended
            if (nameDir.length() < targetName.length() && targetName.startsWith(nameDir)) {
                exists.put(toKey(bucketName, nameFile), info = new ParentObject(this, bucketName, nameDir, validUntil, log));
            }
            // cache every listed sibling (skip the one already stored as the direct match)
            if (!(stoObj != null && stoObj.equals(summary))) {
                exists.put(toKey(summary.getBucketName(), summary.getKey()), new StorageObjectWrapper(this, summary, validUntil, log));
            }
        }
        // cache all parents of the requested name as existing directories;
        // loop-invariant, so hoisted out of the per-summary loop above
        // TODO handle that also a file with that name can exist at the same time
        String parent = nameFile;
        int index;
        while ((index = parent.lastIndexOf('/')) != -1) {
            parent = parent.substring(0, index);
            exists.put(toKey(bucketName, parent), new ParentObject(this, bucketName, parent, validUntil, log));
        }
        if (info == null) {
            // only prefix matches that are neither the object nor its directory;
            // not returned, only cached as a negative result (keyed like the lookup)
            exists.put(toKey(bucketName, nameFile), new NotExisting(bucketName, objectName, validUntil, log));
        }
        return info;
    } catch (AmazonServiceException ase) {
        throw toS3Exception(ase);
    } finally {
        client.release();
    }
}
Also used : NotExisting(org.lucee.extension.resource.s3.info.NotExisting) ObjectListing(com.amazonaws.services.s3.model.ObjectListing) S3ObjectSummary(com.amazonaws.services.s3.model.S3ObjectSummary) PageException(lucee.runtime.exp.PageException) AmazonServiceException(com.amazonaws.AmazonServiceException) AmazonS3Exception(com.amazonaws.services.s3.model.AmazonS3Exception) IOException(java.io.IOException) ParentObject(org.lucee.extension.resource.s3.info.ParentObject) ListObjectsRequest(com.amazonaws.services.s3.model.ListObjectsRequest) AmazonS3Client(org.lucee.extension.resource.s3.pool.AmazonS3Client) StorageObjectWrapper(org.lucee.extension.resource.s3.info.StorageObjectWrapper) AmazonServiceException(com.amazonaws.AmazonServiceException) S3Info(org.lucee.extension.resource.s3.info.S3Info)

Example 4 with AmazonS3Client

use of org.lucee.extension.resource.s3.pool.AmazonS3Client in project extension-s3 by lucee.

From the class S3, method addAccessControlList:

/**
 * Adds the given grants to the existing ACL of bucketName/objectName
 * (read-modify-write: fetch current ACL, grant, set back).
 *
 * @param bucketName bucket holding the object
 * @param objectName key of the object
 * @param objACL     grants in any form accepted by AccessControlListUtil.toGrantAndPermissions
 * @throws S3Exception   when the S3 request fails
 * @throws PageException when the ACL argument cannot be converted
 */
public void addAccessControlList(String bucketName, String objectName, Object objACL) throws S3Exception, PageException {
    // normalize names before resolving the client, so the client is obtained for
    // the improved bucket name — consistent with the other methods of this class
    bucketName = improveBucketName(bucketName);
    objectName = improveObjectName(objectName);
    AmazonS3Client client = getAmazonS3(bucketName, null);
    try {
        AccessControlList acl = getACL(client, bucketName, objectName);
        acl.grantAllPermissions(AccessControlListUtil.toGrantAndPermissions(objACL));
        client.setObjectAcl(bucketName, objectName, acl);
        // NOTE(review): unclear whether the bucket ACL must be updated as well — left as-is
    } catch (AmazonServiceException se) {
        throw toS3Exception(se);
    } finally {
        client.release();
    }
}
Also used : CannedAccessControlList(com.amazonaws.services.s3.model.CannedAccessControlList) AccessControlList(com.amazonaws.services.s3.model.AccessControlList) AmazonS3Client(org.lucee.extension.resource.s3.pool.AmazonS3Client) AmazonServiceException(com.amazonaws.AmazonServiceException)

Example 5 with AmazonS3Client

use of org.lucee.extension.resource.s3.pool.AmazonS3Client in project extension-s3 by lucee.

From the class S3, method getBucketRegion:

/**
 * Resolves the region of a bucket, using the {@code bucketRegions} cache first.
 * A previous failure is cached as the ERROR marker and reported as null.
 *
 * @param bucketName      bucket (normalized via improveBucketName)
 * @param loadIfNecessary when true, query S3 on a cache miss; when false,
 *                        return null on a miss
 * @return the region, or null when unknown or the bucket does not exist
 * @throws S3Exception declared for API consistency
 */
public Regions getBucketRegion(String bucketName, boolean loadIfNecessary) throws S3Exception {
    bucketName = improveBucketName(bucketName);
    // cached entry is either a Regions value or the ERROR marker
    Object cached = bucketRegions.get(bucketName);
    if (cached != null) {
        return cached == ERROR ? null : (Regions) cached;
    }
    if (!loadIfNecessary) {
        return null;
    }
    AmazonS3Client client = getAmazonS3(null, null);
    try {
        Regions region = toRegions(client.getBucketLocation(bucketName));
        bucketRegions.put(bucketName, region);
        return region;
    } catch (AmazonServiceException ase) {
        if (ase.getErrorCode().equals("NoSuchBucket")) {
            return null;
        }
        if (log != null)
            log.error("s3", "failed to load region", ase);
        else
            ase.printStackTrace();
        // could be AccessDenied; remember the failure so we do not retry on every call
        bucketRegions.put(bucketName, ERROR);
        return null;
    } finally {
        client.release();
    }
}
Also used : AmazonS3Client(org.lucee.extension.resource.s3.pool.AmazonS3Client) AmazonServiceException(com.amazonaws.AmazonServiceException) ParentObject(org.lucee.extension.resource.s3.info.ParentObject) S3Object(com.amazonaws.services.s3.model.S3Object) Regions(com.amazonaws.regions.Regions)

Aggregations

AmazonS3Client (org.lucee.extension.resource.s3.pool.AmazonS3Client)27 AmazonServiceException (com.amazonaws.AmazonServiceException)21 AmazonS3Exception (com.amazonaws.services.s3.model.AmazonS3Exception)8 ObjectListing (com.amazonaws.services.s3.model.ObjectListing)7 ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata)6 S3Object (com.amazonaws.services.s3.model.S3Object)6 S3ObjectSummary (com.amazonaws.services.s3.model.S3ObjectSummary)6 ArrayList (java.util.ArrayList)6 ParentObject (org.lucee.extension.resource.s3.info.ParentObject)6 PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest)5 Bucket (com.amazonaws.services.s3.model.Bucket)4 ByteArrayInputStream (java.io.ByteArrayInputStream)4 Date (java.util.Date)4 AccessControlList (com.amazonaws.services.s3.model.AccessControlList)3 CannedAccessControlList (com.amazonaws.services.s3.model.CannedAccessControlList)3 File (java.io.File)3 FileOutputStream (java.io.FileOutputStream)3 CopyObjectRequest (com.amazonaws.services.s3.model.CopyObjectRequest)2 CreateBucketRequest (com.amazonaws.services.s3.model.CreateBucketRequest)2 DeleteObjectsRequest (com.amazonaws.services.s3.model.DeleteObjectsRequest)2