
Example 21 with PutObjectRequest

Use of com.amazonaws.services.s3.model.PutObjectRequest in project extension-s3 by lucee.

From the class S3, method write().

public void write(String bucketName, String objectName, File file, Object acl, String region) throws IOException {
    bucketName = improveBucketName(bucketName);
    objectName = improveObjectName(objectName, false);
    flushExists(bucketName, objectName);
    AmazonS3Client client = getAmazonS3(bucketName, region);
    if (file.length() >= maxSize) {
        try {
            // Create a list of ETag objects. You retrieve ETags for each object part uploaded,
            // then, after each individual part has been uploaded, pass the list of ETags to
            // the request to complete the upload.
            List<PartETag> partETags = new ArrayList<PartETag>();
            long contentLength = file.length();
            // Set part size to 100 MB.
            long partSize = 100 * 1024 * 1024;
            // Initiate the multipart upload.
            InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, objectName);
            InitiateMultipartUploadResult initResponse = client.initiateMultipartUpload(initRequest);
            // Upload the file parts.
            long filePosition = 0;
            long total = 0;
            for (int i = 1; filePosition < contentLength; i++) {
                // Because the last part could be less than 100 MB, adjust the part size as needed.
                partSize = Math.min(partSize, (contentLength - filePosition));
                total += partSize;
                // Create the request to upload a part.
                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName).withKey(objectName).withUploadId(initResponse.getUploadId()).withPartNumber(i).withFileOffset(filePosition).withFile(file).withPartSize(partSize);
                // TODO set ACL
                // Upload the part and add the response's ETag to our list.
                UploadPartResult uploadResult = client.uploadPart(uploadRequest);
                partETags.add(uploadResult.getPartETag());
                filePosition += partSize;
            }
            // Complete the multipart upload.
            CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, objectName, initResponse.getUploadId(), partETags);
            client.completeMultipartUpload(compRequest);
            if (acl != null) {
                setACL(client, bucketName, objectName, acl);
            }
        } catch (AmazonServiceException ase) {
            if (ase.getErrorCode().equals("NoSuchBucket")) {
                createDirectory(bucketName, acl, region);
                write(bucketName, objectName, file, acl, region);
                return;
            } else
                throw toS3Exception(ase);
        } finally {
            client.release();
        }
    } else {
        // create a PutObjectRequest for the file itself
        PutObjectRequest por = new PutObjectRequest(bucketName, objectName, file);
        if (acl != null)
            setACL(por, acl);
        try {
            client.putObject(por);
            flushExists(bucketName, objectName);
        } catch (AmazonServiceException ase) {
            // size;error-code:EntityTooLarge;ProposedSize:5800000000;MaxSizeAllowed:5368709120
            if (ase.getErrorCode().equals("EntityTooLarge")) {
                S3Exception s3e = toS3Exception(ase);
                if (s3e.getProposedSize() != 0 && s3e.getProposedSize() < maxSize) {
                    maxSize = s3e.getProposedSize();
                    write(bucketName, objectName, file, acl, region);
                    return;
                }
                throw s3e;
            }
            if (ase.getErrorCode().equals("NoSuchBucket")) {
                createDirectory(bucketName, acl, region);
                write(bucketName, objectName, file, acl, region);
                return;
            } else
                throw toS3Exception(ase);
        } finally {
            client.release();
        }
    }
}
Also used : InitiateMultipartUploadResult(com.amazonaws.services.s3.model.InitiateMultipartUploadResult) AmazonS3Exception(com.amazonaws.services.s3.model.AmazonS3Exception) ArrayList(java.util.ArrayList) InitiateMultipartUploadRequest(com.amazonaws.services.s3.model.InitiateMultipartUploadRequest) UploadPartRequest(com.amazonaws.services.s3.model.UploadPartRequest) PartETag(com.amazonaws.services.s3.model.PartETag) UploadPartResult(com.amazonaws.services.s3.model.UploadPartResult) AmazonS3Client(org.lucee.extension.resource.s3.pool.AmazonS3Client) AmazonServiceException(com.amazonaws.AmazonServiceException) PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest) CompleteMultipartUploadRequest(com.amazonaws.services.s3.model.CompleteMultipartUploadRequest)
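
For comparison, the part/ETag bookkeeping done by hand above is what the SDK's high-level TransferManager does automatically. A minimal sketch of the same large-file upload through that API, assuming default credentials and placeholder bucket, key, and file names (none of this is part of the Lucee extension):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import java.io.File;

public class MultipartSketch {
    public static void main(String[] args) throws InterruptedException {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3).build();
        try {
            // TransferManager splits large files into parts, uploads them in parallel,
            // and completes the multipart upload (the ETag handling shown above).
            Upload upload = tm.upload("example-bucket", "example-key.bin", new File("/tmp/example.bin"));
            upload.waitForCompletion();
        } finally {
            // pass false to keep the underlying client alive if it is shared elsewhere
            tm.shutdownNow(false);
        }
    }
}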

Example 22 with PutObjectRequest

Use of com.amazonaws.services.s3.model.PutObjectRequest in project extension-s3 by lucee.

From the class S3, method write().

/**
 * @param bucketName target bucket; created if it does not already exist
 * @param objectName object to create
 * @param data object content
 * @param mimeType content type of the object; may be null
 * @param charset charset used to encode the data; may be null
 * @param acl access control list
 * @param region region used to create the bucket if it does not already exist; if not defined,
 *            the default region set in the constructor is used
 * @throws S3Exception
 */
public void write(String bucketName, String objectName, String data, String mimeType, Charset charset, Object acl, String region) throws IOException {
    bucketName = improveBucketName(bucketName);
    objectName = improveObjectName(objectName, false);
    flushExists(bucketName, objectName);
    AmazonS3Client client = getAmazonS3(bucketName, region);
    String ct = toContentType(mimeType, charset, null);
    byte[] bytes = charset == null ? data.getBytes() : data.getBytes(charset);
    // unlikely this will ever happen, so we do not write extra code for it
    if (data.length() > maxSize) {
        File tmp = File.createTempFile("writeString-", ".txt");
        try {
            Util.copy(new ByteArrayInputStream(bytes), new FileOutputStream(tmp), true, true);
            write(bucketName, objectName, tmp, acl, region);
            return;
        } finally {
            tmp.delete();
        }
    } else {
        ObjectMetadata md = new ObjectMetadata();
        if (ct != null)
            md.setContentType(ct);
        md.setLastModified(new Date());
        // create a PutObjectRequest for the in-memory data
        md.setContentLength(bytes.length);
        PutObjectRequest por = new PutObjectRequest(bucketName, objectName, new ByteArrayInputStream(bytes), md);
        if (acl != null)
            setACL(por, acl);
        try {
            // send request to S3 to create the object
            try {
                client.putObject(por);
                flushExists(bucketName, objectName);
            } catch (AmazonServiceException ase) {
                if (ase.getErrorCode().equals("EntityTooLarge")) {
                    S3Exception s3e = toS3Exception(ase);
                    if (s3e.getProposedSize() != 0 && s3e.getProposedSize() < maxSize) {
                        maxSize = s3e.getProposedSize();
                        write(bucketName, objectName, data, mimeType, charset, acl, region);
                        return;
                    }
                    throw s3e;
                }
                if (ase.getErrorCode().equals("NoSuchBucket")) {
                    createDirectory(bucketName, acl, region);
                    write(bucketName, objectName, data, mimeType, charset, acl, region);
                    return;
                } else
                    throw toS3Exception(ase);
            }
        } catch (AmazonServiceException se) {
            throw toS3Exception(se);
        } finally {
            client.release();
        }
    }
}
Also used : AmazonS3Client(org.lucee.extension.resource.s3.pool.AmazonS3Client) ByteArrayInputStream(java.io.ByteArrayInputStream) AmazonS3Exception(com.amazonaws.services.s3.model.AmazonS3Exception) FileOutputStream(java.io.FileOutputStream) AmazonServiceException(com.amazonaws.AmazonServiceException) File(java.io.File) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata) Date(java.util.Date) PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest)
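
The snippet above relies on private helpers (getAmazonS3(), toContentType(), setACL()) that are not shown here. A rough, self-contained approximation of the happy path with the plain AWS SDK v1, using a placeholder bucket, key, and canned ACL, might look like this:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class PutStringSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        byte[] bytes = "hello, world".getBytes(StandardCharsets.UTF_8);
        ObjectMetadata md = new ObjectMetadata();
        md.setContentType("text/plain; charset=UTF-8");
        // declaring the length up front lets the SDK stream the bytes without buffering the payload
        md.setContentLength(bytes.length);
        PutObjectRequest por = new PutObjectRequest("example-bucket", "example-key.txt",
                new ByteArrayInputStream(bytes), md)
                .withCannedAcl(CannedAccessControlList.Private);
        s3.putObject(por);
    }
}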

Example 23 with PutObjectRequest

Use of com.amazonaws.services.s3.model.PutObjectRequest in project extension-s3 by lucee.

From the class S3, method write().

public void write(String bucketName, String objectName, Resource res, Object acl, String region) throws IOException {
    if (res instanceof File) {
        write(bucketName, objectName, (File) res, acl, region);
        return;
    }
    String ct = CFMLEngineFactory.getInstance().getResourceUtil().getMimeType(res, null);
    try {
        bucketName = improveBucketName(bucketName);
        objectName = improveObjectName(objectName, false);
        flushExists(bucketName, objectName);
        // get an S3 client for the target bucket and region
        AmazonS3Client client = getAmazonS3(bucketName, region);
        if (res.length() > maxSize) {
            File tmp = File.createTempFile("writeResource-", ".txt");
            try {
                Util.copy(res.getInputStream(), new FileOutputStream(tmp), true, true);
                write(bucketName, objectName, tmp, acl, region);
                return;
            } finally {
                tmp.delete();
            }
        } else {
            InputStream is = null;
            ObjectMetadata md = new ObjectMetadata();
            md.setLastModified(new Date());
            md.setContentLength(res.length());
            try {
                PutObjectRequest por = new PutObjectRequest(bucketName, objectName, is = res.getInputStream(), md);
                if (acl != null)
                    setACL(por, acl);
                client.putObject(por);
                flushExists(bucketName, objectName);
            } catch (AmazonServiceException ase) {
                if (ase.getErrorCode().equals("EntityTooLarge")) {
                    S3Exception s3e = toS3Exception(ase);
                    if (s3e.getProposedSize() != 0 && s3e.getProposedSize() < maxSize) {
                        maxSize = s3e.getProposedSize();
                        write(bucketName, objectName, res, acl, region);
                        return;
                    }
                    throw s3e;
                }
                if (ase.getErrorCode().equals("NoSuchBucket")) {
                    createDirectory(bucketName, acl, region);
                    write(bucketName, objectName, res, acl, region);
                    return;
                } else
                    throw toS3Exception(ase);
            } finally {
                Util.closeEL(is);
                client.release();
            }
        }
    } catch (AmazonServiceException se) {
        throw toS3Exception(se);
    }
}
Also used : AmazonS3Client(org.lucee.extension.resource.s3.pool.AmazonS3Client) ByteArrayInputStream(java.io.ByteArrayInputStream) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) AmazonS3Exception(com.amazonaws.services.s3.model.AmazonS3Exception) FileOutputStream(java.io.FileOutputStream) AmazonServiceException(com.amazonaws.AmazonServiceException) File(java.io.File) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata) Date(java.util.Date) PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest)
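
The finally block above closes the Resource stream by hand with Util.closeEL(); with a plain java.io stream the same guarantee can be expressed with try-with-resources. A minimal sketch under that assumption, with placeholder paths and names:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class PutStreamSketch {
    public static void main(String[] args) throws IOException {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        Path source = Paths.get("/tmp/example.bin");
        ObjectMetadata md = new ObjectMetadata();
        // the SDK needs the length when uploading from a stream of known size
        md.setContentLength(Files.size(source));
        try (InputStream in = Files.newInputStream(source)) {
            s3.putObject(new PutObjectRequest("example-bucket", "example-key.bin", in, md));
        }
        // the stream is closed here even if putObject throws
    }
}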

Example 24 with PutObjectRequest

Use of com.amazonaws.services.s3.model.PutObjectRequest in project Project-Decode by cheery72.

From the class UserServiceImpl, method AwsFile().

private String AwsFile(MultipartFile file) {
    String fileName = createFileName(file.getOriginalFilename());
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(file.getSize());
    objectMetadata.setContentType(file.getContentType());
    try (InputStream inputStream = file.getInputStream()) {
        amazonS3.putObject(new PutObjectRequest(bucket, fileName, inputStream, objectMetadata).withCannedAcl(CannedAccessControlList.PublicRead));
    } catch (IOException e) {
        throw new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR, "파일 업로드에 실패했습니다."); // "File upload failed."
    }
    return String.format(url + "/%s", fileName);
}
Also used : InputStream(java.io.InputStream) IOException(java.io.IOException) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata) ResponseStatusException(org.springframework.web.server.ResponseStatusException) PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest)
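
The public URL above is assembled by hand from a configured url field and the file name. As an alternative, the v1 SDK exposes AmazonS3#getUrl for the same purpose; a small sketch with a placeholder bucket and key:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class ObjectUrlSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        // resolves to the object's URL for the client's configured region/endpoint
        String publicUrl = s3.getUrl("example-bucket", "example-key.png").toString();
        System.out.println(publicUrl);
    }
}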

Example 25 with PutObjectRequest

Use of software.amazon.awssdk.services.s3.model.PutObjectRequest in project kaldb by slackhq.

From the class S3BlobFs, method mkdir().

@Override
public boolean mkdir(URI uri) throws IOException {
    LOG.info("mkdir {}", uri);
    try {
        Preconditions.checkNotNull(uri, "uri is null");
        String path = normalizeToDirectoryPrefix(uri);
        // Bucket root directory already exists and cannot be created
        if (path.equals(DELIMITER)) {
            return true;
        }
        PutObjectRequest putObjectRequest = PutObjectRequest.builder().bucket(uri.getHost()).key(path).build();
        PutObjectResponse putObjectResponse = s3Client.putObject(putObjectRequest, RequestBody.fromBytes(new byte[0]));
        return putObjectResponse.sdkHttpResponse().isSuccessful();
    } catch (Throwable t) {
        throw new IOException(t);
    }
}
Also used : PutObjectResponse(software.amazon.awssdk.services.s3.model.PutObjectResponse) IOException(java.io.IOException) PutObjectRequest(software.amazon.awssdk.services.s3.model.PutObjectRequest)
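
This example uses the AWS SDK for Java v2, where PutObjectRequest is assembled with a builder and the payload is passed separately as a RequestBody. A minimal sketch of uploading actual content through the same v2 API, with placeholder bucket and key names:

import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.PutObjectResponse;

public class PutObjectV2Sketch {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            PutObjectRequest request = PutObjectRequest.builder()
                    .bucket("example-bucket")
                    .key("example-key.txt")
                    .contentType("text/plain")
                    .build();
            // in v2 the payload is supplied separately from the request object
            PutObjectResponse response = s3.putObject(request, RequestBody.fromString("hello, world"));
            System.out.println(response.eTag());
        }
    }
}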

Aggregations

PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest): 301
ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata): 193
ByteArrayInputStream (java.io.ByteArrayInputStream): 113
Test (org.junit.Test): 113
PutObjectRequest (software.amazon.awssdk.services.s3.model.PutObjectRequest): 78
File (java.io.File): 68
IOException (java.io.IOException): 65
InputStream (java.io.InputStream): 55
S3FileTransferRequestParamsDto (org.finra.herd.model.dto.S3FileTransferRequestParamsDto): 42
AmazonClientException (com.amazonaws.AmazonClientException): 40
PutObjectResult (com.amazonaws.services.s3.model.PutObjectResult): 39
Upload (com.amazonaws.services.s3.transfer.Upload): 37
AmazonServiceException (com.amazonaws.AmazonServiceException): 35
Test (org.junit.jupiter.api.Test): 30
AmazonS3 (com.amazonaws.services.s3.AmazonS3): 28
AmazonS3Exception (com.amazonaws.services.s3.model.AmazonS3Exception): 20
Date (java.util.Date): 20
BusinessObjectDataKey (org.finra.herd.model.api.xml.BusinessObjectDataKey): 20
StorageUnitEntity (org.finra.herd.model.jpa.StorageUnitEntity): 20
RequestBody (software.amazon.awssdk.core.sync.RequestBody): 19