Example 6 with StorageClass

use of com.amazonaws.services.s3.model.StorageClass in project Singularity by HubSpot.

the class SingularityS3Uploader method uploadSingle.

protected void uploadSingle(int sequence, Path file) throws Exception {
    Retryer<Boolean> retryer = RetryerBuilder.<Boolean>newBuilder()
        .retryIfExceptionOfType(AmazonS3Exception.class)
        .retryIfRuntimeException()
        .withWaitStrategy(WaitStrategies.fixedWait(configuration.getRetryWaitMs(), TimeUnit.MILLISECONDS))
        .withStopStrategy(StopStrategies.stopAfterAttempt(configuration.getRetryCount()))
        .build();
    retryer.call(() -> {
        final long start = System.currentTimeMillis();
        final String key = SingularityS3FormatHelper.getKey(
            uploadMetadata.getS3KeyFormat(), sequence, Files.getLastModifiedTime(file).toMillis(),
            Objects.toString(file.getFileName()), hostname);
        long fileSizeBytes = Files.size(file);
        LOG.info("{} Uploading {} to {}/{} (size {})", logIdentifier, file, bucketName, key, fileSizeBytes);
        try {
            ObjectMetadata objectMetadata = new ObjectMetadata();
            UploaderFileAttributes fileAttributes = getFileAttributes(file);
            if (fileAttributes.getStartTime().isPresent()) {
                objectMetadata.addUserMetadata(SingularityS3Log.LOG_START_S3_ATTR, fileAttributes.getStartTime().get().toString());
                LOG.debug("Added extra metadata for object ({}:{})", SingularityS3Log.LOG_START_S3_ATTR, fileAttributes.getStartTime().get());
            }
            if (fileAttributes.getEndTime().isPresent()) {
                objectMetadata.addUserMetadata(SingularityS3Log.LOG_END_S3_ATTR, fileAttributes.getEndTime().get().toString());
                LOG.debug("Added extra metadata for object ({}:{})", SingularityS3Log.LOG_END_S3_ATTR, fileAttributes.getEndTime().get());
            }
            for (SingularityS3UploaderContentHeaders contentHeaders : configuration.getS3ContentHeaders()) {
                if (file.toString().endsWith(contentHeaders.getFilenameEndsWith())) {
                    LOG.debug("{} Using content headers {} for file {}", logIdentifier, contentHeaders, file);
                    if (contentHeaders.getContentType().isPresent()) {
                        objectMetadata.setContentType(contentHeaders.getContentType().get());
                    }
                    if (contentHeaders.getContentEncoding().isPresent()) {
                        objectMetadata.setContentEncoding(contentHeaders.getContentEncoding().get());
                    }
                    break;
                }
            }
            Optional<StorageClass> maybeStorageClass = Optional.absent();
            if (shouldApplyStorageClass(fileSizeBytes, uploadMetadata.getS3StorageClass())) {
                LOG.debug("{} adding storage class {} to {}", logIdentifier, uploadMetadata.getS3StorageClass().get(), file);
                maybeStorageClass = Optional.of(StorageClass.fromValue(uploadMetadata.getS3StorageClass().get()));
            }
            LOG.debug("Uploading object with metadata {}", objectMetadata);
            if (fileSizeBytes > configuration.getMaxSingleUploadSizeBytes()) {
                multipartUpload(key, file.toFile(), objectMetadata, maybeStorageClass);
            } else {
                PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key, file.toFile()).withMetadata(objectMetadata);
                if (maybeStorageClass.isPresent()) {
                    putObjectRequest.setStorageClass(maybeStorageClass.get());
                }
                if (uploadMetadata.getEncryptionKey().isPresent()) {
                    putObjectRequest.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(uploadMetadata.getEncryptionKey().get()));
                }
                s3Client.putObject(putObjectRequest);
            }
        } catch (AmazonS3Exception se) {
            LOG.warn("{} Couldn't upload {} due to {} - {}", logIdentifier, file, se.getErrorCode(), se.getErrorMessage(), se);
            throw se;
        } catch (Exception e) {
            LOG.warn("Exception uploading {}", file, e);
            throw e;
        }
        LOG.info("{} Uploaded {} in {}", logIdentifier, key, JavaUtils.duration(start));
        return true;
    });
}
Also used : StorageClass(com.amazonaws.services.s3.model.StorageClass) AmazonS3Exception(com.amazonaws.services.s3.model.AmazonS3Exception) SSEAwsKeyManagementParams(com.amazonaws.services.s3.model.SSEAwsKeyManagementParams) SingularityS3UploaderContentHeaders(com.hubspot.singularity.s3uploader.config.SingularityS3UploaderContentHeaders) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata) PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest)
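
A minimal standalone sketch of the storage-class pattern above, assuming the AWS SDK for Java v1 is on the classpath; the bucket name, key, file, and size threshold are hypothetical stand-ins for the uploader's configuration, and the guard only loosely mirrors shouldApplyStorageClass:

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.StorageClass;

public class StorageClassUploadSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        File file = new File("application.log");
        PutObjectRequest request = new PutObjectRequest("my-log-bucket", "logs/application.log", file);
        // StorageClass.fromValue maps a configured string such as "STANDARD_IA"
        // to the SDK enum and throws IllegalArgumentException for unknown values.
        StorageClass storageClass = StorageClass.fromValue("STANDARD_IA");
        // Hypothetical size guard: apply the configured class only to larger files.
        if (file.length() > 16_384) {
            request.setStorageClass(storageClass);
        }
        s3.putObject(request);
    }
}

Setting the class on the request adds the x-amz-storage-class header to the PUT; omitting it leaves the object in the STANDARD class.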

Example 7 with StorageClass

use of com.amazonaws.services.s3.model.StorageClass in project Singularity by HubSpot.

the class SingularityS3Uploader method multipartUpload.

private void multipartUpload(String key, File file, ObjectMetadata objectMetadata, Optional<StorageClass> maybeStorageClass) throws Exception {
    List<PartETag> partETags = new ArrayList<>();
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, key, objectMetadata);
    if (maybeStorageClass.isPresent()) {
        initRequest.setStorageClass(maybeStorageClass.get());
    }
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    long contentLength = file.length();
    long partSize = configuration.getUploadPartSize();
    try {
        long filePosition = 0;
        for (int i = 1; filePosition < contentLength; i++) {
            partSize = Math.min(partSize, (contentLength - filePosition));
            UploadPartRequest uploadRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(key)
                .withUploadId(initResponse.getUploadId())
                .withPartNumber(i)
                .withFileOffset(filePosition)
                .withFile(file)
                .withPartSize(partSize);
            partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
            filePosition += partSize;
        }
        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(bucketName, key, initResponse.getUploadId(), partETags);
        s3Client.completeMultipartUpload(completeRequest);
    } catch (Exception e) {
        s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, key, initResponse.getUploadId()));
        Throwables.propagate(e);
    }
}
Also used : InitiateMultipartUploadResult(com.amazonaws.services.s3.model.InitiateMultipartUploadResult) ArrayList(java.util.ArrayList) InitiateMultipartUploadRequest(com.amazonaws.services.s3.model.InitiateMultipartUploadRequest) UploadPartRequest(com.amazonaws.services.s3.model.UploadPartRequest) AbortMultipartUploadRequest(com.amazonaws.services.s3.model.AbortMultipartUploadRequest) PartETag(com.amazonaws.services.s3.model.PartETag) AmazonS3Exception(com.amazonaws.services.s3.model.AmazonS3Exception) CompleteMultipartUploadRequest(com.amazonaws.services.s3.model.CompleteMultipartUploadRequest)
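
When part-level control is not required, the SDK's TransferManager implements the same chunked upload, including abort on failure, internally; the storage class set on the PutObjectRequest carries through to the multipart initiation, matching what the loop above does with setStorageClass on the InitiateMultipartUploadRequest. A minimal sketch, with hypothetical bucket, key, and file:

import java.io.File;

import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.StorageClass;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

public class TransferManagerSketch {
    public static void main(String[] args) throws InterruptedException {
        TransferManager transferManager = TransferManagerBuilder.standard()
            .withS3Client(AmazonS3ClientBuilder.defaultClient())
            .build();
        try {
            PutObjectRequest request = new PutObjectRequest("my-bucket", "big/archive.tar", new File("archive.tar"))
                .withStorageClass(StorageClass.StandardInfrequentAccess);
            // Blocks until the upload (multipart for large files) completes or throws.
            Upload upload = transferManager.upload(request);
            upload.waitForCompletion();
        } finally {
            transferManager.shutdownNow();
        }
    }
}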

Example 8 with StorageClass

use of com.amazonaws.services.s3.model.StorageClass in project herd by FINRAOS.

the class S3DaoImpl method validateGlacierS3FilesRestored.

@Override
public void validateGlacierS3FilesRestored(S3FileTransferRequestParamsDto params) throws RuntimeException {
    LOGGER.info("Checking for already restored Glacier storage class objects... s3KeyPrefix=\"{}\" s3BucketName=\"{}\" s3KeyCount={}", params.getS3KeyPrefix(), params.getS3BucketName(), params.getFiles().size());
    if (!CollectionUtils.isEmpty(params.getFiles())) {
        // Initialize the key up front so it is available for the error message in the catch block.
        String key = params.getFiles().get(0).getPath().replaceAll("\\\\", "/");
        try {
            // Create an S3 client.
            AmazonS3Client s3Client = getAmazonS3(params);
            try {
                for (File file : params.getFiles()) {
                    key = file.getPath().replaceAll("\\\\", "/");
                    ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(params.getS3BucketName(), key, s3Client);
                    // Fail if an object that has not yet been restored is detected.
                    if (BooleanUtils.isNotFalse(objectMetadata.getOngoingRestore())) {
                        throw new IllegalArgumentException(String.format("Archived Glacier S3 file \"%s\" is not restored. StorageClass {%s}, OngoingRestore flag {%s}, S3 bucket name {%s}", key, objectMetadata.getStorageClass(), objectMetadata.getOngoingRestore(), params.getS3BucketName()));
                    }
                }
            } finally {
                s3Client.shutdown();
            }
        } catch (AmazonServiceException e) {
            throw new IllegalStateException(String.format("Fail to check restore status for \"%s\" key in \"%s\" bucket. Reason: %s", key, params.getS3BucketName(), e.getMessage()), e);
        }
    }
}
Also used : AmazonS3Client(com.amazonaws.services.s3.AmazonS3Client) AmazonServiceException(com.amazonaws.AmazonServiceException) File(java.io.File) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata)
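
The method above only verifies restore state; initiating the restore is a separate, earlier call. A minimal sketch of that preceding step, assuming a hypothetical bucket and key and a 7-day lifetime for the restored copy:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.RestoreObjectRequest;

public class GlacierRestoreSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        // Ask S3 to stage a temporary readable copy of the archived object for 7 days.
        s3.restoreObject(new RestoreObjectRequest("my-bucket", "archive/data.bin", 7));
        // While the restore is in flight getOngoingRestore() returns TRUE, and it
        // flips to FALSE once the copy is available. A null value means no restore
        // was ever initiated, the case the validation above also treats as
        // "not restored" via BooleanUtils.isNotFalse.
        ObjectMetadata metadata = s3.getObjectMetadata("my-bucket", "archive/data.bin");
        System.out.println("ongoingRestore=" + metadata.getOngoingRestore());
    }
}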

Example 9 with StorageClass

use of com.amazonaws.services.s3.model.StorageClass in project herd by FINRAOS.

the class S3DaoTest method testValidateGlacierS3FilesRestoredGlacierObjectRestoreNotInitiated.

@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreNotInitiated() {
    // Put a 1 byte Glacier storage class file in S3 that has no restore initiated (OngoingRestore flag is null).
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata), null);
    // Try to validate if the Glacier S3 file is already restored.
    try {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    } catch (IllegalArgumentException e) {
        assertEquals(String.format("Archived Glacier S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {null}, S3 bucket name {%s}", TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Also used : S3FileTransferRequestParamsDto(org.finra.herd.model.dto.S3FileTransferRequestParamsDto) ByteArrayInputStream(java.io.ByteArrayInputStream) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata) File(java.io.File) PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest) Test(org.junit.Test)
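
The test forces the Glacier class by writing the raw x-amz-storage-class header onto the ObjectMetadata. Outside of tests the class is normally set on the request itself; both forms end up as the same header on the wire. A minimal sketch with hypothetical bucket and key:

import java.io.ByteArrayInputStream;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.StorageClass;

public class GlacierPutSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        ObjectMetadata metadata = new ObjectMetadata();
        // Content length is required when uploading from a stream.
        metadata.setContentLength(1);
        PutObjectRequest request = new PutObjectRequest(
            "my-bucket", "archive/one-byte-object", new ByteArrayInputStream(new byte[1]), metadata)
            .withStorageClass(StorageClass.Glacier);
        s3.putObject(request);
    }
}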

Example 10 with StorageClass

use of com.amazonaws.services.s3.model.StorageClass in project opentest by mcdcorp.

the class GetS3Metadata method run.

@Override
public void run() {
    super.run();
    String awsCredentialsProfile = this.readStringArgument("awsProfile", "default");
    String bucket = this.readStringArgument("bucket");
    String objectKey = this.readStringArgument("objectKey");
    AmazonS3 s3Client = new AmazonS3Client(new ProfileCredentialsProvider(awsCredentialsProfile));
    ObjectMetadata metadata = s3Client.getObjectMetadata(new GetObjectMetadataRequest(bucket, objectKey));
    try {
        Date expirationTime = metadata.getExpirationTime();
        if (expirationTime != null) {
            this.writeOutput("expirationTime", expirationTime.getTime());
        } else {
            this.writeOutput("expirationTime", null);
        }
        this.writeOutput("lastModified", metadata.getLastModified().getTime());
        this.writeOutput("userMetadata", metadata.getUserMetadata());
        this.writeOutput("size", metadata.getContentLength());
        this.writeOutput("storageClass", metadata.getStorageClass());
        this.writeOutput("versionId", metadata.getVersionId());
    } catch (Exception ex) {
        throw new RuntimeException(String.format("Failed to get object metadata for object key %s in bucket %s", objectKey, bucket), ex);
    }
}
Also used : AmazonS3(com.amazonaws.services.s3.AmazonS3) AmazonS3Client(com.amazonaws.services.s3.AmazonS3Client) GetObjectMetadataRequest(com.amazonaws.services.s3.model.GetObjectMetadataRequest) ProfileCredentialsProvider(com.amazonaws.auth.profile.ProfileCredentialsProvider) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata) Date(java.util.Date)
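
One caveat when reading the class this way: S3 omits the x-amz-storage-class header for STANDARD objects, so ObjectMetadata.getStorageClass() returns null for them. A minimal null-safe sketch, with hypothetical bucket and key:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.StorageClass;

public class StorageClassReadSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        ObjectMetadata metadata = s3.getObjectMetadata("my-bucket", "some/key");
        // getStorageClass() returns the raw header value, or null for STANDARD.
        String header = metadata.getStorageClass();
        StorageClass storageClass = header == null ? StorageClass.Standard : StorageClass.fromValue(header);
        System.out.println("storageClass=" + storageClass);
    }
}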

Aggregations

ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata) 9
PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest) 6
File (java.io.File) 5
AmazonS3Client (com.amazonaws.services.s3.AmazonS3Client) 3
AccessControlList (com.amazonaws.services.s3.model.AccessControlList) 3
AmazonS3Exception (com.amazonaws.services.s3.model.AmazonS3Exception) 3
CannedAccessControlList (com.amazonaws.services.s3.model.CannedAccessControlList) 3
CompleteMultipartUploadRequest (com.amazonaws.services.s3.model.CompleteMultipartUploadRequest) 3
InitiateMultipartUploadRequest (com.amazonaws.services.s3.model.InitiateMultipartUploadRequest) 3
InitiateMultipartUploadResult (com.amazonaws.services.s3.model.InitiateMultipartUploadResult) 3
PartETag (com.amazonaws.services.s3.model.PartETag) 3
UploadPartRequest (com.amazonaws.services.s3.model.UploadPartRequest) 3
ByteArrayInputStream (java.io.ByteArrayInputStream) 3
AbortMultipartUploadRequest (com.amazonaws.services.s3.model.AbortMultipartUploadRequest) 2
CompleteMultipartUploadResult (com.amazonaws.services.s3.model.CompleteMultipartUploadResult) 2
PutObjectResult (com.amazonaws.services.s3.model.PutObjectResult) 2
FileInputStream (java.io.FileInputStream) 2
InputStream (java.io.InputStream) 2
ArrayList (java.util.ArrayList) 2
Message (org.apache.camel.Message) 2