Example 51 with ObjectMetadata

Use of com.amazonaws.services.s3.model.ObjectMetadata in project herd by FINRAOS.

In class MockS3OperationsImpl, method restoreObject:

@Override
public void restoreObject(RestoreObjectRequest requestRestore, AmazonS3 s3Client) {
    if (requestRestore.getKey().endsWith(MockAwsOperationsHelper.AMAZON_THROTTLING_EXCEPTION)) {
        AmazonServiceException throttlingException = new AmazonServiceException("test throttling exception");
        throttlingException.setErrorCode("ThrottlingException");
        throw throttlingException;
    } else if (MOCK_S3_BUCKET_NAME_NO_SUCH_BUCKET_EXCEPTION.equals(requestRestore.getBucketName())) {
        AmazonServiceException amazonServiceException = new AmazonServiceException(S3Operations.ERROR_CODE_NO_SUCH_BUCKET);
        amazonServiceException.setStatusCode(404);
        throw amazonServiceException;
    } else if (MOCK_S3_BUCKET_NAME_ACCESS_DENIED.equals(requestRestore.getBucketName())) {
        AmazonServiceException amazonServiceException = new AmazonServiceException(S3Operations.ERROR_CODE_ACCESS_DENIED);
        amazonServiceException.setStatusCode(403);
        throw amazonServiceException;
    } else if (MOCK_S3_BUCKET_NAME_INTERNAL_ERROR.equals(requestRestore.getBucketName()) || requestRestore.getKey().endsWith(MOCK_S3_FILE_NAME_SERVICE_EXCEPTION)) {
        throw new AmazonServiceException(S3Operations.ERROR_CODE_INTERNAL_ERROR);
    } else {
        MockS3Bucket mockS3Bucket = getOrCreateBucket(requestRestore.getBucketName());
        MockS3Object mockS3Object = mockS3Bucket.getObjects().get(requestRestore.getKey());
        if (mockS3Object == null) {
            AmazonServiceException amazonServiceException = new AmazonServiceException(S3Operations.ERROR_CODE_NO_SUCH_KEY);
            amazonServiceException.setStatusCode(404);
            throw amazonServiceException;
        }
        // Get object metadata.
        ObjectMetadata objectMetadata = mockS3Object.getObjectMetadata();
        // Fail if the object is not in Glacier.
        if (!StorageClass.Glacier.toString().equals(objectMetadata.getStorageClass())) {
            AmazonServiceException amazonServiceException = new AmazonServiceException("object is not in Glacier");
            throw amazonServiceException;
        }
        // Fail if the object is already being restored.
        if (objectMetadata.getOngoingRestore()) {
            AmazonServiceException amazonServiceException = new AmazonServiceException("object is already being restored");
            throw amazonServiceException;
        }
        // Update the object metadata to indicate that there is an ongoing restore request.
        objectMetadata.setOngoingRestore(true);
    }
}
Also used: AmazonServiceException (com.amazonaws.AmazonServiceException), ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata)
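
For comparison, here is a minimal sketch of the real restore call this mock stands in for, assuming the AWS SDK for Java v1; the bucket name, key, and expiration period are placeholder values, not taken from herd.

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.RestoreObjectRequest;

public class RestoreRequestSketch {
    public static void main(String[] args) {
        AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
        // Ask S3 to restore an archived (Glacier) object and keep the temporary copy for 7 days.
        RestoreObjectRequest restoreObjectRequest = new RestoreObjectRequest("my-bucket", "my/key.txt").withExpirationInDays(7);
        try {
            s3Client.restoreObject(restoreObjectRequest);
        } catch (AmazonServiceException e) {
            // The mock above simulates these failure modes: ThrottlingException, NoSuchBucket (404), AccessDenied (403), InternalError.
            System.err.println(e.getErrorCode() + " (HTTP " + e.getStatusCode() + ")");
        }
    }
}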

Example 52 with ObjectMetadata

Use of com.amazonaws.services.s3.model.ObjectMetadata in project herd by FINRAOS.

In class BusinessObjectDataFinalizeRestoreHelperServiceTest, method testExecuteS3SpecificStepsGlacierS3FileStillRestoring:

@Test
public void testExecuteS3SpecificStepsGlacierS3FileStillRestoring() throws Exception {
    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(TEST_S3_KEY_PREFIX + "/").build();
    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey = new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE, SUBPARTITION_VALUES, DATA_VERSION);
    // Create a business object data restore DTO.
    BusinessObjectDataRestoreDto businessObjectDataRestoreDto = new BusinessObjectDataRestoreDto(businessObjectDataKey, STORAGE_NAME, NO_S3_ENDPOINT, S3_BUCKET_NAME, TEST_S3_KEY_PREFIX, NO_STORAGE_UNIT_STATUS, NO_STORAGE_UNIT_STATUS, Arrays.asList(new StorageFile(TEST_S3_KEY_PREFIX + "/" + LOCAL_FILE, FILE_SIZE_1_KB, NO_ROW_COUNT)), NO_EXCEPTION);
    try {
        // Put a "still restoring" Glacier storage class S3 file in the Glacier S3 bucket.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
        metadata.setOngoingRestore(true);
        s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, String.format("%s/%s", TEST_S3_KEY_PREFIX, LOCAL_FILE), new ByteArrayInputStream(new byte[(int) FILE_SIZE_1_KB]), metadata), NO_S3_CLIENT);
        // Try to execute S3 specific steps to finalize a restore for the storage unit when Glacier S3 file is still restoring.
        try {
            businessObjectDataFinalizeRestoreHelperService.executeS3SpecificSteps(businessObjectDataRestoreDto);
            fail();
        } catch (IllegalArgumentException e) {
            assertEquals(String.format("Archived Glacier S3 file \"%s/%s\" is not restored. " + "StorageClass {GLACIER}, OngoingRestore flag {true}, S3 bucket name {%s}", TEST_S3_KEY_PREFIX, LOCAL_FILE, S3_BUCKET_NAME), e.getMessage());
        }
        // Validate that we have a Glacier S3 file at the expected S3 location.
        assertEquals(1, s3Dao.listDirectory(s3FileTransferRequestParamsDto).size());
    } finally {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty()) {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();
    }
}
Also used: S3FileTransferRequestParamsDto (org.finra.herd.model.dto.S3FileTransferRequestParamsDto), ByteArrayInputStream (java.io.ByteArrayInputStream), StorageFile (org.finra.herd.model.api.xml.StorageFile), BusinessObjectDataKey (org.finra.herd.model.api.xml.BusinessObjectDataKey), BusinessObjectDataRestoreDto (org.finra.herd.model.dto.BusinessObjectDataRestoreDto), ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata), PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest), Test (org.junit.Test)
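
The test above drives the "still restoring" branch by staging Glacier metadata with the ongoing-restore flag set to true. A minimal sketch of the check this exercises, written as a hypothetical helper (isRestored is not a herd method), assuming the AWS SDK for Java v1:

import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.StorageClass;

public class RestoreStatusSketch {
    // An archived file is usable once S3 reports that no restore is in flight:
    // the storage class stays GLACIER, but the ongoing-restore flag flips to false.
    static boolean isRestored(ObjectMetadata objectMetadata) {
        boolean isGlacier = StorageClass.Glacier.toString().equals(objectMetadata.getStorageClass());
        // getOngoingRestore() returns null when no restore was requested and true while one is in flight.
        return !isGlacier || Boolean.FALSE.equals(objectMetadata.getOngoingRestore());
    }
}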

Example 53 with ObjectMetadata

Use of com.amazonaws.services.s3.model.ObjectMetadata in project herd by FINRAOS.

In class BusinessObjectDataFinalizeRestoreHelperServiceTest, method testExecuteS3SpecificSteps:

@Test
public void testExecuteS3SpecificSteps() throws Exception {
    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(TEST_S3_KEY_PREFIX + "/").build();
    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey = new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE, SUBPARTITION_VALUES, DATA_VERSION);
    // Create a list of storage files.
    List<StorageFile> storageFiles = new ArrayList<>();
    for (String filePath : LOCAL_FILES) {
        storageFiles.add(new StorageFile(TEST_S3_KEY_PREFIX + "/" + filePath, FILE_SIZE_1_KB, NO_ROW_COUNT));
    }
    // Create a business object data restore DTO.
    BusinessObjectDataRestoreDto businessObjectDataRestoreDto = new BusinessObjectDataRestoreDto(businessObjectDataKey, STORAGE_NAME, NO_S3_ENDPOINT, S3_BUCKET_NAME, TEST_S3_KEY_PREFIX, NO_STORAGE_UNIT_STATUS, NO_STORAGE_UNIT_STATUS, storageFiles, NO_EXCEPTION);
    try {
        // Put relative Glacier storage class S3 files in the S3 bucket.
        for (StorageFile storageFile : storageFiles) {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
            metadata.setOngoingRestore(false);
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFile.getFilePath(), new ByteArrayInputStream(new byte[storageFile.getFileSizeBytes().intValue()]), metadata), NO_S3_CLIENT);
        }
        // Execute S3 specific steps to finalize a restore for the Glacier storage unit.
        businessObjectDataFinalizeRestoreHelperService.executeS3SpecificSteps(businessObjectDataRestoreDto);
        // Validate that we have the restored S3 files at the expected S3 location.
        assertEquals(storageFiles.size(), s3Dao.listDirectory(s3FileTransferRequestParamsDto).size());
    } finally {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty()) {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();
    }
}
Also used: S3FileTransferRequestParamsDto (org.finra.herd.model.dto.S3FileTransferRequestParamsDto), ByteArrayInputStream (java.io.ByteArrayInputStream), StorageFile (org.finra.herd.model.api.xml.StorageFile), ArrayList (java.util.ArrayList), BusinessObjectDataKey (org.finra.herd.model.api.xml.BusinessObjectDataKey), BusinessObjectDataRestoreDto (org.finra.herd.model.dto.BusinessObjectDataRestoreDto), ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata), PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest), Test (org.junit.Test)
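
In this happy-path test the objects are staged with the Glacier storage-class header and ongoingRestore set to false, which is the shape the finalize-restore step expects. Below is a minimal sketch that stages the same metadata and additionally sets a restore expiration time; note that against real S3 these headers are written by the service after a restore completes, so staging them this way only makes sense with a mock such as the s3Operations used here. Bucket and key names are placeholders.

import java.io.ByteArrayInputStream;
import java.util.Calendar;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.Headers;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.StorageClass;

public class RestoredMetadataSketch {
    public static void main(String[] args) {
        // Stage metadata in the shape of a completed restore: GLACIER storage class,
        // no ongoing restore, and a restored copy that expires in 7 days.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
        metadata.setOngoingRestore(false);
        Calendar expiration = Calendar.getInstance();
        expiration.add(Calendar.DAY_OF_MONTH, 7);
        metadata.setRestoreExpirationTime(expiration.getTime());

        AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
        s3Client.putObject(new PutObjectRequest("my-bucket", "my/key.txt", new ByteArrayInputStream(new byte[1]), metadata));
    }
}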

Example 54 with ObjectMetadata

Use of com.amazonaws.services.s3.model.ObjectMetadata in project herd by FINRAOS.

In class S3DaoTest, method testRestoreObjects:

@Test
public void testRestoreObjects() {
    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata), null);
    // Initiate a restore request for the test S3 file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS);
    // Validate that there is an ongoing restore request for this object.
    ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, null);
    assertTrue(objectMetadata.getOngoingRestore());
}
Also used: S3FileTransferRequestParamsDto (org.finra.herd.model.dto.S3FileTransferRequestParamsDto), ByteArrayInputStream (java.io.ByteArrayInputStream), ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata), File (java.io.File), PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest), Test (org.junit.Test)
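
After s3Dao.restoreObjects returns, the restore has only been initiated; getOngoingRestore() stays true until S3 finishes. A minimal polling sketch against the real AWS SDK for Java v1 (the bucket, key, and one-minute interval are placeholder choices):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class RestorePollingSketch {
    public static void main(String[] args) throws InterruptedException {
        AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
        while (true) {
            ObjectMetadata objectMetadata = s3Client.getObjectMetadata("my-bucket", "my/key.txt");
            // null: no restore requested; true: restore in flight; false: restored copy is available.
            if (Boolean.FALSE.equals(objectMetadata.getOngoingRestore())) {
                break;
            }
            Thread.sleep(60_000L);
        }
    }
}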

Example 55 with ObjectMetadata

Use of com.amazonaws.services.s3.model.ObjectMetadata in project herd by FINRAOS.

In class S3DaoTest, method testS3FileExists:

/**
 * The method is successful when both the bucket and the key exist.
 */
@Test
public void testS3FileExists() {
    String expectedKey = "foo";
    String expectedValue = "bar";
    ByteArrayInputStream inputStream = new ByteArrayInputStream((expectedKey + "=" + expectedValue).getBytes());
    PutObjectRequest putObjectRequest = new PutObjectRequest(S3_BUCKET_NAME, TARGET_S3_KEY, inputStream, new ObjectMetadata());
    s3Operations.putObject(putObjectRequest, null);
    S3FileTransferRequestParamsDto params = s3DaoTestHelper.getTestS3FileTransferRequestParamsDto();
    params.setS3BucketName(S3_BUCKET_NAME);
    params.setS3KeyPrefix(TARGET_S3_KEY);
    Assert.assertTrue(s3Dao.s3FileExists(params));
}
Also used: S3FileTransferRequestParamsDto (org.finra.herd.model.dto.S3FileTransferRequestParamsDto), ByteArrayInputStream (java.io.ByteArrayInputStream), ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata), PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest), Test (org.junit.Test)
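
The test relies on the mock's s3FileExists behavior. For illustration only (this is not the herd S3Dao implementation), a minimal existence check against the real AWS SDK for Java v1 might look like the following; a missing bucket or a missing key both surface as a 404 from getObjectMetadata:

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class S3FileExistsSketch {
    static boolean s3FileExists(AmazonS3 s3Client, String bucketName, String key) {
        try {
            // Succeeds only when both the bucket and the key exist.
            s3Client.getObjectMetadata(bucketName, key);
            return true;
        } catch (AmazonServiceException e) {
            if (e.getStatusCode() == 404) {
                return false;
            }
            throw e;
        }
    }

    public static void main(String[] args) {
        AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
        System.out.println(s3FileExists(s3Client, "my-bucket", "my/key.txt"));
    }
}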

Aggregations

ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata): 163
PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest): 76
ByteArrayInputStream (java.io.ByteArrayInputStream): 52
Test (org.junit.Test): 47
IOException (java.io.IOException): 33
File (java.io.File): 27
AmazonClientException (com.amazonaws.AmazonClientException): 25
AmazonServiceException (com.amazonaws.AmazonServiceException): 22
S3FileTransferRequestParamsDto (org.finra.herd.model.dto.S3FileTransferRequestParamsDto): 21
InputStream (java.io.InputStream): 20
DataStoreException (org.apache.jackrabbit.core.data.DataStoreException): 18
PutObjectResult (com.amazonaws.services.s3.model.PutObjectResult): 15
Upload (com.amazonaws.services.s3.transfer.Upload): 15
CopyObjectRequest (com.amazonaws.services.s3.model.CopyObjectRequest): 11
AmazonS3Client (com.amazonaws.services.s3.AmazonS3Client): 10
Date (java.util.Date): 9
BusinessObjectDataKey (org.finra.herd.model.api.xml.BusinessObjectDataKey): 9
Copy (com.amazonaws.services.s3.transfer.Copy): 8
S3Object (com.amazonaws.services.s3.model.S3Object): 7
InterruptedIOException (java.io.InterruptedIOException): 7