use of com.amazonaws.services.s3.model.BucketLifecycleConfiguration.Transition in project herd by FINRAOS.
the class StoragePolicyProcessorJmsMessageListenerTest method testProcessMessage.
@Test
public void testProcessMessage() throws Exception {
    // Build the expected S3 key prefix for the test business object data.
    String s3KeyPrefix =
        getExpectedS3KeyPrefix(BDEF_NAMESPACE, DATA_PROVIDER_NAME, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_KEY,
            PARTITION_VALUE, null, null, DATA_VERSION);

    // Create an S3FileTransferRequestParamsDto to access the source S3 bucket location.
    // Since the test S3 key prefix represents a directory, add a trailing '/' character to it.
    S3FileTransferRequestParamsDto sourceS3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(s3KeyPrefix + "/").build();

    // Create and persist the relevant database entities.
    storagePolicyServiceTestHelper
        .createDatabaseEntitiesForStoragePolicyTesting(STORAGE_POLICY_NAMESPACE_CD, Arrays.asList(STORAGE_POLICY_RULE_TYPE), BDEF_NAMESPACE, BDEF_NAME,
            Arrays.asList(FORMAT_FILE_TYPE_CODE), Arrays.asList(STORAGE_NAME), Arrays.asList(StoragePolicyTransitionTypeEntity.GLACIER));

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE,
            NO_SUBPARTITION_VALUES, DATA_VERSION);

    // Create and persist an ENABLED storage unit in the source storage.
    StorageUnitEntity sourceStorageUnitEntity = storageUnitDaoTestHelper
        .createStorageUnitEntity(STORAGE_NAME, businessObjectDataKey, LATEST_VERSION_FLAG_SET, BusinessObjectDataStatusEntity.VALID,
            StorageUnitStatusEntity.ENABLED, NO_STORAGE_DIRECTORY_PATH);

    // Add storage files to the source storage unit.
    for (String filePath : LOCAL_FILES) {
        storageFileDaoTestHelper.createStorageFileEntity(sourceStorageUnitEntity, s3KeyPrefix + "/" + filePath, FILE_SIZE_1_KB, ROW_COUNT_1000);
    }

    // Get the source storage files.
    List<StorageFile> sourceStorageFiles = storageFileHelper.createStorageFilesFromEntities(sourceStorageUnitEntity.getStorageFiles());

    // Create a storage policy key.
    StoragePolicyKey storagePolicyKey = new StoragePolicyKey(STORAGE_POLICY_NAMESPACE_CD, STORAGE_POLICY_NAME);

    // Create and persist a storage policy entity.
    storagePolicyDaoTestHelper
        .createStoragePolicyEntity(storagePolicyKey, STORAGE_POLICY_RULE_TYPE, STORAGE_POLICY_RULE_VALUE, BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE,
            FORMAT_FILE_TYPE_CODE, STORAGE_NAME, StoragePolicyTransitionTypeEntity.GLACIER, StoragePolicyStatusEntity.ENABLED, INITIAL_VERSION,
            LATEST_VERSION_FLAG_SET);

    // Override configuration to specify settings required for testing.
    Map<String, Object> overrideMap = new HashMap<>();
    overrideMap.put(ConfigurationValue.S3_ARCHIVE_TO_GLACIER_ROLE_ARN.getKey(), S3_OBJECT_TAGGER_ROLE_ARN);
    overrideMap.put(ConfigurationValue.S3_ARCHIVE_TO_GLACIER_ROLE_SESSION_NAME.getKey(), S3_OBJECT_TAGGER_ROLE_SESSION_NAME);
    modifyPropertySourceInEnvironment(overrideMap);

    try {
        // Put the relevant S3 files into the source S3 bucket.
        for (StorageFile storageFile : sourceStorageFiles) {
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFile.getFilePath(),
                new ByteArrayInputStream(new byte[storageFile.getFileSizeBytes().intValue()]), null), null);
        }

        // Perform a storage policy transition.
        storagePolicyProcessorJmsMessageListener
            .processMessage(jsonHelper.objectToJson(new StoragePolicySelection(businessObjectDataKey, storagePolicyKey, INITIAL_VERSION)), null);

        // Validate the status of the source storage unit.
        assertEquals(StorageUnitStatusEntity.ARCHIVED, sourceStorageUnitEntity.getStatus().getCode());
    }
    finally {
        // Delete the test files from S3 storage.
        if (!s3Dao.listDirectory(sourceS3FileTransferRequestParamsDto).isEmpty()) {
            s3Dao.deleteDirectory(sourceS3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();

        // Restore the property sources so we don't affect other tests.
        restorePropertySourceInEnvironment();
    }
}
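The transition type above is GLACIER and the configuration override points the listener at an S3 object-tagger role, so the processor presumably archives data by tagging the source S3 objects so that a matching bucket lifecycle rule can transition them. As a minimal, self-contained sketch of that underlying tagging mechanism with the plain AWS SDK for Java v1 (the bucket name, object key, and tag values below are hypothetical illustrations, not herd's actual values):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.GetObjectTaggingRequest;
import com.amazonaws.services.s3.model.ObjectTagging;
import com.amazonaws.services.s3.model.SetObjectTaggingRequest;
import com.amazonaws.services.s3.model.Tag;
import java.util.Collections;

public class ObjectTaggingSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Tag the object; a lifecycle rule with a matching LifecycleTagPredicate
        // (see the next example) can then transition it to Glacier.
        s3.setObjectTagging(new SetObjectTaggingRequest("example-bucket", "example/key.txt",
            new ObjectTagging(Collections.singletonList(new Tag("archive", "true")))));

        // Read the tag set back to confirm it was applied.
        System.out.println(s3.getObjectTagging(new GetObjectTaggingRequest("example-bucket", "example/key.txt")).getTagSet());
    }
}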
use of com.amazonaws.services.s3.model.BucketLifecycleConfiguration.Transition in project aws-doc-sdk-examples by awsdocs.
the class LifecycleConfiguration method main.
public static void main(String[] args) throws IOException {
    Regions clientRegion = Regions.DEFAULT_REGION;
    String bucketName = "*** Bucket name ***";

    // Create a rule to archive objects with the "glacierobjects/" prefix to Glacier immediately.
    BucketLifecycleConfiguration.Rule rule1 = new BucketLifecycleConfiguration.Rule()
            .withId("Archive immediately rule")
            .withFilter(new LifecycleFilter(new LifecyclePrefixPredicate("glacierobjects/")))
            .addTransition(new Transition().withDays(0).withStorageClass(StorageClass.Glacier))
            .withStatus(BucketLifecycleConfiguration.ENABLED);

    // Create a rule to transition objects to the Standard-Infrequent Access storage class after 30 days,
    // then to Glacier after 365 days. Amazon S3 deletes the objects after 3650 days.
    // The rule applies to all objects with the tag "archive" set to "true".
    BucketLifecycleConfiguration.Rule rule2 = new BucketLifecycleConfiguration.Rule()
            .withId("Archive and then delete rule")
            .withFilter(new LifecycleFilter(new LifecycleTagPredicate(new Tag("archive", "true"))))
            .addTransition(new Transition().withDays(30).withStorageClass(StorageClass.StandardInfrequentAccess))
            .addTransition(new Transition().withDays(365).withStorageClass(StorageClass.Glacier))
            .withExpirationInDays(3650)
            .withStatus(BucketLifecycleConfiguration.ENABLED);

    // Add the rules to a new BucketLifecycleConfiguration.
    BucketLifecycleConfiguration configuration = new BucketLifecycleConfiguration()
            .withRules(Arrays.asList(rule1, rule2));
    try {
        AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
                .withCredentials(new ProfileCredentialsProvider())
                .withRegion(clientRegion)
                .build();

        // Save the configuration.
        s3Client.setBucketLifecycleConfiguration(bucketName, configuration);

        // Retrieve the configuration.
        configuration = s3Client.getBucketLifecycleConfiguration(bucketName);

        // Add a new rule with both a prefix predicate and a tag predicate.
        configuration.getRules().add(new BucketLifecycleConfiguration.Rule()
                .withId("NewRule")
                .withFilter(new LifecycleFilter(new LifecycleAndOperator(Arrays.asList(
                        new LifecyclePrefixPredicate("YearlyDocuments/"),
                        new LifecycleTagPredicate(new Tag("expire_after", "ten_years"))))))
                .withExpirationInDays(3650)
                .withStatus(BucketLifecycleConfiguration.ENABLED));

        // Save the configuration.
        s3Client.setBucketLifecycleConfiguration(bucketName, configuration);

        // Retrieve the configuration.
        configuration = s3Client.getBucketLifecycleConfiguration(bucketName);
        // Verify that the configuration now has three rules.
        System.out.println("Expected # of rules = 3; found: " + configuration.getRules().size());
        // Delete the configuration.
        s3Client.deleteBucketLifecycleConfiguration(bucketName);

        // Verify that the configuration has been deleted by attempting to retrieve it.
        configuration = s3Client.getBucketLifecycleConfiguration(bucketName);
        String s = (configuration == null) ? "No configuration found." : "Configuration found.";
        System.out.println(s);
    } catch (AmazonServiceException e) {
        // The call was transmitted successfully, but Amazon S3 couldn't process
        // it, so it returned an error response.
        e.printStackTrace();
    } catch (SdkClientException e) {
        // Amazon S3 couldn't be contacted for a response, or the client
        // couldn't parse the response from Amazon S3.
        e.printStackTrace();
    }
}
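Because getBucketLifecycleConfiguration returns each rule with its Transition objects attached, a retrieved configuration can be inspected in detail rather than just counted. A short helper sketch, assuming the same v1 classes imported by the example above; note that getTransitions() can return null for a rule that only sets an expiration:

private static void printTransitions(BucketLifecycleConfiguration configuration) {
    for (BucketLifecycleConfiguration.Rule rule : configuration.getRules()) {
        if (rule.getTransitions() == null) {
            // Rules with no transitions (expiration-only rules) carry a null list.
            continue;
        }
        for (Transition transition : rule.getTransitions()) {
            System.out.println(rule.getId() + ": after " + transition.getDays()
                    + " day(s) -> " + transition.getStorageClassAsString());
        }
    }
}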