Use of com.amazonaws.services.s3.model.PutObjectRequest in project herd by FINRAOS.
The class S3DaoTest, method testTagObjects.
@Test
public void testTagObjects() {
    // Create an S3 object tag.
    Tag tag = new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE);

    // Put a file in S3.
    s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), new ObjectMetadata()), null);

    // Tag the file with an S3 object tag.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(S3_BUCKET_NAME);
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.tagObjects(params, new S3FileTransferRequestParamsDto(), tag);

    // Validate that the object got tagged.
    GetObjectTaggingResult getObjectTaggingResult = s3Operations.getObjectTagging(new GetObjectTaggingRequest(S3_BUCKET_NAME, TARGET_S3_KEY), null);
    assertEquals(Arrays.asList(tag), getObjectTaggingResult.getTagSet());
}
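For context, the tagging that herd's S3Dao.tagObjects drives roughly corresponds to the plain AWS SDK v1 calls sketched below, outside of herd's abstractions. This is a minimal sketch with placeholder bucket, key, and tag values, not the project's actual implementation.

import java.util.Collections;
import java.util.List;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.GetObjectTaggingRequest;
import com.amazonaws.services.s3.model.ObjectTagging;
import com.amazonaws.services.s3.model.SetObjectTaggingRequest;
import com.amazonaws.services.s3.model.Tag;

public class S3ObjectTaggingSketch {
    public static void main(String[] args) {
        // Placeholder bucket and key; the test takes these from shared test constants.
        String bucketName = "my-bucket";
        String key = "my/object/key.txt";

        AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();

        // Tag the existing object with a single tag, replacing any current tag set.
        Tag tag = new Tag("tagKey", "tagValue");
        s3Client.setObjectTagging(new SetObjectTaggingRequest(bucketName, key, new ObjectTagging(Collections.singletonList(tag))));

        // Read the tag set back to confirm the tag was applied.
        List<Tag> tagSet = s3Client.getObjectTagging(new GetObjectTaggingRequest(bucketName, key)).getTagSet();
        System.out.println(tagSet);
    }
}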
Use of com.amazonaws.services.s3.model.PutObjectRequest in project herd by FINRAOS.
The class BusinessObjectDataServiceDestroyBusinessObjectDataTest, method testDestroyBusinessObjectData.
@Test
public void testDestroyBusinessObjectData() throws Exception {
    // Create a primary partition value that satisfies the retention threshold check.
    String primaryPartitionValue = DateFormatUtils.format(DateUtils.addDays(new Date(), -1 * (RETENTION_PERIOD_DAYS + 1)), AbstractHerdDao.DEFAULT_SINGLE_DAY_DATE_MASK);

    // Build the expected S3 key prefix for test business object data.
    String s3KeyPrefix = getExpectedS3KeyPrefix(BDEF_NAMESPACE, DATA_PROVIDER_NAME, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_KEY, primaryPartitionValue, null, null, DATA_VERSION);

    // Create S3FileTransferRequestParamsDto to access the S3 bucket location.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(s3KeyPrefix + "/").build();

    // Create an S3 storage with the relative attributes.
    storageDaoTestHelper.createStorageEntity(STORAGE_NAME, StoragePlatformEntity.S3, Arrays.asList(
        new Attribute(configurationHelper.getProperty(ConfigurationValue.S3_ATTRIBUTE_NAME_BUCKET_NAME), AbstractServiceTest.S3_BUCKET_NAME),
        new Attribute(configurationHelper.getProperty(ConfigurationValue.S3_ATTRIBUTE_NAME_KEY_PREFIX_VELOCITY_TEMPLATE), AbstractServiceTest.S3_KEY_PREFIX_VELOCITY_TEMPLATE),
        new Attribute(configurationHelper.getProperty(ConfigurationValue.S3_ATTRIBUTE_NAME_VALIDATE_PATH_PREFIX), Boolean.TRUE.toString()),
        new Attribute(configurationHelper.getProperty(ConfigurationValue.S3_ATTRIBUTE_NAME_VALIDATE_FILE_EXISTENCE), Boolean.TRUE.toString())));

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey = new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, primaryPartitionValue, NO_SUBPARTITION_VALUES, DATA_VERSION);

    // Create and persist a storage unit in the storage.
    StorageUnitEntity storageUnitEntity = storageUnitDaoTestHelper.createStorageUnitEntity(STORAGE_NAME, businessObjectDataKey, LATEST_VERSION_FLAG_SET, BusinessObjectDataStatusEntity.VALID, StorageUnitStatusEntity.ENABLED, NO_STORAGE_DIRECTORY_PATH);

    // Add storage files to the storage unit.
    for (String filePath : LOCAL_FILES) {
        storageFileDaoTestHelper.createStorageFileEntity(storageUnitEntity, s3KeyPrefix + "/" + filePath, FILE_SIZE_1_KB, ROW_COUNT_1000);
    }

    // Get the storage files.
    List<StorageFile> storageFiles = storageFileHelper.createStorageFilesFromEntities(storageUnitEntity.getStorageFiles());

    // Get the business object format entity.
    BusinessObjectFormatEntity businessObjectFormatEntity = storageUnitEntity.getBusinessObjectData().getBusinessObjectFormat();

    // Set the retention information for the business object format, which is the latest version business object format.
    businessObjectFormatEntity.setRetentionType(retentionTypeDao.getRetentionTypeByCode(RetentionTypeEntity.PARTITION_VALUE));
    businessObjectFormatEntity.setRetentionPeriodInDays(RETENTION_PERIOD_DAYS);
    businessObjectFormatDao.saveAndRefresh(businessObjectFormatEntity);

    // Override configuration to specify some settings required for testing.
    Map<String, Object> overrideMap = new HashMap<>();
    overrideMap.put(ConfigurationValue.S3_OBJECT_DELETE_TAG_KEY.getKey(), S3_OBJECT_TAG_KEY);
    overrideMap.put(ConfigurationValue.S3_OBJECT_DELETE_TAG_VALUE.getKey(), S3_OBJECT_TAG_VALUE);
    overrideMap.put(ConfigurationValue.S3_OBJECT_DELETE_ROLE_ARN.getKey(), S3_OBJECT_TAGGER_ROLE_ARN);
    overrideMap.put(ConfigurationValue.S3_OBJECT_DELETE_ROLE_SESSION_NAME.getKey(), S3_OBJECT_TAGGER_ROLE_SESSION_NAME);
    modifyPropertySourceInEnvironment(overrideMap);

    try {
        // Put relative S3 files into the S3 bucket.
        for (StorageFile storageFile : storageFiles) {
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFile.getFilePath(), new ByteArrayInputStream(new byte[storageFile.getFileSizeBytes().intValue()]), null), null);
        }

        // Request to destroy business object data.
        BusinessObjectData result = businessObjectDataService.destroyBusinessObjectData(businessObjectDataKey);

        // Validate the result.
        assertNotNull(result);
        assertEquals(storageUnitEntity.getBusinessObjectData().getId(), Integer.valueOf(result.getId()));

        // Validate the status of the storage unit entity.
        assertEquals(StorageUnitStatusEntity.DISABLED, storageUnitEntity.getStatus().getCode());

        // Validate the status of the business object data entity.
        assertEquals(BusinessObjectDataStatusEntity.DELETED, storageUnitEntity.getBusinessObjectData().getStatus().getCode());

        // Validate that all S3 files are now tagged.
        for (StorageFile storageFile : storageFiles) {
            GetObjectTaggingResult getObjectTaggingResult = s3Operations.getObjectTagging(new GetObjectTaggingRequest(S3_BUCKET_NAME, storageFile.getFilePath()), null);
            assertEquals(Arrays.asList(new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE)), getObjectTaggingResult.getTagSet());
        }
    } finally {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty()) {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();

        // Restore the property sources so we don't affect other tests.
        restorePropertySourceInEnvironment();
    }
}
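The test above pushes zero-filled in-memory payloads through the mocked s3Operations layer. Against a real SDK v1 client, an in-memory upload usually looks like the sketch below; when putting an InputStream it is worth setting the content length explicitly, since otherwise the client has to buffer the stream to determine it. Bucket and key names here are placeholders.

import java.io.ByteArrayInputStream;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class S3StreamUploadSketch {
    public static void main(String[] args) {
        // Placeholder bucket and key; the test derives these from storage entity attributes.
        String bucketName = "my-bucket";
        String key = "my/prefix/file.dat";
        byte[] payload = new byte[1024];

        AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();

        // Declare the content length up front so the SDK can stream the upload
        // instead of buffering the whole input in memory.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length);

        s3Client.putObject(new PutObjectRequest(bucketName, key, new ByteArrayInputStream(payload), metadata));
    }
}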
Use of com.amazonaws.services.s3.model.PutObjectRequest in project herd by FINRAOS.
The class BusinessObjectDataServiceRestoreBusinessObjectDataTest, method testRestoreBusinessObjectDataNonGlacierStorageClass.
@Test
public void testRestoreBusinessObjectDataNonGlacierStorageClass() throws Exception {
    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto glacierS3FileTransferRequestParamsDto = S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(S3_BUCKET_NAME + "/" + TEST_S3_KEY_PREFIX + "/").build();

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey = new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE, NO_SUBPARTITION_VALUES, DATA_VERSION);

    // Create database entities required for testing.
    BusinessObjectDataEntity businessObjectDataEntity = businessObjectDataServiceTestHelper.createDatabaseEntitiesForInitiateRestoreTesting(businessObjectDataKey);

    // Get the storage unit entity.
    StorageUnitEntity storageUnitEntity = storageUnitDaoHelper.getStorageUnitEntity(STORAGE_NAME, businessObjectDataEntity);

    try {
        // Put relative non-Glacier storage class files into the S3 bucket.
        for (StorageFileEntity storageFileEntity : storageUnitEntity.getStorageFiles()) {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Standard);
            metadata.setOngoingRestore(false);
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFileEntity.getPath(), new ByteArrayInputStream(new byte[storageFileEntity.getFileSizeBytes().intValue()]), metadata), NO_S3_CLIENT);
        }

        // Initiate a restore request for the business object data.
        BusinessObjectData businessObjectData = businessObjectDataService.restoreBusinessObjectData(businessObjectDataKey, EXPIRATION_IN_DAYS);

        // Validate the returned object.
        businessObjectDataServiceTestHelper.validateBusinessObjectData(businessObjectDataEntity.getId(), businessObjectDataKey, LATEST_VERSION_FLAG_SET, BDATA_STATUS, businessObjectData);

        // Validate that the origin storage unit status is RESTORING.
        assertEquals(StorageUnitStatusEntity.RESTORING, storageUnitEntity.getStatus().getCode());

        // Validate that there is still no ongoing restore request for all non-Glacier objects.
        for (StorageFileEntity storageFileEntity : storageUnitEntity.getStorageFiles()) {
            ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(S3_BUCKET_NAME, storageFileEntity.getPath(), NO_S3_CLIENT);
            assertFalse(objectMetadata.getOngoingRestore());
        }
    } finally {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(glacierS3FileTransferRequestParamsDto).isEmpty()) {
            s3Dao.deleteDirectory(glacierS3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();
    }
}
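For reference, initiating a restore and checking the ongoing-restore flag with the plain SDK v1 roughly corresponds to the sketch below; herd drives this through its S3Dao rather than calling the client directly, and the bucket, key, and expiration period here are placeholders.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.RestoreObjectRequest;

public class S3RestoreSketch {
    public static void main(String[] args) {
        // Placeholder bucket, key, and expiration period.
        String bucketName = "my-bucket";
        String key = "my/prefix/file.dat";
        int expirationInDays = 7;

        AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();

        // Ask S3 to make a temporary copy of the archived object available.
        s3Client.restoreObjectV2(new RestoreObjectRequest(bucketName, key, expirationInDays));

        // getOngoingRestore() is TRUE while the restore is in progress, FALSE once the
        // temporary copy is available, and null for objects that were never archived.
        ObjectMetadata metadata = s3Client.getObjectMetadata(bucketName, key);
        System.out.println("Ongoing restore: " + metadata.getOngoingRestore());
    }
}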
Use of com.amazonaws.services.s3.model.PutObjectRequest in project herd by FINRAOS.
The class UploadDownloadServiceTest, method runPerformCompleteUploadSingleMessageTest.
private void runPerformCompleteUploadSingleMessageTest() {
    uploadDownloadServiceTestHelper.createDatabaseEntitiesForUploadDownloadTesting();
    UploadSingleInitiationResponse resultUploadSingleInitiationResponse = uploadDownloadService.initiateUploadSingle(uploadDownloadServiceTestHelper.createUploadSingleInitiationRequest(NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, NAMESPACE, BDEF_NAME_2, FORMAT_USAGE_CODE_2, FORMAT_FILE_TYPE_CODE_2, FORMAT_VERSION_2, FILE_NAME));

    // Get the file path.
    String filePath = resultUploadSingleInitiationResponse.getTargetBusinessObjectData().getStorageUnits().get(0).getStorageFiles().get(0).getFilePath();

    // Put a 1 KB file in the S3 "loading dock" bucket.
    PutObjectRequest putObjectRequest = new PutObjectRequest(storageDaoTestHelper.getS3LoadingDockBucketName(), filePath, new ByteArrayInputStream(new byte[(int) FILE_SIZE_1_KB]), null);
    s3Operations.putObject(putObjectRequest, null);

    try {
        // Complete the upload.
        UploadDownloadServiceImpl.CompleteUploadSingleMessageResult result = uploadDownloadService.performCompleteUploadSingleMessage(filePath);

        // Validate the result object.
        assertEquals(BusinessObjectDataStatusEntity.UPLOADING, result.getSourceOldBusinessObjectDataStatus());
        assertEquals(BusinessObjectDataStatusEntity.DELETED, result.getSourceNewBusinessObjectDataStatus());
        assertEquals(BusinessObjectDataStatusEntity.UPLOADING, result.getTargetOldBusinessObjectDataStatus());
        assertEquals(BusinessObjectDataStatusEntity.VALID, result.getTargetNewBusinessObjectDataStatus());

        // Try to complete the upload a second time. This might happen when a duplicate S3 notification is received for the same uploaded file.
        result = uploadDownloadService.performCompleteUploadSingleMessage(filePath);

        // Validate the result object.
        assertEquals(BusinessObjectDataStatusEntity.DELETED, result.getSourceOldBusinessObjectDataStatus());
        assertNull(result.getSourceNewBusinessObjectDataStatus());
        assertEquals(BusinessObjectDataStatusEntity.VALID, result.getTargetOldBusinessObjectDataStatus());
        assertNull(result.getTargetNewBusinessObjectDataStatus());
    } finally {
        // Clean up the S3.
        s3Dao.deleteDirectory(S3FileTransferRequestParamsDto.builder().withS3BucketName(storageDaoTestHelper.getS3LoadingDockBucketName()).withS3KeyPrefix(filePath).build());
        s3Operations.rollback();
    }
}
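The test simulates the upload with an in-memory stream. When the payload is an actual file on disk, SDK v1 also offers a File-based PutObjectRequest constructor, in which case the SDK can read the content length from the file itself. A minimal sketch, with placeholder bucket, key, and file path:

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class S3FileUploadSketch {
    public static void main(String[] args) {
        // Placeholder bucket, key, and local file; the test uses an in-memory stream instead.
        String bucketName = "my-loading-dock-bucket";
        String key = "namespace/bdef/file.dat";
        File localFile = new File("/tmp/file.dat");

        AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();

        // With a File the SDK determines the content length on its own,
        // so no ObjectMetadata is required for a basic upload.
        s3Client.putObject(new PutObjectRequest(bucketName, key, localFile));
    }
}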
Use of com.amazonaws.services.s3.model.PutObjectRequest in project herd by FINRAOS.
The class UploadDownloadHelperServiceTest, method testExecuteFileMoveAfterStepsNewTargetStatusNotValid.
@Test
public void testExecuteFileMoveAfterStepsNewTargetStatusNotValid() {
    // Create and persist entities required for testing.
    BusinessObjectDataEntity sourceBusinessObjectDataEntity = businessObjectDataDaoTestHelper.createBusinessObjectDataEntity(NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, INITIAL_FORMAT_VERSION, PARTITION_VALUE, INITIAL_DATA_VERSION, LATEST_VERSION_FLAG_SET, BusinessObjectDataStatusEntity.RE_ENCRYPTING);
    StorageUnitEntity sourceStorageUnitEntity = storageUnitDaoTestHelper.createStorageUnitEntity(storageDaoHelper.getStorageEntity(StorageEntity.MANAGED_LOADING_DOCK_STORAGE), sourceBusinessObjectDataEntity, StorageUnitStatusEntity.ENABLED, NO_STORAGE_DIRECTORY_PATH);
    storageFileDaoTestHelper.createStorageFileEntity(sourceStorageUnitEntity, TARGET_S3_KEY, FILE_SIZE_1_KB, NO_ROW_COUNT);
    BusinessObjectDataEntity targetBusinessObjectDataEntity = businessObjectDataDaoTestHelper.createBusinessObjectDataEntity(NAMESPACE_2, BDEF_NAME_2, FORMAT_USAGE_CODE_2, FORMAT_FILE_TYPE_CODE_2, INITIAL_FORMAT_VERSION, PARTITION_VALUE, INITIAL_DATA_VERSION, LATEST_VERSION_FLAG_SET, BusinessObjectDataStatusEntity.RE_ENCRYPTING);
    StorageUnitEntity targetStorageUnitEntity = storageUnitDaoTestHelper.createStorageUnitEntity(storageDaoHelper.getStorageEntity(StorageEntity.MANAGED_EXTERNAL_STORAGE), targetBusinessObjectDataEntity, StorageUnitStatusEntity.ENABLED, NO_STORAGE_DIRECTORY_PATH);
    storageFileDaoTestHelper.createStorageFileEntity(targetStorageUnitEntity, TARGET_S3_KEY, FILE_SIZE_1_KB, NO_ROW_COUNT);

    // Put a 1 KB file in the source S3 bucket.
    PutObjectRequest putObjectRequest = new PutObjectRequest(storageDaoTestHelper.getS3LoadingDockBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[(int) FILE_SIZE_1_KB]), null);
    s3Operations.putObject(putObjectRequest, null);

    // Initialize parameters required to perform the file move post steps with new target status not being set to "VALID".
    CompleteUploadSingleParamsDto completeUploadSingleParamsDto = new CompleteUploadSingleParamsDto(
        businessObjectDataHelper.getBusinessObjectDataKey(sourceBusinessObjectDataEntity), storageDaoTestHelper.getS3LoadingDockBucketName(), TARGET_S3_KEY,
        BusinessObjectDataStatusEntity.UPLOADING, BusinessObjectDataStatusEntity.RE_ENCRYPTING,
        businessObjectDataHelper.getBusinessObjectDataKey(targetBusinessObjectDataEntity), storageDaoTestHelper.getS3ExternalBucketName(), TARGET_S3_KEY,
        BusinessObjectDataStatusEntity.RE_ENCRYPTING, BusinessObjectDataStatusEntity.INVALID, MockS3OperationsImpl.MOCK_KMS_ID, emrHelper.getAwsParamsDto());

    // Try to execute the file move post steps when new target business object data status is not set to "VALID".
    uploadDownloadHelperService.executeFileMoveAfterSteps(completeUploadSingleParamsDto);

    // Refresh the data entities.
    sourceBusinessObjectDataEntity = businessObjectDataDaoHelper.getBusinessObjectDataEntity(businessObjectDataHelper.getBusinessObjectDataKey(sourceBusinessObjectDataEntity));
    targetBusinessObjectDataEntity = businessObjectDataDaoHelper.getBusinessObjectDataEntity(businessObjectDataHelper.getBusinessObjectDataKey(targetBusinessObjectDataEntity));

    // Validate the source and target business object data statuses.
    assertEquals(BusinessObjectDataStatusEntity.DELETED, sourceBusinessObjectDataEntity.getStatus().getCode());
    assertEquals(BusinessObjectDataStatusEntity.INVALID, targetBusinessObjectDataEntity.getStatus().getCode());

    // Validate the updated DTO parameters.
    assertEquals(BusinessObjectDataStatusEntity.DELETED, completeUploadSingleParamsDto.getSourceNewStatus());
    assertEquals(BusinessObjectDataStatusEntity.RE_ENCRYPTING, completeUploadSingleParamsDto.getSourceOldStatus());
    assertEquals(BusinessObjectDataStatusEntity.INVALID, completeUploadSingleParamsDto.getTargetNewStatus());
    assertEquals(BusinessObjectDataStatusEntity.RE_ENCRYPTING, completeUploadSingleParamsDto.getTargetOldStatus());
}
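The CompleteUploadSingleParamsDto carries a source bucket/key, a target bucket/key, and a KMS key id, so at the S3 level the file move presumably amounts to a server-side copy with SSE-KMS followed by deletion of the source object. A hedged sketch of that pattern against a plain SDK v1 client, with placeholder bucket names, key, and KMS key id, not herd's actual file-move code:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams;

public class S3KmsCopySketch {
    public static void main(String[] args) {
        // Placeholder buckets, key, and KMS key id.
        String sourceBucketName = "loading-dock-bucket";
        String targetBucketName = "external-bucket";
        String key = "namespace/bdef/file.dat";
        String kmsKeyId = "arn:aws:kms:us-east-1:123456789012:key/placeholder";

        AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();

        // Server-side copy into the target bucket, re-encrypting with the given KMS key.
        s3Client.copyObject(new CopyObjectRequest(sourceBucketName, key, targetBucketName, key)
            .withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(kmsKeyId)));

        // Remove the source object once the copy has succeeded.
        s3Client.deleteObject(sourceBucketName, key);
    }
}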