Use of org.finra.herd.model.dto.S3FileTransferRequestParamsDto in project herd by FINRAOS.
The class UploadDownloadHelperServiceImpl, method prepareForFileMoveImpl.
/**
* Prepares to move an S3 file from the source bucket to the target bucket. On success, both the target and source business object data statuses are set to
* "RE-ENCRYPTING" and the DTO is updated accordingly.
*
* @param objectKey the object key (i.e. filename)
* @param completeUploadSingleParamsDto the DTO to be initialized with parameters required for complete upload single message processing
*/
protected void prepareForFileMoveImpl(String objectKey, CompleteUploadSingleParamsDto completeUploadSingleParamsDto) {
    try {
        // Obtain the source business object data entity.
        BusinessObjectDataEntity sourceBusinessObjectDataEntity =
            storageFileDaoHelper.getStorageFileEntity(StorageEntity.MANAGED_LOADING_DOCK_STORAGE, objectKey).getStorageUnit().getBusinessObjectData();

        // Get the status and key of the source business object data entity.
        completeUploadSingleParamsDto.setSourceOldStatus(sourceBusinessObjectDataEntity.getStatus().getCode());
        completeUploadSingleParamsDto.setSourceBusinessObjectDataKey(businessObjectDataHelper.getBusinessObjectDataKey(sourceBusinessObjectDataEntity));

        // Find the target business object data by the source business object data's partition value, which is expected to be a UUID.
        // This assumes that the target has the same partition value as the source and that exactly one target
        // business object data exists for this UUID.
        BusinessObjectDataEntity targetBusinessObjectDataEntity = getTargetBusinessObjectDataEntity(sourceBusinessObjectDataEntity);

        // Get the status and key of the target business object data entity.
        completeUploadSingleParamsDto.setTargetOldStatus(targetBusinessObjectDataEntity.getStatus().getCode());
        completeUploadSingleParamsDto.setTargetBusinessObjectDataKey(businessObjectDataHelper.getBusinessObjectDataKey(targetBusinessObjectDataEntity));

        // This check effectively discards any duplicate SQS messages coming from S3 for the same uploaded file.
        for (BusinessObjectDataEntity businessObjectDataEntity : Arrays.asList(sourceBusinessObjectDataEntity, targetBusinessObjectDataEntity)) {
            if (!BusinessObjectDataStatusEntity.UPLOADING.equals(businessObjectDataEntity.getStatus().getCode())) {
                LOGGER.info("Ignoring S3 notification since business object data status \"{}\" does not match the expected status \"{}\". businessObjectDataKey={}",
                    businessObjectDataEntity.getStatus().getCode(), BusinessObjectDataStatusEntity.UPLOADING,
                    jsonHelper.objectToJson(businessObjectDataHelper.getBusinessObjectDataKey(businessObjectDataEntity)));

                // Skip the rest of the steps required to complete the upload single message processing.
                return;
            }
        }

        // Get the S3 managed "loading dock" storage entity and make sure it exists.
        StorageEntity s3ManagedLoadingDockStorageEntity = storageDaoHelper.getStorageEntity(StorageEntity.MANAGED_LOADING_DOCK_STORAGE);

        // Get the bucket name for the S3 managed "loading dock" storage. Please note that this attribute value is required.
        completeUploadSingleParamsDto.setSourceBucketName(storageHelper.getStorageBucketName(s3ManagedLoadingDockStorageEntity));

        // Get the storage unit entity for this business object data in the S3 managed "loading dock" storage and make sure it exists.
        StorageUnitEntity sourceStorageUnitEntity = storageUnitDaoHelper.getStorageUnitEntity(StorageEntity.MANAGED_LOADING_DOCK_STORAGE, sourceBusinessObjectDataEntity);

        // Get the storage file entity.
        StorageFileEntity sourceStorageFileEntity = IterableUtils.get(sourceStorageUnitEntity.getStorageFiles(), 0);

        // Get the source storage file path.
        completeUploadSingleParamsDto.setSourceFilePath(sourceStorageFileEntity.getPath());

        // Get the AWS parameters.
        AwsParamsDto awsParamsDto = awsHelper.getAwsParamsDto();
        completeUploadSingleParamsDto.setAwsParams(awsParamsDto);

        // Validate the source S3 file.
        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = S3FileTransferRequestParamsDto.builder()
            .withS3BucketName(completeUploadSingleParamsDto.getSourceBucketName())
            .withS3KeyPrefix(completeUploadSingleParamsDto.getSourceFilePath())
            .withHttpProxyHost(awsParamsDto.getHttpProxyHost())
            .withHttpProxyPort(awsParamsDto.getHttpProxyPort())
            .build();
        s3Dao.validateS3File(s3FileTransferRequestParamsDto, sourceStorageFileEntity.getFileSizeBytes());

        // Get the S3 managed "external" storage entity and make sure it exists.
        StorageEntity s3ManagedExternalStorageEntity = getUniqueStorage(targetBusinessObjectDataEntity);

        // Get the bucket name for the S3 managed "external" storage. Please note that this attribute value is required.
        completeUploadSingleParamsDto.setTargetBucketName(storageHelper.getStorageBucketName(s3ManagedExternalStorageEntity));

        // Get the AWS KMS external key ID.
        completeUploadSingleParamsDto.setKmsKeyId(storageHelper.getStorageKmsKeyId(s3ManagedExternalStorageEntity));

        // Make sure the target does not already contain the file.
        completeUploadSingleParamsDto.setTargetFilePath(
            IterableUtils.get(IterableUtils.get(targetBusinessObjectDataEntity.getStorageUnits(), 0).getStorageFiles(), 0).getPath());
        assertS3ObjectKeyDoesNotExist(completeUploadSingleParamsDto.getTargetBucketName(), completeUploadSingleParamsDto.getTargetFilePath());

        try {
            // Change the status of the source and target business object data to RE-ENCRYPTING.
            businessObjectDataDaoHelper.updateBusinessObjectDataStatus(sourceBusinessObjectDataEntity, BusinessObjectDataStatusEntity.RE_ENCRYPTING);
            businessObjectDataDaoHelper.updateBusinessObjectDataStatus(targetBusinessObjectDataEntity, BusinessObjectDataStatusEntity.RE_ENCRYPTING);
        }
        // An optimistic lock exception here indicates a duplicate S3 event notification that was not already
        // caught by a business object data status check that occurs inside the prepareForFileMove() helper method.
        catch (OptimisticLockException e) {
            LOGGER.info("Ignoring S3 notification due to an optimistic lock exception caused by duplicate S3 event notifications. sourceBusinessObjectDataKey={} targetBusinessObjectDataKey={}",
                jsonHelper.objectToJson(completeUploadSingleParamsDto.getSourceBusinessObjectDataKey()),
                jsonHelper.objectToJson(completeUploadSingleParamsDto.getTargetBusinessObjectDataKey()));

            // Skip the rest of the steps required to complete the upload single message processing.
            return;
        }

        // Set the new status for the source and target business object data in the DTO.
        completeUploadSingleParamsDto.setSourceNewStatus(BusinessObjectDataStatusEntity.RE_ENCRYPTING);
        completeUploadSingleParamsDto.setTargetNewStatus(BusinessObjectDataStatusEntity.RE_ENCRYPTING);
    } catch (RuntimeException e) {
        // Update the status of the source business object data instance.
        completeUploadSingleParamsDto.setSourceNewStatus(setAndReturnNewSourceBusinessObjectDataStatusAfterError(completeUploadSingleParamsDto.getSourceBusinessObjectDataKey()));

        // Update the status of the target business object data instance.
        completeUploadSingleParamsDto.setTargetNewStatus(setAndReturnNewTargetBusinessObjectDataStatusAfterError(completeUploadSingleParamsDto.getTargetBusinessObjectDataKey()));

        // Delete the source S3 file. Please note that the method below only logs runtime exceptions without re-throwing them.
        deleteSourceS3ObjectAfterError(completeUploadSingleParamsDto.getSourceBucketName(), completeUploadSingleParamsDto.getSourceFilePath(),
            completeUploadSingleParamsDto.getSourceBusinessObjectDataKey());

        // Log the error.
        LOGGER.error("Failed to process upload single completion request for file. s3Key=\"{}\"", objectKey, e);
    }

    // If a status update occurred for the source business object data, create a business object data notification for this event.
    if (completeUploadSingleParamsDto.getSourceNewStatus() != null) {
        notificationEventService.processBusinessObjectDataNotificationEventAsync(NotificationEventTypeEntity.EventTypesBdata.BUS_OBJCT_DATA_STTS_CHG,
            completeUploadSingleParamsDto.getSourceBusinessObjectDataKey(), completeUploadSingleParamsDto.getSourceNewStatus(),
            completeUploadSingleParamsDto.getSourceOldStatus());
    }

    // If a status update occurred for the target business object data, create a business object data notification for this event.
    if (completeUploadSingleParamsDto.getTargetNewStatus() != null) {
        notificationEventService.processBusinessObjectDataNotificationEventAsync(NotificationEventTypeEntity.EventTypesBdata.BUS_OBJCT_DATA_STTS_CHG,
            completeUploadSingleParamsDto.getTargetBusinessObjectDataKey(), completeUploadSingleParamsDto.getTargetNewStatus(),
            completeUploadSingleParamsDto.getTargetOldStatus());
    }
}
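For reference, the source-file validation step above can be read in isolation as a builder-based construction of an S3FileTransferRequestParamsDto. The following is a minimal sketch, not herd code: the class and method names are hypothetical, and it assumes the same injected S3Dao and AwsHelper collaborators used by UploadDownloadHelperServiceImpl (imports omitted, matching the snippets on this page).
// Hypothetical sketch (not herd code): isolates the builder-based DTO construction used above to validate the source S3 file.
public class SourceS3FileValidationSketch {

    // Assumed to be the same injected collaborators used by UploadDownloadHelperServiceImpl.
    private final S3Dao s3Dao;
    private final AwsHelper awsHelper;

    public SourceS3FileValidationSketch(S3Dao s3Dao, AwsHelper awsHelper) {
        this.s3Dao = s3Dao;
        this.awsHelper = awsHelper;
    }

    // Builds the transfer parameters from the complete-upload DTO and validates the source file against its expected size.
    public void validateSourceS3File(CompleteUploadSingleParamsDto completeUploadSingleParamsDto, Long expectedFileSizeBytes) {
        AwsParamsDto awsParamsDto = awsHelper.getAwsParamsDto();

        S3FileTransferRequestParamsDto params = S3FileTransferRequestParamsDto.builder()
            .withS3BucketName(completeUploadSingleParamsDto.getSourceBucketName())
            .withS3KeyPrefix(completeUploadSingleParamsDto.getSourceFilePath())
            .withHttpProxyHost(awsParamsDto.getHttpProxyHost())
            .withHttpProxyPort(awsParamsDto.getHttpProxyPort())
            .build();

        s3Dao.validateS3File(params, expectedFileSizeBytes);
    }
}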
Use of org.finra.herd.model.dto.S3FileTransferRequestParamsDto in project herd by FINRAOS.
The class UploadDownloadHelperServiceImpl, method assertS3ObjectKeyDoesNotExist.
@Override
public void assertS3ObjectKeyDoesNotExist(String bucketName, String key) {
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(bucketName);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(key);

    AwsParamsDto awsParamsDto = awsHelper.getAwsParamsDto();

    String httpProxyHost = awsParamsDto.getHttpProxyHost();
    s3FileTransferRequestParamsDto.setHttpProxyHost(httpProxyHost);

    Integer httpProxyPort = awsParamsDto.getHttpProxyPort();
    s3FileTransferRequestParamsDto.setHttpProxyPort(httpProxyPort);

    Assert.isTrue(!s3Dao.s3FileExists(s3FileTransferRequestParamsDto),
        String.format("A S3 object already exists in bucket \"%s\" and key \"%s\".", bucketName, key));
}
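The method above builds the DTO with setters; the same parameters can also be expressed with the builder used in prepareForFileMoveImpl. A minimal, behavior-equivalent sketch follows; the method name is hypothetical, and s3Dao and awsHelper are assumed to be the same injected collaborators used above.
// Sketch only (not herd code): the same existence check written with the builder-style construction seen in prepareForFileMoveImpl.
public void assertS3ObjectKeyDoesNotExistUsingBuilder(String bucketName, String key) {
    AwsParamsDto awsParamsDto = awsHelper.getAwsParamsDto();

    S3FileTransferRequestParamsDto params = S3FileTransferRequestParamsDto.builder()
        .withS3BucketName(bucketName)
        .withS3KeyPrefix(key)
        .withHttpProxyHost(awsParamsDto.getHttpProxyHost())
        .withHttpProxyPort(awsParamsDto.getHttpProxyPort())
        .build();

    Assert.isTrue(!s3Dao.s3FileExists(params), String.format("A S3 object already exists in bucket \"%s\" and key \"%s\".", bucketName, key));
}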
Use of org.finra.herd.model.dto.S3FileTransferRequestParamsDto in project herd by FINRAOS.
The class BusinessObjectDataFinalizeRestoreHelperServiceTest, method testExecuteS3SpecificStepsGlacierS3FileStillRestoring.
@Test
public void testExecuteS3SpecificStepsGlacierS3FileStillRestoring() throws Exception {
    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(TEST_S3_KEY_PREFIX + "/").build();

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey = new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE,
        FORMAT_VERSION, PARTITION_VALUE, SUBPARTITION_VALUES, DATA_VERSION);

    // Create a business object data restore DTO.
    BusinessObjectDataRestoreDto businessObjectDataRestoreDto = new BusinessObjectDataRestoreDto(businessObjectDataKey, STORAGE_NAME, NO_S3_ENDPOINT,
        S3_BUCKET_NAME, TEST_S3_KEY_PREFIX, NO_STORAGE_UNIT_STATUS, NO_STORAGE_UNIT_STATUS,
        Arrays.asList(new StorageFile(TEST_S3_KEY_PREFIX + "/" + LOCAL_FILE, FILE_SIZE_1_KB, NO_ROW_COUNT)), NO_EXCEPTION);

    try {
        // Put a "still restoring" Glacier storage class S3 file in the Glacier S3 bucket.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
        metadata.setOngoingRestore(true);
        s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, String.format("%s/%s", TEST_S3_KEY_PREFIX, LOCAL_FILE),
            new ByteArrayInputStream(new byte[(int) FILE_SIZE_1_KB]), metadata), NO_S3_CLIENT);

        // Try to execute S3 specific steps to finalize a restore for the storage unit when the Glacier S3 file is still restoring.
        try {
            businessObjectDataFinalizeRestoreHelperService.executeS3SpecificSteps(businessObjectDataRestoreDto);
            fail();
        } catch (IllegalArgumentException e) {
            assertEquals(String.format("Archived Glacier S3 file \"%s/%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {true}, " +
                "S3 bucket name {%s}", TEST_S3_KEY_PREFIX, LOCAL_FILE, S3_BUCKET_NAME), e.getMessage());
        }

        // Validate that we have a Glacier S3 file at the expected S3 location.
        assertEquals(1, s3Dao.listDirectory(s3FileTransferRequestParamsDto).size());
    } finally {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty()) {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();
    }
}
Use of org.finra.herd.model.dto.S3FileTransferRequestParamsDto in project herd by FINRAOS.
The class BusinessObjectDataFinalizeRestoreHelperServiceTest, method testExecuteS3SpecificSteps.
@Test
public void testExecuteS3SpecificSteps() throws Exception {
    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(TEST_S3_KEY_PREFIX + "/").build();

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey = new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE,
        FORMAT_VERSION, PARTITION_VALUE, SUBPARTITION_VALUES, DATA_VERSION);

    // Create a list of storage files.
    List<StorageFile> storageFiles = new ArrayList<>();
    for (String filePath : LOCAL_FILES) {
        storageFiles.add(new StorageFile(TEST_S3_KEY_PREFIX + "/" + filePath, FILE_SIZE_1_KB, NO_ROW_COUNT));
    }

    // Create a business object data restore DTO.
    BusinessObjectDataRestoreDto businessObjectDataRestoreDto = new BusinessObjectDataRestoreDto(businessObjectDataKey, STORAGE_NAME, NO_S3_ENDPOINT,
        S3_BUCKET_NAME, TEST_S3_KEY_PREFIX, NO_STORAGE_UNIT_STATUS, NO_STORAGE_UNIT_STATUS, storageFiles, NO_EXCEPTION);

    try {
        // Put relative Glacier storage class S3 files in the S3 bucket.
        for (StorageFile storageFile : storageFiles) {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
            metadata.setOngoingRestore(false);
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFile.getFilePath(),
                new ByteArrayInputStream(new byte[storageFile.getFileSizeBytes().intValue()]), metadata), NO_S3_CLIENT);
        }

        // Execute S3 specific steps to finalize a restore for the Glacier storage unit.
        businessObjectDataFinalizeRestoreHelperService.executeS3SpecificSteps(businessObjectDataRestoreDto);

        // Validate that we have the restored S3 files at the expected S3 location.
        assertEquals(storageFiles.size(), s3Dao.listDirectory(s3FileTransferRequestParamsDto).size());
    } finally {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty()) {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();
    }
}
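Both tests above share the same cleanup logic in their finally blocks. A hypothetical helper, shown only as a sketch and not part of herd, could consolidate it; it assumes the same s3Dao and s3Operations collaborators used by the tests, and the method name is an assumption.
// Hypothetical test helper (not herd code): consolidates the cleanup performed in the finally blocks above.
private void cleanUpS3KeyPrefix(S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto) {
    // Delete test files from S3 storage, if any remain under the key prefix.
    if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty()) {
        s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
    }

    // Reset the S3 operations used by the tests, as both tests do after cleanup.
    s3Operations.rollback();
}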
Use of org.finra.herd.model.dto.S3FileTransferRequestParamsDto in project herd by FINRAOS.
The class StoragePolicyProcessorHelperServiceImpl, method executeStoragePolicyTransitionImpl.
/**
* Executes a storage policy transition per the specified storage policy selection.
*
* @param storagePolicyTransitionParamsDto the storage policy transition DTO that contains parameters needed to perform a storage policy transition
*/
protected void executeStoragePolicyTransitionImpl(StoragePolicyTransitionParamsDto storagePolicyTransitionParamsDto) {
    // Create an S3 file transfer parameters DTO to access the S3 bucket.
    // Since the S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = storageHelper.getS3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3Endpoint(storagePolicyTransitionParamsDto.getS3Endpoint());
    s3FileTransferRequestParamsDto.setS3BucketName(storagePolicyTransitionParamsDto.getS3BucketName());
    s3FileTransferRequestParamsDto.setS3KeyPrefix(StringUtils.appendIfMissing(storagePolicyTransitionParamsDto.getS3KeyPrefix(), "/"));

    // Create an S3 file transfer parameters DTO to be used for the S3 object tagging operation.
    S3FileTransferRequestParamsDto s3ObjectTaggerParamsDto = storageHelper.getS3FileTransferRequestParamsDtoByRole(
        storagePolicyTransitionParamsDto.getS3ObjectTaggerRoleArn(), storagePolicyTransitionParamsDto.getS3ObjectTaggerRoleSessionName());
    s3ObjectTaggerParamsDto.setS3Endpoint(storagePolicyTransitionParamsDto.getS3Endpoint());

    // Get the actual S3 files by selecting all S3 keys matching the S3 key prefix from the S3 bucket.
    // When listing S3 files, we ignore 0 byte objects that represent S3 directories.
    List<S3ObjectSummary> actualS3FilesWithoutZeroByteDirectoryMarkers = s3Service.listDirectory(s3FileTransferRequestParamsDto, true);

    // Validate the existence of the S3 files.
    storageFileHelper.validateRegisteredS3Files(storagePolicyTransitionParamsDto.getStorageFiles(), actualS3FilesWithoutZeroByteDirectoryMarkers,
        storagePolicyTransitionParamsDto.getStorageName(), storagePolicyTransitionParamsDto.getBusinessObjectDataKey());

    // Get the actual S3 files by selecting all S3 keys matching the S3 key prefix from the S3 bucket.
    // This time, we do not ignore 0 byte objects that represent S3 directories.
    List<S3ObjectSummary> actualS3Files = s3Service.listDirectory(s3FileTransferRequestParamsDto, false);

    // Set the list of files to be tagged for archiving.
    s3FileTransferRequestParamsDto.setFiles(storageFileHelper.getFiles(storageFileHelper.createStorageFilesFromS3ObjectSummaries(actualS3Files)));

    // Tag the S3 objects to initiate the archiving.
    s3Service.tagObjects(s3FileTransferRequestParamsDto, s3ObjectTaggerParamsDto,
        new Tag(storagePolicyTransitionParamsDto.getS3ObjectTagKey(), storagePolicyTransitionParamsDto.getS3ObjectTagValue()));

    // Log the list of files tagged in the S3 bucket.
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info("Successfully tagged files in S3 bucket. s3BucketName=\"{}\" s3KeyCount={} s3ObjectTagKey=\"{}\" s3ObjectTagValue=\"{}\"",
            s3FileTransferRequestParamsDto.getS3BucketName(), actualS3Files.size(), storagePolicyTransitionParamsDto.getS3ObjectTagKey(),
            storagePolicyTransitionParamsDto.getS3ObjectTagValue());

        for (S3ObjectSummary s3File : actualS3FilesWithoutZeroByteDirectoryMarkers) {
            LOGGER.info("s3Key=\"{}\"", s3File.getKey());
        }
    }
}
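The DTO construction at the top of the method can be read in isolation. The sketch below, with a hypothetical method name and not part of herd, extracts it unchanged; storageHelper and the StoragePolicyTransitionParamsDto accessors are the same ones used above.
// Hypothetical sketch (not herd code): isolates the listing-DTO construction performed at the start of executeStoragePolicyTransitionImpl.
private S3FileTransferRequestParamsDto createListingParamsDto(StoragePolicyTransitionParamsDto storagePolicyTransitionParamsDto) {
    S3FileTransferRequestParamsDto params = storageHelper.getS3FileTransferRequestParamsDto();
    params.setS3Endpoint(storagePolicyTransitionParamsDto.getS3Endpoint());
    params.setS3BucketName(storagePolicyTransitionParamsDto.getS3BucketName());

    // The S3 key prefix represents a directory, so it must end with a trailing '/' before listing.
    params.setS3KeyPrefix(StringUtils.appendIfMissing(storagePolicyTransitionParamsDto.getS3KeyPrefix(), "/"));

    return params;
}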