Use of com.amazonaws.services.s3.model.Tag in project herd by FINRAOS.
Class BusinessObjectDataServiceDestroyBusinessObjectDataTest, method testDestroyBusinessObjectData.
@Test
public void testDestroyBusinessObjectData() throws Exception {
    // Create a primary partition value that satisfies the retention threshold check.
    String primaryPartitionValue =
        DateFormatUtils.format(DateUtils.addDays(new Date(), -1 * (RETENTION_PERIOD_DAYS + 1)), AbstractHerdDao.DEFAULT_SINGLE_DAY_DATE_MASK);

    // Build the expected S3 key prefix for the test business object data.
    String s3KeyPrefix =
        getExpectedS3KeyPrefix(BDEF_NAMESPACE, DATA_PROVIDER_NAME, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_KEY,
            primaryPartitionValue, null, null, DATA_VERSION);

    // Create an S3FileTransferRequestParamsDto to access the S3 bucket location.
    // Since the test S3 key prefix represents a directory, append a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(s3KeyPrefix + "/").build();

    // Create an S3 storage entity with the relevant attributes.
    storageDaoTestHelper.createStorageEntity(STORAGE_NAME, StoragePlatformEntity.S3, Arrays.asList(
        new Attribute(configurationHelper.getProperty(ConfigurationValue.S3_ATTRIBUTE_NAME_BUCKET_NAME), AbstractServiceTest.S3_BUCKET_NAME),
        new Attribute(configurationHelper.getProperty(ConfigurationValue.S3_ATTRIBUTE_NAME_KEY_PREFIX_VELOCITY_TEMPLATE),
            AbstractServiceTest.S3_KEY_PREFIX_VELOCITY_TEMPLATE),
        new Attribute(configurationHelper.getProperty(ConfigurationValue.S3_ATTRIBUTE_NAME_VALIDATE_PATH_PREFIX), Boolean.TRUE.toString()),
        new Attribute(configurationHelper.getProperty(ConfigurationValue.S3_ATTRIBUTE_NAME_VALIDATE_FILE_EXISTENCE), Boolean.TRUE.toString())));

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, primaryPartitionValue,
            NO_SUBPARTITION_VALUES, DATA_VERSION);

    // Create and persist a storage unit in the storage.
    StorageUnitEntity storageUnitEntity =
        storageUnitDaoTestHelper.createStorageUnitEntity(STORAGE_NAME, businessObjectDataKey, LATEST_VERSION_FLAG_SET, BusinessObjectDataStatusEntity.VALID,
            StorageUnitStatusEntity.ENABLED, NO_STORAGE_DIRECTORY_PATH);

    // Add storage files to the storage unit.
    for (String filePath : LOCAL_FILES) {
        storageFileDaoTestHelper.createStorageFileEntity(storageUnitEntity, s3KeyPrefix + "/" + filePath, FILE_SIZE_1_KB, ROW_COUNT_1000);
    }

    // Get the storage files.
    List<StorageFile> storageFiles = storageFileHelper.createStorageFilesFromEntities(storageUnitEntity.getStorageFiles());

    // Get the business object format entity.
    BusinessObjectFormatEntity businessObjectFormatEntity = storageUnitEntity.getBusinessObjectData().getBusinessObjectFormat();

    // Set the retention information on the business object format, which is the latest business object format version.
    businessObjectFormatEntity.setRetentionType(retentionTypeDao.getRetentionTypeByCode(RetentionTypeEntity.PARTITION_VALUE));
    businessObjectFormatEntity.setRetentionPeriodInDays(RETENTION_PERIOD_DAYS);
    businessObjectFormatDao.saveAndRefresh(businessObjectFormatEntity);

    // Override the configuration to specify the settings required for testing.
    Map<String, Object> overrideMap = new HashMap<>();
    overrideMap.put(ConfigurationValue.S3_OBJECT_DELETE_TAG_KEY.getKey(), S3_OBJECT_TAG_KEY);
    overrideMap.put(ConfigurationValue.S3_OBJECT_DELETE_TAG_VALUE.getKey(), S3_OBJECT_TAG_VALUE);
    overrideMap.put(ConfigurationValue.S3_OBJECT_DELETE_ROLE_ARN.getKey(), S3_OBJECT_TAGGER_ROLE_ARN);
    overrideMap.put(ConfigurationValue.S3_OBJECT_DELETE_ROLE_SESSION_NAME.getKey(), S3_OBJECT_TAGGER_ROLE_SESSION_NAME);
    modifyPropertySourceInEnvironment(overrideMap);

    try {
        // Put the relevant S3 files into the S3 bucket.
        for (StorageFile storageFile : storageFiles) {
            s3Operations.putObject(
                new PutObjectRequest(S3_BUCKET_NAME, storageFile.getFilePath(),
                    new ByteArrayInputStream(new byte[storageFile.getFileSizeBytes().intValue()]), null), null);
        }

        // Request to destroy the business object data.
        BusinessObjectData result = businessObjectDataService.destroyBusinessObjectData(businessObjectDataKey);

        // Validate the result.
        assertNotNull(result);
        assertEquals(storageUnitEntity.getBusinessObjectData().getId(), Integer.valueOf(result.getId()));

        // Validate the status of the storage unit entity.
        assertEquals(StorageUnitStatusEntity.DISABLED, storageUnitEntity.getStatus().getCode());

        // Validate the status of the business object data entity.
        assertEquals(BusinessObjectDataStatusEntity.DELETED, storageUnitEntity.getBusinessObjectData().getStatus().getCode());

        // Validate that all S3 files are now tagged.
        for (StorageFile storageFile : storageFiles) {
            GetObjectTaggingResult getObjectTaggingResult =
                s3Operations.getObjectTagging(new GetObjectTaggingRequest(S3_BUCKET_NAME, storageFile.getFilePath()), null);
            assertEquals(Arrays.asList(new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE)), getObjectTaggingResult.getTagSet());
        }
    } finally {
        // Delete the test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty()) {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();

        // Restore the property sources so we don't affect other tests.
        restorePropertySourceInEnvironment();
    }
}
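This test builds its object tag with the AWS SDK for Java v1 constructor, new Tag(key, value). For comparison, model classes in the v2 SDK (software.amazon.awssdk) are immutable and constructed through builders; below is a minimal sketch of the equivalent tag under that API, with hypothetical key and value literals standing in for the test constants.

import software.amazon.awssdk.services.s3.model.Tag;

// Minimal sketch: the v2 equivalent of new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE).
// The key and value literals are hypothetical stand-ins for the test constants.
Tag deleteTag = Tag.builder()
    .key("marked-for-delete")
    .value("true")
    .build();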
Use of com.amazonaws.services.s3.model.Tag in project herd by FINRAOS.
Class S3ServiceTest, method testTagObjects.
@Test
public void testTagObjects() {
    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setFiles(Arrays.asList(new File(TEST_S3_KEY_PREFIX + "/" + LOCAL_FILE)));

    // Create an S3 file transfer request parameters DTO to tag S3 objects.
    S3FileTransferRequestParamsDto s3ObjectTaggerParamsDto = new S3FileTransferRequestParamsDto();
    s3ObjectTaggerParamsDto.setAwsAccessKeyId(AWS_ASSUMED_ROLE_ACCESS_KEY);
    s3ObjectTaggerParamsDto.setAwsSecretKey(AWS_ASSUMED_ROLE_SECRET_KEY);
    s3ObjectTaggerParamsDto.setSessionToken(AWS_ASSUMED_ROLE_SESSION_TOKEN);

    // Create an S3 object tag.
    Tag tag = new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE);

    // Call the method under test.
    s3Service.tagObjects(s3FileTransferRequestParamsDto, s3ObjectTaggerParamsDto, tag);

    // Verify the external calls.
    verify(s3Dao).tagObjects(s3FileTransferRequestParamsDto, s3ObjectTaggerParamsDto, tag);
    verifyNoMoreInteractions(s3Dao);
}
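The verifications above pin down the expected behavior: the service forwards both DTOs and the tag to the DAO and does nothing else. A minimal sketch of a pass-through implementation consistent with that contract, assuming Spring injection (the actual herd S3ServiceImpl may differ):

// A minimal sketch of the delegation the mock verification above implies;
// the actual herd S3ServiceImpl may differ. S3Dao is assumed to be Spring-injected.
@Service
public class S3ServiceImpl implements S3Service {

    @Autowired
    private S3Dao s3Dao;

    @Override
    public void tagObjects(S3FileTransferRequestParamsDto params, S3FileTransferRequestParamsDto taggerParams, Tag tag) {
        // Pass the request straight through to the DAO layer, which performs the actual S3 calls.
        s3Dao.tagObjects(params, taggerParams, tag);
    }
}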
Use of com.amazonaws.services.ec2.model.Tag in project photon-model by vmware.
Class AWSRemoteCleanup, method cleanUpVpc.
/**
 * Cleans up all VPCs in the US_EAST_1 region, except the default VPC and any VPC whose Name tag
 * is protected (e.g. enumtest-vpc). Deleting a VPC requires its dependencies to be deleted in
 * the following order:
 * 1) EC2 instances
 * 2) NAT gateways
 * 3) Internet gateways
 * 4) VPN gateways
 * 5) Network ACLs
 * 6) Security groups (the default security group is not deleted)
 * 7) Subnets
 * NOTE: Route tables are currently not deleted.
 */
@Test
public void cleanUpVpc() {
    if (this.isMock) {
        return;
    }
    AmazonEC2 usEastEc2Client = this.ec2Clients.get(US_EAST_1_TAG);
    DescribeVpcsResult vpcsResult = usEastEc2Client.describeVpcs();
    List<Vpc> vpcs = vpcsResult.getVpcs();
    List<String> vpcIdsToBeDeleted = new ArrayList<>();
    List<String> enumTestVpcIds = new ArrayList<>();
    try {
        vpcs.stream().forEach(vpc -> {
            vpc.getTags().stream()
                .filter(tag -> tag.getKey().equalsIgnoreCase(NAME_TAG_KEY) && this.vpcTagsNotToBeDeleted.contains(tag.getValue().toLowerCase()))
                .forEach(tag -> enumTestVpcIds.add(vpc.getVpcId()));
            if (!vpc.getIsDefault()) {
                vpcIdsToBeDeleted.add(vpc.getVpcId());
            }
        });
        vpcIdsToBeDeleted.removeAll(enumTestVpcIds);
        vpcIdsToBeDeleted.stream().forEach(vpcId -> {
            DescribeInstancesRequest instancesRequest =
                new DescribeInstancesRequest().withFilters(new Filter(VPC_KEY, Collections.singletonList(vpcId)));
            DescribeInstancesResult instancesResult = usEastEc2Client.describeInstances(instancesRequest);
            deleteAwsEc2instances(vpcIdsToBeDeleted, instancesResult, usEastEc2Client);
            deleteNATGateway(vpcId, usEastEc2Client);
            deleteNetworkInterfaces(vpcId, usEastEc2Client);
            deleteInternetGateways(vpcId, usEastEc2Client);
            deleteVirtualPrivateGateways(vpcId, usEastEc2Client);
            disassociateAndDeleteNetworkACLs(vpcId, usEastEc2Client);
            deleteSecurityGroups(vpcId, usEastEc2Client);
            deleteSubnets(vpcId, usEastEc2Client);
            DeleteVpcRequest deleteVpcRequest = new DeleteVpcRequest().withVpcId(vpcId);
            this.host.log("Terminating stale vpc: %s", vpcId);
            usEastEc2Client.deleteVpc(deleteVpcRequest);
        });
    } catch (Exception e) {
        this.host.log(Level.INFO, e.getMessage());
    }
}
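The delete* helper methods referenced above are not shown on this page. To illustrate the dependency teardown they perform, here is a minimal sketch of what the internet-gateway step could look like with the v1 EC2 API; the actual photon-model helper may differ.

// A minimal sketch of an internet-gateway cleanup step, using the v1 EC2 API.
// The actual photon-model helper may differ; this only illustrates the
// detach-then-delete order that VPC teardown requires.
private void deleteInternetGateways(String vpcId, AmazonEC2 client) {
    DescribeInternetGatewaysRequest describeRequest = new DescribeInternetGatewaysRequest()
        .withFilters(new Filter("attachment.vpc-id", Collections.singletonList(vpcId)));
    for (InternetGateway gateway : client.describeInternetGateways(describeRequest).getInternetGateways()) {
        // An attached gateway must be detached from the VPC before it can be deleted.
        client.detachInternetGateway(new DetachInternetGatewayRequest()
            .withInternetGatewayId(gateway.getInternetGatewayId())
            .withVpcId(vpcId));
        client.deleteInternetGateway(new DeleteInternetGatewayRequest()
            .withInternetGatewayId(gateway.getInternetGatewayId()));
    }
}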
Use of com.amazonaws.services.ec2.model.Tag in project photon-model by vmware.
Class AWSUtils, method tagResourcesWithName.
/**
 * Synchronously tags one or more AWS resources with the provided name.
 */
public static void tagResourcesWithName(AmazonEC2AsyncClient client, String name, String... resourceIds) {
    Tag awsNameTag = new Tag().withKey(AWS_TAG_NAME).withValue(name);
    tagResources(client, Collections.singletonList(awsNameTag), resourceIds);
}
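The tagResources method it delegates to is not shown here; with the v1 EC2 API it plausibly wraps a CreateTagsRequest. A minimal sketch under that assumption (the actual photon-model implementation may differ, e.g. with retry or async handling):

// A minimal sketch of what tagResources could look like with the v1 EC2 API;
// the actual photon-model implementation may differ.
public static void tagResources(AmazonEC2AsyncClient client, List<Tag> tags, String... resourceIds) {
    // createTags applies every tag in the request to every listed resource.
    CreateTagsRequest request = new CreateTagsRequest()
        .withResources(resourceIds)
        .withTags(tags);
    client.createTags(request);
}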
Use of com.amazonaws.services.ec2.model.Tag in project photon-model by vmware.
Class AWSComputeStateCreationAdapterService, method createTags.
/**
 * POSTs all tags for newly discovered instances. Even if some tags already exist, we rely on
 * IDEMPOTENT_POST behaviour and POST them again. All tags that were created successfully are
 * stored in the createdExternalTags list.
 */
private void createTags(AWSComputeStateCreationContext context, AWSComputeStateCreationStage next) {
    // Get all tags from the instances to be created.
    Set<Tag> create = context.request.instancesToBeCreated.stream()
        .flatMap(i -> i.getTags().stream())
        .collect(Collectors.toSet());

    // Put them in a set to remove the duplicates.
    Set<Tag> allTags = new HashSet<>();
    allTags.addAll(create);

    // POST each of the tags. If a tag exists, it won't be created again. We don't want the
    // name tags, so filter them out.
    List<Operation> operations = new ArrayList<>();
    Map<Long, Tag> tagsCreationOperationIdsMap = new ConcurrentHashMap<>();
    allTags.stream().filter(t -> !AWSConstants.AWS_TAG_NAME.equals(t.getKey())).forEach(t -> {
        TagState tagState = newTagState(t.getKey(), t.getValue(), true, context.request.tenantLinks);
        Operation op = Operation.createPost(this, TagService.FACTORY_LINK).setBody(tagState);
        operations.add(op);
        tagsCreationOperationIdsMap.put(op.getId(), t);
    });

    if (operations.isEmpty()) {
        context.creationStage = next;
        handleComputeStateCreateOrUpdate(context);
    } else {
        OperationJoin.create(operations).setCompletion((ops, exs) -> {
            if (exs != null && !exs.isEmpty()) {
                logSevere(() -> String.format("Error creating %s external tags for compute states: %s", exs.size(), Utils.toString(exs.get(0))));
            }
            ops.values().stream()
                .filter(operation -> operation.getStatusCode() == Operation.STATUS_CODE_OK
                    || operation.getStatusCode() == Operation.STATUS_CODE_NOT_MODIFIED)
                .forEach(operation -> {
                    if (tagsCreationOperationIdsMap.containsKey(operation.getId())) {
                        context.createdExternalTags.add(tagsCreationOperationIdsMap.get(operation.getId()));
                    }
                });
            context.creationStage = next;
            handleComputeStateCreateOrUpdate(context);
        }).sendWith(this);
    }
}
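The newTagState helper used above is defined elsewhere in photon-model. A minimal sketch of what it plausibly does, assuming a TagState document with key, value, external, and tenantLinks fields (the real helper may also normalize or validate input):

// A minimal sketch of the newTagState helper used above; the real photon-model
// helper may differ. TagState fields assumed: key, value, external, tenantLinks.
public static TagState newTagState(String key, String value, boolean external, List<String> tenantLinks) {
    TagState tagState = new TagState();
    tagState.key = key == null ? "" : key;
    tagState.value = value == null ? "" : value;
    // Mark the tag as discovered on the remote endpoint rather than created locally.
    tagState.external = external;
    tagState.tenantLinks = tenantLinks;
    return tagState;
}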