Example 56 with Tag

use of com.amazonaws.services.ec2.model.Tag in project photon-model by vmware.

the class AWSInstanceContext method getAWSTags.

/**
 * Returns a set of {@link Tag} objects to be used to tag this instance in AWS.
 * Also adds a {@link AWSConstants#AWS_TAG_NAME} tag.
 *
 * @return A set of AWS {@link Tag} objects.
 */
public Collection<Tag> getAWSTags() {
    Set<Tag> instanceTags = new HashSet<>();
    instanceTags.add(new Tag(AWS_TAG_NAME, this.child.name));
    tagStates.forEach(tagState -> {
        Tag tag = new Tag(tagState.key, tagState.value);
        instanceTags.add(tag);
    });
    return instanceTags;
}
Also used : Tag(com.amazonaws.services.ec2.model.Tag) HashSet(java.util.HashSet)
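
The tags built here are normally applied to an instance that already exists, but the same SDK can also attach them at launch. The sketch below is a generic illustration rather than photon-model code; the AMI id and instance type are placeholder assumptions, and the caller is expected to supply an initialized AmazonEC2 client.

// Minimal sketch (not photon-model code): apply the Tag collection returned by
// getAWSTags() at creation time through a TagSpecification on RunInstances.
// The AMI id and instance type below are placeholder assumptions.
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.model.ResourceType;
import com.amazonaws.services.ec2.model.RunInstancesRequest;
import com.amazonaws.services.ec2.model.Tag;
import com.amazonaws.services.ec2.model.TagSpecification;
import java.util.Collection;

public class TagOnLaunchSketch {

    public static void runTaggedInstance(AmazonEC2 ec2, Collection<Tag> tags) {
        // Tag the instance as part of the RunInstances call instead of issuing
        // a separate CreateTags request after launch.
        TagSpecification tagSpecification = new TagSpecification()
                .withResourceType(ResourceType.Instance)
                .withTags(tags);
        RunInstancesRequest request = new RunInstancesRequest()
                // placeholder AMI id and instance type
                .withImageId("ami-12345678")
                .withInstanceType("t2.micro")
                .withMinCount(1)
                .withMaxCount(1)
                .withTagSpecifications(tagSpecification);
        ec2.runInstances(request);
    }
}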

Example 57 with Tag

use of com.amazonaws.services.s3.model.Tag in project herd by FINRAOS.

the class StoragePolicyProcessorHelperServiceTest method runExecuteStoragePolicyTransitionTest.

private void runExecuteStoragePolicyTransitionTest() {
    // Create S3FileTransferRequestParamsDto to access the S3 bucket location.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(TEST_S3_KEY_PREFIX + "/").build();
    // Create a list of storage files.
    List<StorageFile> storageFiles = new ArrayList<>();
    for (String file : LOCAL_FILES) {
        storageFiles.add(new StorageFile(String.format("%s/%s", TEST_S3_KEY_PREFIX, file), FILE_SIZE_1_KB, ROW_COUNT));
    }
    try {
        // Put relative S3 files into the S3 bucket.
        for (StorageFile storageFile : storageFiles) {
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFile.getFilePath(), new ByteArrayInputStream(new byte[(int) FILE_SIZE_1_KB]), null), null);
        }
        // Execute a storage policy transition.
        storagePolicyProcessorHelperService.executeStoragePolicyTransition(new StoragePolicyTransitionParamsDto(new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE, NO_SUBPARTITION_VALUES, DATA_VERSION), STORAGE_NAME, NO_S3_ENDPOINT, S3_BUCKET_NAME, TEST_S3_KEY_PREFIX, NO_STORAGE_UNIT_STATUS, NO_STORAGE_UNIT_STATUS, storageFiles, S3_ARCHIVE_TO_GLACIER_TAG_KEY, S3_ARCHIVE_TO_GLACIER_TAG_VALUE, S3_OBJECT_TAGGER_ROLE_ARN, S3_OBJECT_TAGGER_ROLE_SESSION_NAME));
        // Validate that all S3 files are now tagged.
        for (StorageFile storageFile : storageFiles) {
            GetObjectTaggingResult getObjectTaggingResult = s3Operations.getObjectTagging(new GetObjectTaggingRequest(S3_BUCKET_NAME, storageFile.getFilePath()), null);
            assertEquals(Arrays.asList(new Tag(S3_ARCHIVE_TO_GLACIER_TAG_KEY, S3_ARCHIVE_TO_GLACIER_TAG_VALUE)), getObjectTaggingResult.getTagSet());
        }
    } finally {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty()) {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();
    }
}
Also used : GetObjectTaggingRequest(com.amazonaws.services.s3.model.GetObjectTaggingRequest) S3FileTransferRequestParamsDto(org.finra.herd.model.dto.S3FileTransferRequestParamsDto) ByteArrayInputStream(java.io.ByteArrayInputStream) StorageFile(org.finra.herd.model.api.xml.StorageFile) ArrayList(java.util.ArrayList) StoragePolicyTransitionParamsDto(org.finra.herd.model.dto.StoragePolicyTransitionParamsDto) Tag(com.amazonaws.services.s3.model.Tag) BusinessObjectDataKey(org.finra.herd.model.api.xml.BusinessObjectDataKey) PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest) GetObjectTaggingResult(com.amazonaws.services.s3.model.GetObjectTaggingResult)
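
For context, the tag that this test verifies is applied to each S3 object through the S3 object-tagging API. The sketch below shows the direct v1 SDK call for tagging a single object; it is a generic illustration, not herd code, and the caller is assumed to supply an initialized AmazonS3 client.

// Generic sketch (not herd code): put a single tag on an S3 object with the
// v1 SDK. setObjectTagging replaces the object's entire tag set, which is
// consistent with the test above asserting exactly one tag per file.
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectTagging;
import com.amazonaws.services.s3.model.SetObjectTaggingRequest;
import com.amazonaws.services.s3.model.Tag;
import java.util.Collections;

public class TagS3ObjectSketch {

    public static void tagObject(AmazonS3 s3, String bucketName, String key, String tagKey, String tagValue) {
        ObjectTagging tagging = new ObjectTagging(Collections.singletonList(new Tag(tagKey, tagValue)));
        s3.setObjectTagging(new SetObjectTaggingRequest(bucketName, key, tagging));
    }
}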

Example 58 with Tag

use of com.amazonaws.services.s3.model.PartETag in project android-simpl3r by jgilfelt.

the class Uploader method start.

/**
 * Initiate a multipart file upload to Amazon S3
 *
 * @return the URL of a successfully uploaded file
 */
public String start() {
    // initialize
    List<PartETag> partETags = new ArrayList<PartETag>();
    final long contentLength = file.length();
    long filePosition = 0;
    int startPartNumber = 1;
    userInterrupted = false;
    userAborted = false;
    bytesUploaded = 0;
    // check if we can resume an incomplete upload
    String uploadId = getCachedUploadId();
    if (uploadId != null) {
        // we can resume the upload
        Log.i(TAG, "resuming upload for " + uploadId);
        // get the cached etags
        List<PartETag> cachedEtags = getCachedPartEtags();
        partETags.addAll(cachedEtags);
        // calculate the start position for resume
        startPartNumber = cachedEtags.size() + 1;
        filePosition = (startPartNumber - 1) * partSize;
        bytesUploaded = filePosition;
        Log.i(TAG, "resuming at part " + startPartNumber + " position " + filePosition);
    } else {
        // initiate a new multi part upload
        Log.i(TAG, "initiating new upload");
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(s3bucketName, s3key);
        configureInitiateRequest(initRequest);
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();
    }
    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(s3bucketName, s3key, uploadId);
    for (int k = startPartNumber; filePosition < contentLength; k++) {
        long thisPartSize = Math.min(partSize, (contentLength - filePosition));
        Log.i(TAG, "starting file part " + k + " with size " + thisPartSize);
        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(s3bucketName).withKey(s3key).withUploadId(uploadId).withPartNumber(k).withFileOffset(filePosition).withFile(file).withPartSize(thisPartSize);
        ProgressListener s3progressListener = new ProgressListener() {

            public void progressChanged(ProgressEvent progressEvent) {
                // TODO calling shutdown too brute force?
                if (userInterrupted) {
                    s3Client.shutdown();
                    throw new UploadIterruptedException("User interrupted");
                } else if (userAborted) {
                    // aborted requests cannot be resumed, so clear any cached etags
                    clearProgressCache();
                    s3Client.abortMultipartUpload(abortRequest);
                    s3Client.shutdown();
                }
                bytesUploaded += progressEvent.getBytesTransfered();
                // Log.d(TAG, "bytesUploaded=" + bytesUploaded);
                // broadcast progress
                float fpercent = ((bytesUploaded * 100) / contentLength);
                int percent = Math.round(fpercent);
                if (progressListener != null) {
                    progressListener.progressChanged(progressEvent, bytesUploaded, percent);
                }
            }
        };
        uploadRequest.setProgressListener(s3progressListener);
        UploadPartResult result = s3Client.uploadPart(uploadRequest);
        partETags.add(result.getPartETag());
        // cache the part progress for this upload
        if (k == 1) {
            initProgressCache(uploadId);
        }
        // store part etag
        cachePartEtag(result);
        filePosition += thisPartSize;
    }
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(s3bucketName, s3key, uploadId, partETags);
    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
    bytesUploaded = 0;
    Log.i(TAG, "upload complete for " + uploadId);
    clearProgressCache();
    return result.getLocation();
}
Also used : InitiateMultipartUploadResult(com.amazonaws.services.s3.model.InitiateMultipartUploadResult) ArrayList(java.util.ArrayList) InitiateMultipartUploadRequest(com.amazonaws.services.s3.model.InitiateMultipartUploadRequest) UploadPartRequest(com.amazonaws.services.s3.model.UploadPartRequest) AbortMultipartUploadRequest(com.amazonaws.services.s3.model.AbortMultipartUploadRequest) CompleteMultipartUploadResult(com.amazonaws.services.s3.model.CompleteMultipartUploadResult) ProgressEvent(com.amazonaws.services.s3.model.ProgressEvent) PartETag(com.amazonaws.services.s3.model.PartETag) UploadPartResult(com.amazonaws.services.s3.model.UploadPartResult) ProgressListener(com.amazonaws.services.s3.model.ProgressListener) CompleteMultipartUploadRequest(com.amazonaws.services.s3.model.CompleteMultipartUploadRequest)
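
The resume path above derives its restart position purely from the number of cached part ETags and the fixed part size. A minimal sketch of that arithmetic, assuming every cached part was uploaded with the same partSize:

// Sketch of the resume arithmetic used in start(): with a fixed partSize,
// n cached PartETags mean the next part number is n + 1 and n * partSize
// bytes are already on S3. (S3 requires every part except the last to be at
// least 5 MB, so partSize must respect that minimum.)
static long resumeOffset(int cachedPartCount, long partSize) {
    int startPartNumber = cachedPartCount + 1; // part numbers are 1-based
    return (startPartNumber - 1) * partSize;   // bytes already uploaded
}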

Example 59 with Tag

use of com.amazonaws.services.ec2.model.Tag in project SimianArmy by Netflix.

the class AWSClient method createTagsForResources.

@Override
public void createTagsForResources(Map<String, String> keyValueMap, String... resourceIds) {
    Validate.notNull(keyValueMap);
    Validate.notEmpty(keyValueMap);
    Validate.notNull(resourceIds);
    Validate.notEmpty(resourceIds);
    AmazonEC2 ec2Client = ec2Client();
    List<Tag> tags = new ArrayList<Tag>();
    for (Map.Entry<String, String> entry : keyValueMap.entrySet()) {
        tags.add(new Tag(entry.getKey(), entry.getValue()));
    }
    CreateTagsRequest req = new CreateTagsRequest(Arrays.asList(resourceIds), tags);
    ec2Client.createTags(req);
}
Also used : AmazonEC2(com.amazonaws.services.ec2.AmazonEC2) Tag(com.amazonaws.services.ec2.model.Tag)
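
A hypothetical caller of this helper, assuming an initialized SimianArmy AWSClient named awsClient; the tag values and instance id are placeholder assumptions, not SimianArmy code.

// Hypothetical usage (tag values and the instance id are placeholders).
Map<String, String> tags = new HashMap<>();
tags.put("owner", "simianarmy");
tags.put("monkey", "janitor");
awsClient.createTagsForResources(tags, "i-0123456789abcdef0");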

Example 60 with Tag

use of com.amazonaws.services.s3.model.Tag in project aws-doc-sdk-examples by awsdocs.

the class GetObjectTags2 method main.

public static void main(String[] args) {
    if (args.length < 2) {
        System.out.println("Please specify a bucket name and key name");
        System.exit(1);
    }
    // snippet-start:[s3.java.getobjecttags.main]
    String bucketName = args[0];
    String keyName = args[1];
    System.out.println("Retrieving Object Tags for " + keyName);
    final AmazonS3 s3 = AmazonS3ClientBuilder.standard().withRegion(Regions.DEFAULT_REGION).build();
    try {
        GetObjectTaggingRequest getTaggingRequest = new GetObjectTaggingRequest(bucketName, keyName);
        GetObjectTaggingResult tags = s3.getObjectTagging(getTaggingRequest);
        List<Tag> tagSet = tags.getTagSet();
        // Iterate through the list
        Iterator<Tag> tagIterator = tagSet.iterator();
        while (tagIterator.hasNext()) {
            Tag tag = tagIterator.next();
            System.out.println(tag.getKey());
            System.out.println(tag.getValue());
        }
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
// snippet-end:[s3.java.getobjecttags.main]
}
Also used : GetObjectTaggingRequest(com.amazonaws.services.s3.model.GetObjectTaggingRequest) AmazonS3(com.amazonaws.services.s3.AmazonS3) AmazonServiceException(com.amazonaws.AmazonServiceException) Tag(com.amazonaws.services.s3.model.Tag) GetObjectTaggingResult(com.amazonaws.services.s3.model.GetObjectTaggingResult)
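
The rest of the object-tagging API follows the same request/result pattern. For example, removing all tags from the same object is a one-liner, assuming the s3 client, bucketName, and keyName from the example above (DeleteObjectTaggingRequest lives in com.amazonaws.services.s3.model):

// Sketch: delete every tag on the object, reusing the s3 client, bucketName,
// and keyName from the example above.
s3.deleteObjectTagging(new DeleteObjectTaggingRequest(bucketName, keyName));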

Aggregations

Tag (com.amazonaws.services.ec2.model.Tag): 38
ArrayList (java.util.ArrayList): 30
Tag (com.amazonaws.services.s3.model.Tag): 19
HashMap (java.util.HashMap): 18
Test (org.junit.Test): 17
List (java.util.List): 16
Instance (com.amazonaws.services.ec2.model.Instance): 15
S3FileTransferRequestParamsDto (org.finra.herd.model.dto.S3FileTransferRequestParamsDto): 14
Map (java.util.Map): 12
HashSet (java.util.HashSet): 10
GetObjectTaggingRequest (com.amazonaws.services.s3.model.GetObjectTaggingRequest): 9
GetObjectTaggingResult (com.amazonaws.services.s3.model.GetObjectTaggingResult): 9
Utils (com.vmware.xenon.common.Utils): 9
Set (java.util.Set): 9
File (java.io.File): 8
CreateTagsRequest (com.amazonaws.services.ec2.model.CreateTagsRequest): 7
Reservation (com.amazonaws.services.ec2.model.Reservation): 7
S3ObjectSummary (com.amazonaws.services.s3.model.S3ObjectSummary): 6
TagState (com.vmware.photon.controller.model.resources.TagService.TagState): 6
DeferredResult (com.vmware.xenon.common.DeferredResult): 6