use of com.amazonaws.services.s3.model.UploadPartRequest (AWS SDK for Java v1) in project alluxio by Alluxio.
the class S3ALowLevelOutputStreamTest method mockS3ClientAndExecutor.
/**
 * Wires up mock collaborators for the low-level S3 output stream tests:
 * an {@link AmazonS3} client whose multipart calls return canned results,
 * and a {@link ListeningExecutorService} whose submissions resolve to a
 * fixed {@link PartETag} future.
 */
private void mockS3ClientAndExecutor() throws Exception {
    // Mock S3 client: initiation hands back a result carrying UPLOAD_ID.
    mMockS3Client = PowerMockito.mock(AmazonS3.class);
    InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
    initUploadResult.setUploadId(UPLOAD_ID);
    when(mMockS3Client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class)))
        .thenReturn(initUploadResult);

    // Each uploadPart call echoes the request's part number back in its result.
    when(mMockS3Client.uploadPart(any(UploadPartRequest.class))).thenAnswer(invocation -> {
        UploadPartRequest request = (UploadPartRequest) invocation.getArguments()[0];
        UploadPartResult partResult = new UploadPartResult();
        partResult.setPartNumber(request.getPartNumber());
        return partResult;
    });

    // Completion succeeds with an empty result object.
    when(mMockS3Client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class)))
        .thenReturn(new CompleteMultipartUploadResult());

    // Mock executor: every submitted Callable resolves to a fixed part ETag.
    mMockTag = (ListenableFuture<PartETag>) PowerMockito.mock(ListenableFuture.class);
    when(mMockTag.get()).thenReturn(new PartETag(1, "someTag"));
    mMockExecutor = Mockito.mock(ListeningExecutorService.class);
    when(mMockExecutor.submit(any(Callable.class))).thenReturn(mMockTag);
}
use of com.amazonaws.services.s3.model.UploadPartRequest (AWS SDK for Java v1) in project gradle by gradle.
the class S3Client method putMultiPartObject.
/**
 * Streams {@code inputStream} to S3 as a multi-part upload.
 *
 * @param inputStream   source of the object bytes; read sequentially, one part at a time
 * @param contentLength total number of bytes to upload (drives the part loop)
 * @param destination   S3 URI identifying the target bucket and key
 * @throws org.gradle.api.resources.ResourceException if the upload fails; a partially
 *         completed multipart upload is aborted before the failure is surfaced
 */
private void putMultiPartObject(InputStream inputStream, Long contentLength, URI destination) {
    try {
        S3RegionalResource s3RegionalResource = new S3RegionalResource(destination);
        String bucketName = s3RegionalResource.getBucketName();
        String s3BucketKey = s3RegionalResource.getKey();
        configureClient(s3RegionalResource);
        List<PartETag> partETags = new ArrayList<>();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, s3BucketKey).withCannedACL(CannedAccessControlList.BucketOwnerFullControl);
        InitiateMultipartUploadResult initResponse = amazonS3Client.initiateMultipartUpload(initRequest);
        try {
            long filePosition = 0;
            final long configuredPartSize = s3ConnectionProperties.getPartSize();
            LOGGER.debug("Attempting to put resource:[{}] into s3 bucket [{}]", s3BucketKey, bucketName);
            for (int partNumber = 1; filePosition < contentLength; partNumber++) {
                // The final part may be smaller than the configured part size.
                long partSize = Math.min(configuredPartSize, contentLength - filePosition);
                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucketName).withKey(s3BucketKey).withUploadId(initResponse.getUploadId()).withPartNumber(partNumber).withPartSize(partSize).withInputStream(inputStream);
                partETags.add(amazonS3Client.uploadPart(uploadPartRequest).getPartETag());
                filePosition += partSize;
            }
            CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(bucketName, s3BucketKey, initResponse.getUploadId(), partETags);
            amazonS3Client.completeMultipartUpload(completeRequest);
        } catch (RuntimeException e) {
            // Abort on ANY runtime failure, not only AmazonClientException; otherwise a
            // half-finished upload lingers in the bucket and keeps accruing storage costs.
            // AmazonClientException extends RuntimeException, so the original abort
            // behaviour is preserved and the outer handler below still sees it.
            amazonS3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, s3BucketKey, initResponse.getUploadId()));
            throw e;
        }
    } catch (AmazonClientException e) {
        throw ResourceExceptions.putFailed(destination, e);
    }
}
use of com.amazonaws.services.s3.model.UploadPartRequest (AWS SDK for Java v1) in project camel by apache.
the class S3Producer method processMultiPart.
/**
 * Uploads the exchange body to S3 with the multi-part upload API.
 * <p>
 * The mandatory body must be a {@link File} (or a {@link WrappedFile} wrapping one).
 * On success the ETag (and version id, if any) of the completed upload are set as
 * headers on the response message; on failure the in-progress multipart upload is
 * aborted and the exception rethrown.
 *
 * @param exchange the exchange whose mandatory body supplies the file payload
 * @throws InvalidArgumentException if the body is not a File
 * @throws Exception if any part upload or the completion call fails
 */
public void processMultiPart(final Exchange exchange) throws Exception {
    File filePayload = null;
    Object obj = exchange.getIn().getMandatoryBody();
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
    } else {
        throw new InvalidArgumentException("aws-s3: MultiPart upload requires a File input.");
    }
    ObjectMetadata objectMetadata = determineMetadata(exchange);
    if (objectMetadata.getContentLength() == 0) {
        // Metadata did not carry a length; fall back to the file size.
        objectMetadata.setContentLength(filePayload.length());
    }
    final String keyName = determineKey(exchange);
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(getConfiguration().getBucketName(), keyName, objectMetadata);
    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        initRequest.setStorageClass(StorageClass.fromValue(storageClass));
    }
    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        initRequest.setCannedACL(objectAcl);
    }
    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedacl and acl are both specified the last one will be used. refer to
        // PutObjectRequest#setAccessControlList for more details
        initRequest.setAccessControlList(acl);
    }
    LOG.trace("Initiating multipart upload [{}] from exchange [{}]...", initRequest, exchange);
    final InitiateMultipartUploadResult initResponse = getEndpoint().getS3Client().initiateMultipartUpload(initRequest);
    final long contentLength = objectMetadata.getContentLength();
    final List<PartETag> partETags = new ArrayList<>();
    // Keep the configured size immutable; compute each part's size per iteration.
    final long configuredPartSize = getConfiguration().getPartSize();
    CompleteMultipartUploadResult uploadResult = null;
    long filePosition = 0;
    try {
        for (int part = 1; filePosition < contentLength; part++) {
            // The final part may be smaller than the configured part size.
            long partSize = Math.min(configuredPartSize, contentLength - filePosition);
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(getConfiguration().getBucketName()).withKey(keyName).withUploadId(initResponse.getUploadId()).withPartNumber(part).withFileOffset(filePosition).withFile(filePayload).withPartSize(partSize);
            LOG.trace("Uploading part [{}] for {}", part, keyName);
            partETags.add(getEndpoint().getS3Client().uploadPart(uploadRequest).getPartETag());
            filePosition += partSize;
        }
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(getConfiguration().getBucketName(), keyName, initResponse.getUploadId(), partETags);
        uploadResult = getEndpoint().getS3Client().completeMultipartUpload(compRequest);
    } catch (Exception e) {
        // Abort so the incomplete upload does not linger in the bucket, then rethrow.
        getEndpoint().getS3Client().abortMultipartUpload(new AbortMultipartUploadRequest(getConfiguration().getBucketName(), keyName, initResponse.getUploadId()));
        throw e;
    }
    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, uploadResult.getETag());
    if (uploadResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, uploadResult.getVersionId());
    }
    // filePayload is non-null here (validated above); the check is kept defensively.
    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        FileUtil.deleteFile(filePayload);
    }
}
use of com.amazonaws.services.s3.model.UploadPartRequest (AWS SDK for Java v1) in project nifi by apache.
the class PutS3Object method onTrigger.
/**
 * Puts the incoming FlowFile's content into S3.
 *
 * Chooses between a single PutObject call and a resumable multipart upload based
 * on the configured multipart threshold. Multipart progress (upload id, part
 * ETags, file position) is persisted under a per-processor/bucket/key cache key
 * so an interrupted upload can be resumed on a later trigger. Successful
 * transfers go to REL_SUCCESS with S3 result attributes; failures are penalized
 * and routed to REL_FAILURE, except an unscheduled-processor interruption,
 * which rolls the session back.
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        // Nothing queued for this trigger.
        return;
    }
    final long startNanos = System.nanoTime();
    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    // Cache key scopes persisted multipart state to this processor instance + object.
    final String cacheKey = getIdentifier() + "/" + bucket + "/" + key;
    final AmazonS3Client s3 = getClient();
    // Effectively-final alias so the anonymous callback below can capture it.
    final FlowFile ff = flowFile;
    final Map<String, String> attributes = new HashMap<>();
    final String ffFilename = ff.getAttributes().get(CoreAttributes.FILENAME.key());
    attributes.put(S3_BUCKET_KEY, bucket);
    attributes.put(S3_OBJECT_KEY, key);
    final Long multipartThreshold = context.getProperty(MULTIPART_THRESHOLD).asDataSize(DataUnit.B).longValue();
    final Long multipartPartSize = context.getProperty(MULTIPART_PART_SIZE).asDataSize(DataUnit.B).longValue();
    final long now = System.currentTimeMillis();
    /*
     * If necessary, run age off for existing uploads in AWS S3 and local state
     */
    ageoffS3Uploads(context, s3, now);
    /*
     * Then perform the upload itself, streaming the FlowFile content.
     */
    try {
        session.read(flowFile, new InputStreamCallback() {

            @Override
            public void process(final InputStream rawIn) throws IOException {
                try (final InputStream in = new BufferedInputStream(rawIn)) {
                    final ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.setContentDisposition(ff.getAttribute(CoreAttributes.FILENAME.key()));
                    objectMetadata.setContentLength(ff.getSize());
                    final String contentType = context.getProperty(CONTENT_TYPE).evaluateAttributeExpressions(ff).getValue();
                    if (contentType != null) {
                        objectMetadata.setContentType(contentType);
                        attributes.put(S3_CONTENT_TYPE, contentType);
                    }
                    final String expirationRule = context.getProperty(EXPIRATION_RULE_ID).evaluateAttributeExpressions(ff).getValue();
                    if (expirationRule != null) {
                        objectMetadata.setExpirationTimeRuleId(expirationRule);
                    }
                    // Dynamic processor properties become S3 user metadata on the object.
                    final Map<String, String> userMetadata = new HashMap<>();
                    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties().entrySet()) {
                        if (entry.getKey().isDynamic()) {
                            final String value = context.getProperty(entry.getKey()).evaluateAttributeExpressions(ff).getValue();
                            userMetadata.put(entry.getKey().getName(), value);
                        }
                    }
                    final String serverSideEncryption = context.getProperty(SERVER_SIDE_ENCRYPTION).getValue();
                    if (!serverSideEncryption.equals(NO_SERVER_SIDE_ENCRYPTION)) {
                        objectMetadata.setSSEAlgorithm(serverSideEncryption);
                        attributes.put(S3_SSE_ALGORITHM, serverSideEncryption);
                    }
                    if (!userMetadata.isEmpty()) {
                        objectMetadata.setUserMetadata(userMetadata);
                    }
                    if (ff.getSize() <= multipartThreshold) {
                        // ----------------------------------------
                        // single part upload
                        // ----------------------------------------
                        final PutObjectRequest request = new PutObjectRequest(bucket, key, in, objectMetadata);
                        request.setStorageClass(StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                        final AccessControlList acl = createACL(context, ff);
                        if (acl != null) {
                            request.setAccessControlList(acl);
                        }
                        final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                        if (cannedAcl != null) {
                            request.withCannedAcl(cannedAcl);
                        }
                        try {
                            final PutObjectResult result = s3.putObject(request);
                            // Propagate whatever metadata S3 returned into FlowFile attributes.
                            if (result.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, result.getVersionId());
                            }
                            if (result.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, result.getETag());
                            }
                            if (result.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY, result.getExpirationTime().toString());
                            }
                            if (result.getMetadata().getRawMetadata().keySet().contains(S3_STORAGECLASS_META_KEY)) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY, result.getMetadata().getRawMetadataValue(S3_STORAGECLASS_META_KEY).toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_PUTOBJECT);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}", new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    } else {
                        // ----------------------------------------
                        // multipart upload
                        // ----------------------------------------
                        // load or create persistent state
                        // ------------------------------------------------------------
                        MultipartState currentState;
                        try {
                            // Reuse saved state only if the upload still exists in S3.
                            currentState = getLocalStateIfInS3(s3, bucket, cacheKey);
                            if (currentState != null) {
                                if (currentState.getPartETags().size() > 0) {
                                    final PartETag lastETag = currentState.getPartETags().get(currentState.getPartETags().size() - 1);
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' " + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' " + "contentLength='{}' partsLoaded={} lastPart={}/{}", new Object[] { ffFilename, bucket, key, currentState.getUploadId(), currentState.getFilePosition(), currentState.getPartSize(), currentState.getStorageClass().toString(), currentState.getContentLength(), currentState.getPartETags().size(), Integer.toString(lastETag.getPartNumber()), lastETag.getETag() });
                                } else {
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' " + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' " + "contentLength='{}' no partsLoaded", new Object[] { ffFilename, bucket, key, currentState.getUploadId(), currentState.getFilePosition(), currentState.getPartSize(), currentState.getStorageClass().toString(), currentState.getContentLength() });
                                }
                            } else {
                                // No resumable state: start fresh and persist before initiating.
                                currentState = new MultipartState();
                                currentState.setPartSize(multipartPartSize);
                                currentState.setStorageClass(StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                                currentState.setContentLength(ff.getSize());
                                persistLocalState(cacheKey, currentState);
                                getLogger().info("Starting new upload for flowfile='{}' bucket='{}' key='{}'", new Object[] { ffFilename, bucket, key });
                            }
                        } catch (IOException e) {
                            getLogger().error("IOException initiating cache state while processing flow files: " + e.getMessage());
                            throw (e);
                        }
                        // initiate the multipart upload, or skip ahead when resuming
                        // ------------------------------------------------------------
                        if (currentState.getUploadId().isEmpty()) {
                            final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(bucket, key, objectMetadata);
                            initiateRequest.setStorageClass(currentState.getStorageClass());
                            final AccessControlList acl = createACL(context, ff);
                            if (acl != null) {
                                initiateRequest.setAccessControlList(acl);
                            }
                            final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                            if (cannedAcl != null) {
                                initiateRequest.withCannedACL(cannedAcl);
                            }
                            try {
                                final InitiateMultipartUploadResult initiateResult = s3.initiateMultipartUpload(initiateRequest);
                                currentState.setUploadId(initiateResult.getUploadId());
                                currentState.getPartETags().clear();
                                try {
                                    // Persist the new upload id so a later trigger can resume.
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state while processing flow file: " + e.getMessage());
                                    throw (new ProcessException("Exception saving cache state", e));
                                }
                                getLogger().info("Success initiating upload flowfile={} available={} position={} " + "length={} bucket={} key={} uploadId={}", new Object[] { ffFilename, in.available(), currentState.getFilePosition(), currentState.getContentLength(), bucket, key, currentState.getUploadId() });
                                if (initiateResult.getUploadId() != null) {
                                    attributes.put(S3_UPLOAD_ID_ATTR_KEY, initiateResult.getUploadId());
                                }
                            } catch (AmazonClientException e) {
                                getLogger().info("Failure initiating upload flowfile={} bucket={} key={} reason={}", new Object[] { ffFilename, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        } else {
                            if (currentState.getFilePosition() > 0) {
                                // Resuming: advance the stream past the bytes already uploaded.
                                try {
                                    final long skipped = in.skip(currentState.getFilePosition());
                                    if (skipped != currentState.getFilePosition()) {
                                        getLogger().info("Failure skipping to resume upload flowfile={} " + "bucket={} key={} position={} skipped={}", new Object[] { ffFilename, bucket, key, currentState.getFilePosition(), skipped });
                                    }
                                } catch (Exception e) {
                                    getLogger().info("Failure skipping to resume upload flowfile={} bucket={} " + "key={} position={} reason={}", new Object[] { ffFilename, bucket, key, currentState.getFilePosition(), e.getMessage() });
                                    throw (new ProcessException(e));
                                }
                            }
                        }
                        // upload parts
                        // ------------------------------------------------------------
                        long thisPartSize;
                        // Part numbering continues from the already-uploaded parts when resuming.
                        for (int part = currentState.getPartETags().size() + 1; currentState.getFilePosition() < currentState.getContentLength(); part++) {
                            if (!PutS3Object.this.isScheduled()) {
                                // Processor was stopped mid-upload; bail out so the session rolls back
                                // and the persisted state allows resuming later.
                                throw new IOException(S3_PROCESS_UNSCHEDULED_MESSAGE + " flowfile=" + ffFilename + " part=" + part + " uploadId=" + currentState.getUploadId());
                            }
                            // The final part may be smaller than the configured part size.
                            thisPartSize = Math.min(currentState.getPartSize(), (currentState.getContentLength() - currentState.getFilePosition()));
                            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket).withKey(key).withUploadId(currentState.getUploadId()).withInputStream(in).withPartNumber(part).withPartSize(thisPartSize);
                            try {
                                UploadPartResult uploadPartResult = s3.uploadPart(uploadRequest);
                                currentState.addPartETag(uploadPartResult.getPartETag());
                                currentState.setFilePosition(currentState.getFilePosition() + thisPartSize);
                                try {
                                    // Best-effort checkpoint after each part; a save failure is
                                    // logged but does not fail the upload.
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state processing flow file: " + e.getMessage());
                                }
                                getLogger().info("Success uploading part flowfile={} part={} available={} " + "etag={} uploadId={}", new Object[] { ffFilename, part, in.available(), uploadPartResult.getETag(), currentState.getUploadId() });
                            } catch (AmazonClientException e) {
                                getLogger().info("Failure uploading part flowfile={} part={} bucket={} key={} " + "reason={}", new Object[] { ffFilename, part, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        }
                        // complete multipart upload
                        // ------------------------------------------------------------
                        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(bucket, key, currentState.getUploadId(), currentState.getPartETags());
                        try {
                            CompleteMultipartUploadResult completeResult = s3.completeMultipartUpload(completeRequest);
                            getLogger().info("Success completing upload flowfile={} etag={} uploadId={}", new Object[] { ffFilename, completeResult.getETag(), currentState.getUploadId() });
                            if (completeResult.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, completeResult.getVersionId());
                            }
                            if (completeResult.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, completeResult.getETag());
                            }
                            if (completeResult.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY, completeResult.getExpirationTime().toString());
                            }
                            if (currentState.getStorageClass() != null) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY, currentState.getStorageClass().toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_MULTIPARTUPLOAD);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}", new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    }
                }
            }
        });
        if (!attributes.isEmpty()) {
            flowFile = session.putAllAttributes(flowFile, attributes);
        }
        session.transfer(flowFile, REL_SUCCESS);
        // Report provenance with the resolved object URL and elapsed time.
        final String url = s3.getResourceUrl(bucket, key);
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        session.getProvenanceReporter().send(flowFile, url, millis);
        getLogger().info("Successfully put {} to Amazon S3 in {} milliseconds", new Object[] { ff, millis });
        try {
            // Upload finished: the persisted resume state is no longer needed.
            removeLocalState(cacheKey);
        } catch (IOException e) {
            getLogger().info("Error trying to delete key {} from cache: {}", new Object[] { cacheKey, e.getMessage() });
        }
    } catch (final ProcessException | AmazonClientException pe) {
        if (pe.getMessage().contains(S3_PROCESS_UNSCHEDULED_MESSAGE)) {
            // Deliberate interruption (processor unscheduled): roll back so the
            // FlowFile is retried and the multipart upload can resume.
            getLogger().info(pe.getMessage());
            session.rollback();
        } else {
            getLogger().error("Failed to put {} to Amazon S3 due to {}", new Object[] { flowFile, pe });
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
        }
    }
}
use of com.amazonaws.services.s3.model.UploadPartRequest (AWS SDK for Java v1) in project apex-malhar by apache.
the class S3BlockUploadOperator method uploadBlockIntoS3.
/**
 * Upload the block into S3 bucket.
 * <p>
 * A file that consists of a single block (last block with part number 1) is
 * uploaded with a plain PutObject call; otherwise the block is sent as one
 * part of the file's multipart upload. If the block's file path or multipart
 * metadata has not arrived yet, the tuple is parked in {@code waitingTuples}
 * for a later attempt.
 *
 * @param tuple block data
 */
protected void uploadBlockIntoS3(AbstractBlockReader.ReaderRecord<Slice> tuple) {
    // Skip tuples from windows that have already been committed (replay after recovery).
    if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
        return;
    }
    // Check whether the block metadata is present for this block
    if (blockIdToFilePath.get(tuple.getBlockId()) == null) {
        if (!waitingTuples.contains(tuple)) {
            waitingTuples.add(tuple);
        }
        return;
    }
    String uniqueBlockId = getUniqueBlockIdFromFile(tuple.getBlockId(), blockIdToFilePath.get(tuple.getBlockId()));
    S3BlockMetaData metaData = blockInfo.get(uniqueBlockId);
    // Check whether the file metadata is received
    if (metaData == null) {
        if (!waitingTuples.contains(tuple)) {
            waitingTuples.add(tuple);
        }
        return;
    }
    long partSize = tuple.getRecord().length;
    PartETag partETag = null;
    // try-with-resources releases the stream on every path; the original only
    // closed it on the success path. (close() is a no-op for ByteArrayInputStream,
    // so behavior is unchanged.)
    try (ByteArrayInputStream bis = new ByteArrayInputStream(tuple.getRecord().buffer)) {
        // Check if it is a Single block of a file
        if (metaData.isLastBlock && metaData.partNo == 1) {
            ObjectMetadata omd = createObjectMetadata();
            omd.setContentLength(partSize);
            PutObjectResult result = s3Client.putObject(new PutObjectRequest(bucketName, metaData.getKeyName(), bis, omd));
            partETag = new PartETag(1, result.getETag());
        } else {
            // Else upload use multi-part feature
            try {
                // Create request to upload a part.
                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName).withKey(metaData.getKeyName()).withUploadId(metaData.getUploadId()).withPartNumber(metaData.getPartNo()).withInputStream(bis).withPartSize(partSize);
                partETag = s3Client.uploadPart(uploadRequest).getPartETag();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    } catch (IOException e) {
        // Matches the original handling of an IOException from close().
        throw new RuntimeException(e);
    }
    UploadBlockMetadata uploadmetadata = new UploadBlockMetadata(partETag, metaData.getKeyName());
    output.emit(uploadmetadata);
    currentWindowRecoveryState.put(uniqueBlockId, uploadmetadata);
}
Aggregations