Use of com.amazonaws.util.LengthCheckInputStream in project aws-sdk-android by aws-amplify.
The class AmazonS3Client, method getObject.
/*
 * (non-Javadoc)
 * @see com.amazonaws.services.s3.AmazonS3#getObject(com.amazonaws.services.s3.model.GetObjectRequest)
 */
@Override
public S3Object getObject(GetObjectRequest getObjectRequest)
        throws AmazonClientException, AmazonServiceException {
    assertParameterNotNull(getObjectRequest,
            "The GetObjectRequest parameter must be specified when requesting an object");
    assertParameterNotNull(getObjectRequest.getBucketName(),
            "The bucket name parameter must be specified when requesting an object");
    assertParameterNotNull(getObjectRequest.getKey(),
            "The key parameter must be specified when requesting an object");

    final Request<GetObjectRequest> request = createRequest(getObjectRequest.getBucketName(),
            getObjectRequest.getKey(), getObjectRequest, HttpMethodName.GET);

    if (getObjectRequest.getVersionId() != null) {
        request.addParameter("versionId", getObjectRequest.getVersionId());
    }

    // Range
    final long[] range = getObjectRequest.getRange();
    if (range != null) {
        String rangeHeader = "bytes=" + Long.toString(range[0]) + "-";
        if (range[1] >= 0) {
            /*
             * A negative end value is invalid for an S3 range GET and would
             * result in downloading the entire object, so the end byte is
             * appended only when it is non-negative; otherwise it is left
             * empty so the download resumes from range[0].
             */
            rangeHeader += Long.toString(range[1]);
        }
        request.addHeader(Headers.RANGE, rangeHeader);
    }

    populateRequesterPaysHeader(request, getObjectRequest.isRequesterPays());
    addResponseHeaderParameters(request, getObjectRequest.getResponseHeaders());
    addDateHeader(request, Headers.GET_OBJECT_IF_MODIFIED_SINCE,
            getObjectRequest.getModifiedSinceConstraint());
    addDateHeader(request, Headers.GET_OBJECT_IF_UNMODIFIED_SINCE,
            getObjectRequest.getUnmodifiedSinceConstraint());
    addStringListHeader(request, Headers.GET_OBJECT_IF_MATCH,
            getObjectRequest.getMatchingETagConstraints());
    addStringListHeader(request, Headers.GET_OBJECT_IF_NONE_MATCH,
            getObjectRequest.getNonmatchingETagConstraints());

    // Populate the SSE-C (customer-provided key) parameters in the request header.
    populateSSE_C(request, getObjectRequest.getSSECustomerKey());

    /*
     * This is compatible with a progress listener set by either the legacy
     * method GetObjectRequest#setProgressListener or the new method
     * GetObjectRequest#setGeneralProgressListener.
     */
    final ProgressListener progressListener = getObjectRequest.getGeneralProgressListener();
    final ProgressListenerCallbackExecutor progressListenerCallbackExecutor =
            ProgressListenerCallbackExecutor.wrapListener(progressListener);

    try {
        final S3Object s3Object = invoke(request, new S3ObjectResponseHandler(),
                getObjectRequest.getBucketName(), getObjectRequest.getKey());
        /*
         * TODO: For now, it's easiest to set these here in the client, but
         * we could push this back into the response handler with a little
         * more work.
         */
        s3Object.setBucketName(getObjectRequest.getBucketName());
        s3Object.setKey(getObjectRequest.getKey());

        InputStream input = s3Object.getObjectContent();

        // Hold a reference to this client while the InputStream is still
        // around - otherwise a finalizer in the HttpClient may reset the
        // underlying TCP connection out from under us.
        input = new ServiceClientHolderInputStream(input, this);

        // If someone is interested in progress updates, wrap the input
        // stream in a filter that will trigger progress reports.
        if (progressListenerCallbackExecutor != null) {
            @SuppressWarnings("resource")
            final ProgressReportingInputStream progressReportingInputStream =
                    new ProgressReportingInputStream(input, progressListenerCallbackExecutor);
            progressReportingInputStream.setFireCompletedEvent(true);
            progressReportingInputStream.setNotificationThreshold(this.notificationThreshold);
            input = progressReportingInputStream;
            fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.STARTED_EVENT_CODE);
        }

        // Ensure the data received from S3 has the same length as the
        // expected Content-Length.
        input = new LengthCheckInputStream(input,
                s3Object.getObjectMetadata().getContentLength(), // expected length
                INCLUDE_SKIPPED_BYTES); // bytes received from S3 all count, even if skipped

        // Re-wrap within an S3ObjectInputStream. Explicitly do not collect
        // metrics here because we know we're ultimately wrapping another
        // S3ObjectInputStream which will take care of that.
        s3Object.setObjectContent(new S3ObjectInputStream(input));
        return s3Object;
    } catch (final AmazonS3Exception ase) {
        /*
         * If the request failed because one of the specified constraints
         * was not met (e.g. a matching ETag or a modified-since date),
         * return null so that users don't have to wrap their code in
         * try/catch blocks and check for this status code if they want to
         * use constraints.
         */
        if (ase.getStatusCode() == 412 || ase.getStatusCode() == 304) {
            fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.CANCELED_EVENT_CODE);
            return null;
        }
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.FAILED_EVENT_CODE);
        throw ase;
    }
}
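For context, here is how the wrapped stream behaves from the caller's side. This sketch is illustrative and not part of the SDK source: the bucket and key names are hypothetical, and only the public GetObjectRequest/S3Object API used above is assumed. If the connection drops before Content-Length bytes arrive, the embedded LengthCheckInputStream surfaces an error at end-of-stream instead of silently truncating the download.

import java.io.InputStream;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3Object;

public class RangedGetSketch {
    // Reads the first 1 KiB of a (hypothetical) object through the wrapped stream.
    public static void readRange(AmazonS3 client) throws Exception {
        GetObjectRequest req = new GetObjectRequest("example-bucket", "example-key");
        req.setRange(0, 1023); // sent as "Range: bytes=0-1023" (see the range handling above)
        S3Object object = client.getObject(req);
        // getObject returns null when an ETag or date constraint fails
        // (HTTP 304/412), so callers using constraints must check for null.
        if (object == null) {
            return;
        }
        try (InputStream in = object.getObjectContent()) {
            byte[] buf = new byte[8192];
            int n;
            while ((n = in.read(buf)) != -1) {
                // consume buf[0..n); the length check fires at end-of-stream
                // if fewer bytes than Content-Length were delivered
            }
        } // closing the stream releases the underlying HTTP connection
    }
}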
Use of com.amazonaws.util.LengthCheckInputStream in project aws-sdk-android by aws-amplify.
The class AmazonS3Client, method putObject.
/*
 * (non-Javadoc)
 * @see com.amazonaws.services.s3.AmazonS3#putObject(com.amazonaws.services.s3.model.PutObjectRequest)
 */
@Override
public PutObjectResult putObject(PutObjectRequest putObjectRequest)
        throws AmazonClientException, AmazonServiceException {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    final String bucketName = putObjectRequest.getBucketName();
    final String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();

    /*
     * This is compatible with a progress listener set by either the legacy
     * method PutObjectRequest#setProgressListener or the new method
     * PutObjectRequest#setGeneralProgressListener.
     */
    final ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    final ProgressListenerCallbackExecutor progressListenerCallbackExecutor =
            ProgressListenerCallbackExecutor.wrapListener(progressListener);

    if (metadata == null) {
        metadata = new ObjectMetadata();
    }

    assertParameterNotNull(bucketName,
            "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key,
            "The key parameter must be specified when uploading an object");

    final boolean skipContentMd5Check = ServiceUtils.skipMd5CheckPerRequest(putObjectRequest,
            clientOptions);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options.
    if (putObjectRequest.getFile() != null) {
        final File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set.
        metadata.setContentLength(file.length());

        final boolean calculateMD5 = metadata.getContentMD5() == null;

        // Only set the content type if it hasn't already been set.
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        if (calculateMD5 && !skipContentMd5Check) {
            try {
                final String contentMd5_b64 = Md5Utils.md5AsBase64(file);
                metadata.setContentMD5(contentMd5_b64);
            } catch (final Exception e) {
                throw new AmazonClientException(
                        "Unable to calculate MD5 hash: " + e.getMessage(), e);
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (final FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    final Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest,
            HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            setZeroContentLength(request);
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    addHeaderIfNotNull(request, Headers.S3_TAGGING, urlEncodeTags(putObjectRequest.getTagging()));
    populateRequesterPaysHeader(request, putObjectRequest.isRequesterPays());

    // Populate the SSE-C (customer-provided key) parameters in the request header.
    populateSSE_C(request, putObjectRequest.getSSECustomerKey());

    // Use the internal interface to differentiate 0 from unset.
    final Long contentLength = (Long) metadata.getRawMetadataValue(Headers.CONTENT_LENGTH);
    if (contentLength == null) {
        /*
         * There's nothing we can do except let the HTTP client buffer the
         * input stream contents if the caller doesn't tell us how much data
         * to expect in a stream, since we have to explicitly tell Amazon S3
         * how much we're sending before we start sending any of it.
         */
        if (!input.markSupported()) {
            log.warn("No content length specified for stream data. "
                    + "Stream contents will be buffered in memory and could result in "
                    + "out of memory errors.");
            final ByteArrayInputStream bais = toByteArray(input);
            request.addHeader(Headers.CONTENT_LENGTH, String.valueOf(bais.available()));
            request.setStreaming(true);
            input = bais;
        } else {
            final long len = calculateContentLength(input);
            request.addHeader(Headers.CONTENT_LENGTH, String.valueOf(len));
        }
    } else {
        final long expectedLength = contentLength.longValue();
        if (expectedLength >= 0) {
            // Perform a length check on the underlying data stream. For the
            // S3 encryption client, the underlying data stream here refers
            // to the ciphertext stream (i.e. not the underlying plaintext
            // stream, which in turn may have been wrapped with its own
            // length-check input stream).
            @SuppressWarnings("resource")
            final LengthCheckInputStream lcis = new LengthCheckInputStream(input,
                    expectedLength, // expected data length to be uploaded
                    EXCLUDE_SKIPPED_BYTES);
            input = lcis;
            request.addHeader(Headers.CONTENT_LENGTH, contentLength.toString());
        }
    }

    if (progressListenerCallbackExecutor != null) {
        input = new ProgressReportingInputStream(input, progressListenerCallbackExecutor);
        ((ProgressReportingInputStream) input).setNotificationThreshold(this.notificationThreshold);
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.STARTED_EVENT_CODE);
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    populateSSE_KMS(request, putObjectRequest.getSSEAwsKeyManagementParams());
    request.setContent(input);

    /*
     * Enable 100-continue support for PUT operations, since this is where
     * we're potentially uploading large amounts of data and want to find
     * out as early as possible if an operation will fail. We don't want to
     * do this for all operations since it will cause extra latency in the
     * network interaction.
     */
    /*
     * HttpUrlConnection seems to be buggy in its implementation of
     * expect-continue.
     */
    // request.addHeader("Expect", "100-continue");

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (final AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (final AbortedException ignore) {
        } catch (final Exception e) {
            log.debug("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.COMPLETED_EVENT_CODE);

    final PutObjectResult result = new PutObjectResult();
    result.setVersionId(returnedMetadata.getVersionId());
    result.setSSEAlgorithm(returnedMetadata.getSSEAlgorithm());
    result.setSSECustomerAlgorithm(returnedMetadata.getSSECustomerAlgorithm());
    result.setSSECustomerKeyMd5(returnedMetadata.getSSECustomerKeyMd5());
    result.setExpirationTime(returnedMetadata.getExpirationTime());
    result.setExpirationTimeRuleId(returnedMetadata.getExpirationTimeRuleId());
    result.setETag(returnedMetadata.getETag());
    result.setMetadata(returnedMetadata);
    result.setRequesterCharged(returnedMetadata.isRequesterCharged());
    result.setContentMd5(returnedMetadata.getContentMD5());
    return result;
}
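To make the length-check semantics concrete, here is a minimal standalone sketch. It assumes only the public LengthCheckInputStream constructor and the two flag constants already used above; the sample data and buffer size are illustrative. Note the asymmetry with getObject: the upload path passes EXCLUDE_SKIPPED_BYTES so skipped bytes don't count toward the declared length, while the download path counts every byte received from S3 via INCLUDE_SKIPPED_BYTES.

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import com.amazonaws.util.LengthCheckInputStream;

public class LengthCheckSketch {
    public static void main(String[] args) throws Exception {
        byte[] data = new byte[100]; // stand-in for the request body
        InputStream in = new LengthCheckInputStream(
                new ByteArrayInputStream(data),
                100, // declared Content-Length
                LengthCheckInputStream.EXCLUDE_SKIPPED_BYTES); // skip()ped bytes don't count
        byte[] buf = new byte[32];
        while (in.read(buf) != -1) {
            // draining exactly 100 bytes satisfies the check; if the stream
            // ended early or ran long, the wrapper would throw instead
        }
        in.close();
    }
}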
Use of com.amazonaws.util.LengthCheckInputStream in project aws-sdk-android by aws-amplify.
The class S3CryptoModuleBase, method newS3CipherLiteInputStream.
private CipherLiteInputStream newS3CipherLiteInputStream(AbstractPutObjectRequest req,
        ContentCryptoMaterial cekMaterial, long plaintextLength) {
    final File fileOrig = req.getFile();
    final InputStream isOrig = req.getInputStream();
    InputStream isCurr = null;
    try {
        if (fileOrig == null) {
            // When the input is a FileInputStream, this wrapping enables
            // unlimited mark-and-reset.
            isCurr = isOrig == null ? null : ReleasableInputStream.wrap(isOrig);
        } else {
            isCurr = new ResettableInputStream(fileOrig);
        }
        if (plaintextLength > -1) {
            // S3 allows a single PUT to be no more than 5 GB, so the input
            // won't exceed the maximum length that can be encrypted using
            // either cipher mode (such as CBC or GCM). This ensures the
            // plaintext read from the underlying data stream has the same
            // length as the expected total.
            isCurr = new LengthCheckInputStream(isCurr, plaintextLength, EXCLUDE_SKIPPED_BYTES);
        }
        final CipherLite cipherLite = cekMaterial.getCipherLite();
        if (cipherLite.markSupported()) {
            return new CipherLiteInputStream(isCurr, cipherLite, DEFAULT_BUFFER_SIZE);
        } else {
            return new RenewableCipherLiteInputStream(isCurr, cipherLite, DEFAULT_BUFFER_SIZE);
        }
    } catch (final Exception e) {
        cleanupDataSource(req, fileOrig, isOrig, isCurr, log);
        throw new AmazonClientException("Unable to create cipher input stream", e);
    }
}
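The markSupported() branch above decides whether the cipher stream can be rewound when an upload attempt fails partway through. As a loose, library-free illustration of that mark/reset pattern (not SDK code; every name below is made up), a retryable sender might look like this:

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MarkResetSketch {
    static void sendWithOneRetry(InputStream in) throws IOException {
        if (!in.markSupported()) {
            // FileInputStream and most network streams lack mark/reset;
            // buffering adds it, at the cost of keeping read bytes in memory.
            in = new BufferedInputStream(in);
        }
        in.mark(Integer.MAX_VALUE); // remember the start of the stream
        byte[] buf = new byte[8192];
        try {
            while (in.read(buf) != -1) {
                // first attempt: hand bytes to the transport
            }
        } catch (IOException transientFailure) {
            in.reset(); // rewind to the mark...
            while (in.read(buf) != -1) {
                // ...and replay the whole body on the second attempt
            }
        }
    }
}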