
Example 1 with RepeatableFileInputStream

Use of com.amazonaws.services.s3.internal.RepeatableFileInputStream in project aws-sdk-android by aws-amplify.

From the class InputStreamsTest, method testMarkReset:

/**
 * Tests that we can combine InputSubstream with RepeatableFileInputStream
 * and correctly mark/reset the streams.
 */
@Test
public void testMarkReset() throws Exception {
    File tempFile = File.createTempFile("aws-java-sdk-inputsubstream-test", ".dat");
    FileOutputStream outputStream = new FileOutputStream(tempFile);
    outputStream.write(sampleData.getBytes(StringUtils.UTF8));
    outputStream.close();
    RepeatableFileInputStream repeatableFileInputStream = new RepeatableFileInputStream(tempFile);
    InputSubstream in = new InputSubstream(repeatableFileInputStream, 10, 10, true);
    assertEquals(10, in.available());
    byte[] buffer = new byte[5];
    in.mark(1024);
    assertEquals(5, in.read(buffer));
    assertEquals("12345", new String(buffer, StringUtils.UTF8));
    assertEquals(5, in.available());
    in.reset();
    assertEquals(10, in.available());
    assertEquals(5, in.read(buffer));
    assertEquals("12345", new String(buffer, StringUtils.UTF8));
    assertEquals(5, in.available());
    assertEquals(5, in.read(buffer));
    assertEquals("67890", new String(buffer, StringUtils.UTF8));
    assertEquals(0, in.available());
}
Also used : InputSubstream(com.amazonaws.services.s3.internal.InputSubstream) FileOutputStream(java.io.FileOutputStream) File(java.io.File) RepeatableFileInputStream(com.amazonaws.services.s3.internal.RepeatableFileInputStream) Test(org.junit.Test)
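
The same combination can be exercised outside of a test. Below is a minimal, self-contained sketch that relies only on the constructors and mark/reset behaviour demonstrated by the test above; the file name and sample contents are purely illustrative.

import java.io.File;
import java.io.FileOutputStream;

import com.amazonaws.services.s3.internal.InputSubstream;
import com.amazonaws.services.s3.internal.RepeatableFileInputStream;

public class RepeatableSubstreamDemo {
    public static void main(String[] args) throws Exception {
        // Write 20 bytes of sample data to a temporary file.
        File tempFile = File.createTempFile("repeatable-substream-demo", ".dat");
        tempFile.deleteOnExit();
        FileOutputStream out = new FileOutputStream(tempFile);
        out.write("0123456789ABCDEFGHIJ".getBytes("UTF-8"));
        out.close();

        // Expose bytes 10..19 of the file as a ten-byte sub-stream
        // (same constructor arguments as in the test above).
        InputSubstream in = new InputSubstream(
                new RepeatableFileInputStream(tempFile), 10, 10, true);

        byte[] buffer = new byte[10];
        in.mark(1024);                 // remember the start of the window
        int read = in.read(buffer);
        System.out.println("first pass:  " + new String(buffer, 0, read, "UTF-8"));

        in.reset();                    // rewind to the mark
        read = in.read(buffer);
        System.out.println("second pass: " + new String(buffer, 0, read, "UTF-8"));

        in.close();
    }
}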

Example 2 with RepeatableFileInputStream

Use of com.amazonaws.services.s3.internal.RepeatableFileInputStream in project aws-sdk-android by aws-amplify.

From the class AmazonS3Client, method uploadPart:

/*
     * (non-Javadoc)
     * @see
     * com.amazonaws.services.s3.AmazonS3#uploadPart(com.amazonaws.services.
     * s3.model.UploadPartRequest)
     */
@Override
public UploadPartResult uploadPart(UploadPartRequest uploadPartRequest) throws AmazonClientException, AmazonServiceException {
    assertParameterNotNull(uploadPartRequest, "The request parameter must be specified when uploading a part");
    final String bucketName = uploadPartRequest.getBucketName();
    final String key = uploadPartRequest.getKey();
    final String uploadId = uploadPartRequest.getUploadId();
    final int partNumber = uploadPartRequest.getPartNumber();
    final long partSize = uploadPartRequest.getPartSize();
    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading a part");
    assertParameterNotNull(key, "The key parameter must be specified when uploading a part");
    assertParameterNotNull(uploadId, "The upload ID parameter must be specified when uploading a part");
    assertParameterNotNull(partNumber, "The part number parameter must be specified when uploading a part");
    assertParameterNotNull(partSize, "The part size parameter must be specified when uploading a part");
    final Request<UploadPartRequest> request = createRequest(bucketName, key, uploadPartRequest, HttpMethodName.PUT);
    request.addParameter("uploadId", uploadId);
    request.addParameter("partNumber", Integer.toString(partNumber));
    final ObjectMetadata objectMetadata = uploadPartRequest.getObjectMetadata();
    if (objectMetadata != null) {
        populateRequestMetadata(request, objectMetadata);
    }
    request.addHeader(Headers.CONTENT_LENGTH, Long.toString(partSize));
    /*
         * HttpUrlConnection seems to be buggy in terms of implementation of
         * expect continue.
         */
    // request.addHeader("Expect", "100-continue");
    populateRequesterPaysHeader(request, uploadPartRequest.isRequesterPays());
    // Populate the SSE-CPK parameters to the request header
    populateSSE_C(request, uploadPartRequest.getSSECustomerKey());
    InputStream inputStream = null;
    if (uploadPartRequest.getInputStream() != null) {
        inputStream = uploadPartRequest.getInputStream();
    } else if (uploadPartRequest.getFile() != null) {
        try {
            inputStream = new InputSubstream(new RepeatableFileInputStream(uploadPartRequest.getFile()), uploadPartRequest.getFileOffset(), partSize, true);
        } catch (final FileNotFoundException e) {
            throw new IllegalArgumentException("The specified file doesn't exist", e);
        }
    } else {
        throw new IllegalArgumentException("A File or InputStream must be specified when uploading part");
    }
    // Pre-compute the Content-MD5 header when the stream supports mark/reset; the
    // stream is reset afterwards and is not read again until the request is invoked.
    if (uploadPartRequest.getMd5Digest() == null && !ServiceUtils.skipMd5CheckPerRequest(uploadPartRequest, clientOptions) && inputStream.markSupported()) {
        try {
            final String contentMd5_b64 = Md5Utils.md5AsBase64(inputStream);
            addHeaderIfNotNull(request, Headers.CONTENT_MD5, contentMd5_b64);
            inputStream.reset();
        } catch (final Exception e) {
            throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        }
    }
    /*
         * This is compatible with progress listener set by either the legacy
         * method UploadPartRequest#setProgressListener or the new method
         * UploadPartRequest#setGeneralProgressListener.
         */
    final ProgressListener progressListener = uploadPartRequest.getGeneralProgressListener();
    final ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor.wrapListener(progressListener);
    if (progressListenerCallbackExecutor != null) {
        inputStream = new ProgressReportingInputStream(inputStream, progressListenerCallbackExecutor);
        ((ProgressReportingInputStream) inputStream).setNotificationThreshold(this.notificationThreshold);
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.PART_STARTED_EVENT_CODE);
    }
    try {
        request.setContent(inputStream);
        final ObjectMetadata metadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.PART_COMPLETED_EVENT_CODE);
        final UploadPartResult result = new UploadPartResult();
        result.setETag(metadata.getETag());
        result.setPartNumber(partNumber);
        result.setSSEAlgorithm(metadata.getSSEAlgorithm());
        result.setSSECustomerAlgorithm(metadata.getSSECustomerAlgorithm());
        result.setSSECustomerKeyMd5(metadata.getSSECustomerKeyMd5());
        result.setRequesterCharged(metadata.isRequesterCharged());
        return result;
    } catch (final AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.PART_FAILED_EVENT_CODE);
        throw ace;
    } finally {
        if (inputStream != null) {
            try {
                inputStream.close();
            } catch (final Exception e) {
                // Ignore failures while closing the part's input stream.
            }
        }
    }
}
Also used : ServiceClientHolderInputStream(com.amazonaws.util.ServiceClientHolderInputStream) LengthCheckInputStream(com.amazonaws.util.LengthCheckInputStream) ByteArrayInputStream(java.io.ByteArrayInputStream) ProgressReportingInputStream(com.amazonaws.event.ProgressReportingInputStream) RepeatableFileInputStream(com.amazonaws.services.s3.internal.RepeatableFileInputStream) InputStream(java.io.InputStream) InputSubstream(com.amazonaws.services.s3.internal.InputSubstream) S3MetadataResponseHandler(com.amazonaws.services.s3.internal.S3MetadataResponseHandler) ProgressListener(com.amazonaws.event.ProgressListener) ProgressListenerCallbackExecutor(com.amazonaws.event.ProgressListenerCallbackExecutor) AmazonClientException(com.amazonaws.AmazonClientException) AmazonServiceException(com.amazonaws.AmazonServiceException) AbortedException(com.amazonaws.AbortedException) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) URISyntaxException(java.net.URISyntaxException)
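
For context, a caller reaches the RepeatableFileInputStream/InputSubstream branch above by building a file-backed UploadPartRequest. The sketch below is illustrative only: the fluent with* setters are assumed to exist alongside the getters used in uploadPart, and the bucket, key and upload ID are placeholders for an already-initiated multipart upload.

import java.io.File;

import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;

public class UploadPartSketch {

    // Uploads the first part of a local file. Client construction, bucket, key
    // and uploadId (from the initiate-multipart-upload call) are left to the caller.
    static UploadPartResult uploadFirstPart(AmazonS3Client s3, File file,
            String bucket, String key, String uploadId) {
        final long partSize = Math.min(5L * 1024 * 1024, file.length());
        UploadPartRequest request = new UploadPartRequest()
                .withBucketName(bucket)
                .withKey(key)
                .withUploadId(uploadId)
                .withPartNumber(1)
                // Supplying a File (rather than an InputStream) makes uploadPart
                // wrap it in InputSubstream(new RepeatableFileInputStream(file), ...)
                // so the part can be re-read for the MD5 calculation above.
                .withFile(file)
                .withFileOffset(0)
                .withPartSize(partSize);
        return s3.uploadPart(request);
    }
}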

Example 3 with RepeatableFileInputStream

Use of com.amazonaws.services.s3.internal.RepeatableFileInputStream in project aws-sdk-android by aws-amplify.

From the class AmazonS3Client, method putObject:

/*
     * (non-Javadoc)
     * @see
     * com.amazonaws.services.s3.AmazonS3#putObject(com.amazonaws.services.s3
     * .model.PutObjectRequest)
     */
@Override
public PutObjectResult putObject(PutObjectRequest putObjectRequest) throws AmazonClientException, AmazonServiceException {
    assertParameterNotNull(putObjectRequest, "The PutObjectRequest parameter must be specified when uploading an object");
    final String bucketName = putObjectRequest.getBucketName();
    final String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    /*
         * This is compatible with progress listener set by either the legacy
         * method PutObjectRequest#setProgressListener or the new method
         * PutObjectRequest#setGeneralProgressListener.
         */
    final ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    final ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor.wrapListener(progressListener);
    if (metadata == null) {
        metadata = new ObjectMetadata();
    }
    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");
    final boolean skipContentMd5Check = ServiceUtils.skipMd5CheckPerRequest(putObjectRequest, clientOptions);
    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options.
    if (putObjectRequest.getFile() != null) {
        final File file = putObjectRequest.getFile();
        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());
        final boolean calculateMD5 = metadata.getContentMD5() == null;
        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }
        if (calculateMD5 && !skipContentMd5Check) {
            try {
                final String contentMd5_b64 = Md5Utils.md5AsBase64(file);
                metadata.setContentMD5(contentMd5_b64);
            } catch (final Exception e) {
                throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
            }
        }
        try {
            input = new RepeatableFileInputStream(file);
        } catch (final FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }
    final Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);
    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }
    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }
    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            setZeroContentLength(request);
            input = new ByteArrayInputStream(new byte[0]);
        }
    }
    addHeaderIfNotNull(request, Headers.S3_TAGGING, urlEncodeTags(putObjectRequest.getTagging()));
    populateRequesterPaysHeader(request, putObjectRequest.isRequesterPays());
    // Populate the SSE-CPK parameters to the request header
    populateSSE_C(request, putObjectRequest.getSSECustomerKey());
    // Use internal interface to differentiate 0 from unset.
    final Long contentLength = (Long) metadata.getRawMetadataValue(Headers.CONTENT_LENGTH);
    if (contentLength == null) {
        /*
             * There's nothing we can do except for let the HTTP client buffer
             * the input stream contents if the caller doesn't tell us how much
             * data to expect in a stream since we have to explicitly tell
             * Amazon S3 how much we're sending before we start sending any of
             * it.
             */
        if (!input.markSupported()) {
            log.warn("No content length specified for stream data.  " + "Stream contents will be buffered in memory and could result in " + "out of memory errors.");
            final ByteArrayInputStream bais = toByteArray(input);
            request.addHeader(Headers.CONTENT_LENGTH, String.valueOf(bais.available()));
            request.setStreaming(true);
            input = bais;
        } else {
            final long len = calculateContentLength(input);
            request.addHeader(Headers.CONTENT_LENGTH, String.valueOf(len));
        }
    } else {
        final long expectedLength = contentLength.longValue();
        if (expectedLength >= 0) {
            // Performs a length check on the underlying data stream.
            // For the S3 encryption client, the underlying data stream here
            // refers to the cipher-text data stream (i.e. not the underlying
            // plain-text data stream, which in turn may have been wrapped
            // with its own length-check input stream).
            @SuppressWarnings("resource")
            final LengthCheckInputStream lcis = new LengthCheckInputStream(input,
                    // expected data length to be uploaded
                    expectedLength, EXCLUDE_SKIPPED_BYTES);
            input = lcis;
            request.addHeader(Headers.CONTENT_LENGTH, contentLength.toString());
        }
    }
    if (progressListenerCallbackExecutor != null) {
        input = new ProgressReportingInputStream(input, progressListenerCallbackExecutor);
        ((ProgressReportingInputStream) input).setNotificationThreshold(this.notificationThreshold);
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.STARTED_EVENT_CODE);
    }
    if (metadata.getContentType() == null) {
        /*
             * Default to the "application/octet-stream" if the user hasn't
             * specified a content type.
             */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }
    populateRequestMetadata(request, metadata);
    populateSSE_KMS(request, putObjectRequest.getSSEAwsKeyManagementParams());
    request.setContent(input);
    /*
         * Enable 100-continue support for PUT operations, since this is where
         * we're potentially uploading large amounts of data and want to find
         * out as early as possible if an operation will fail. We don't want to
         * do this for all operations since it will cause extra latency in the
         * network interaction.
         */
    /*
         * HttpUrlConnection seems to be buggy in terms of implementation of
         * expect continue.
         */
    // request.addHeader("Expect", "100-continue");
    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (final AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (final AbortedException ignore) {
        } catch (final Exception e) {
            log.debug("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }
    fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.COMPLETED_EVENT_CODE);
    final PutObjectResult result = new PutObjectResult();
    result.setVersionId(returnedMetadata.getVersionId());
    result.setSSEAlgorithm(returnedMetadata.getSSEAlgorithm());
    result.setSSECustomerAlgorithm(returnedMetadata.getSSECustomerAlgorithm());
    result.setSSECustomerKeyMd5(returnedMetadata.getSSECustomerKeyMd5());
    result.setExpirationTime(returnedMetadata.getExpirationTime());
    result.setExpirationTimeRuleId(returnedMetadata.getExpirationTimeRuleId());
    result.setETag(returnedMetadata.getETag());
    result.setMetadata(returnedMetadata);
    result.setRequesterCharged(returnedMetadata.isRequesterCharged());
    result.setContentMd5(returnedMetadata.getContentMD5());
    return result;
}
Also used : ServiceClientHolderInputStream(com.amazonaws.util.ServiceClientHolderInputStream) LengthCheckInputStream(com.amazonaws.util.LengthCheckInputStream) ByteArrayInputStream(java.io.ByteArrayInputStream) ProgressReportingInputStream(com.amazonaws.event.ProgressReportingInputStream) RepeatableFileInputStream(com.amazonaws.services.s3.internal.RepeatableFileInputStream) InputStream(java.io.InputStream) S3MetadataResponseHandler(com.amazonaws.services.s3.internal.S3MetadataResponseHandler) ProgressListener(com.amazonaws.event.ProgressListener) ProgressListenerCallbackExecutor(com.amazonaws.event.ProgressListenerCallbackExecutor) File(java.io.File) AmazonClientException(com.amazonaws.AmazonClientException) AmazonServiceException(com.amazonaws.AmazonServiceException) AbortedException(com.amazonaws.AbortedException) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) URISyntaxException(java.net.URISyntaxException)
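
For completeness, a minimal caller-side sketch of the file branch above. It assumes the standard PutObjectRequest(bucketName, key, file) constructor; the bucket and key values are placeholders.

import java.io.File;

import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;

public class PutObjectSketch {

    // Uploads a local file. Because the request carries a File, putObject above
    // sets the Content-Length, derives the content type, pre-computes the
    // Content-MD5 and streams the file through a RepeatableFileInputStream.
    static PutObjectResult putFile(AmazonS3Client s3, File file, String bucket, String key) {
        PutObjectResult result = s3.putObject(new PutObjectRequest(bucket, key, file));
        System.out.println("ETag: " + result.getETag());
        return result;
    }
}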

Example 4 with RepeatableFileInputStream

Use of com.amazonaws.services.s3.internal.RepeatableFileInputStream in project aws-sdk-android by aws-amplify.

From the class EncryptionUtils, method getEncryptedInputStream:

/**
 * Retrieves the encrypted input stream.
 * @param request the UploadPartRequest to encrypt.
 * @param cipherFactory the CipherFactory used to encrypt.
 * @return the encrypted input stream.
 */
public static ByteRangeCapturingInputStream getEncryptedInputStream(UploadPartRequest request, CipherFactory cipherFactory) {
    try {
        InputStream originalInputStream = request.getInputStream();
        if (request.getFile() != null) {
            originalInputStream = new InputSubstream(new RepeatableFileInputStream(request.getFile()), request.getFileOffset(), request.getPartSize(), request.isLastPart());
        }
        originalInputStream = new RepeatableCipherInputStream(originalInputStream, cipherFactory);
        if (!request.isLastPart()) {
            // We want to prevent the final padding from being sent on the
            // stream...
            originalInputStream = new InputSubstream(originalInputStream, 0, request.getPartSize(), false);
        }
        long partSize = request.getPartSize();
        int cipherBlockSize = cipherFactory.createCipher().getBlockSize();
        return new ByteRangeCapturingInputStream(originalInputStream, partSize - cipherBlockSize, partSize);
    } catch (Exception e) {
        throw new AmazonClientException("Unable to create cipher input stream: " + e.getMessage(), e);
    }
}
Also used : RepeatableCipherInputStream(com.amazonaws.services.s3.internal.RepeatableCipherInputStream) InputSubstream(com.amazonaws.services.s3.internal.InputSubstream) LengthCheckInputStream(com.amazonaws.util.LengthCheckInputStream) RepeatableFileInputStream(com.amazonaws.services.s3.internal.RepeatableFileInputStream) ByteArrayInputStream(java.io.ByteArrayInputStream) S3ObjectInputStream(com.amazonaws.services.s3.model.S3ObjectInputStream) InputStream(java.io.InputStream) AmazonClientException(com.amazonaws.AmazonClientException) IOException(java.io.IOException) NoSuchAlgorithmException(java.security.NoSuchAlgorithmException)
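
A small caller-side sketch of the method above, for illustration only. It is assumed to sit in the same package as EncryptionUtils, so CipherFactory needs no import, and the returned stream is handled through the plain InputStream supertype. Draining it shows that for a non-last part the encrypted length equals the plain-text part size, because the second InputSubstream trims the trailing cipher padding, as the comment in the method notes.

import java.io.InputStream;

import com.amazonaws.services.s3.model.UploadPartRequest;

class EncryptedPartLengthCheck {

    // Drains the encrypted stream produced for one part and returns its length.
    // For a non-last part this equals request.getPartSize(), since the padding
    // emitted by the cipher is cut off before upload.
    static long encryptedLength(UploadPartRequest request, CipherFactory cipherFactory)
            throws Exception {
        InputStream in = EncryptionUtils.getEncryptedInputStream(request, cipherFactory);
        try {
            byte[] buffer = new byte[8 * 1024];
            long total = 0;
            int read;
            while ((read = in.read(buffer)) != -1) {
                total += read;
            }
            return total;
        } finally {
            in.close();
        }
    }
}

The capture window passed to ByteRangeCapturingInputStream (partSize - cipherBlockSize up to partSize) covers the final cipher block of the part, which the encryption client can then use as the starting point (IV) when encrypting the following part; treat that interpretation as an inference from the code rather than something stated in this snippet.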

Aggregations

RepeatableFileInputStream (com.amazonaws.services.s3.internal.RepeatableFileInputStream) 4
AmazonClientException (com.amazonaws.AmazonClientException) 3
InputSubstream (com.amazonaws.services.s3.internal.InputSubstream) 3
LengthCheckInputStream (com.amazonaws.util.LengthCheckInputStream) 3
ByteArrayInputStream (java.io.ByteArrayInputStream) 3
IOException (java.io.IOException) 3
InputStream (java.io.InputStream) 3
AbortedException (com.amazonaws.AbortedException) 2
AmazonServiceException (com.amazonaws.AmazonServiceException) 2
ProgressListener (com.amazonaws.event.ProgressListener) 2
ProgressListenerCallbackExecutor (com.amazonaws.event.ProgressListenerCallbackExecutor) 2
ProgressReportingInputStream (com.amazonaws.event.ProgressReportingInputStream) 2
S3MetadataResponseHandler (com.amazonaws.services.s3.internal.S3MetadataResponseHandler) 2
ServiceClientHolderInputStream (com.amazonaws.util.ServiceClientHolderInputStream) 2
File (java.io.File) 2
FileNotFoundException (java.io.FileNotFoundException) 2
URISyntaxException (java.net.URISyntaxException) 2
RepeatableCipherInputStream (com.amazonaws.services.s3.internal.RepeatableCipherInputStream) 1
S3ObjectInputStream (com.amazonaws.services.s3.model.S3ObjectInputStream) 1
FileOutputStream (java.io.FileOutputStream) 1