
Example 1 with com.amazonaws.services.s3.transfer

Use of com.amazonaws.services.s3.transfer in project datarouter by hotpads.

The class BaseDatarouterS3Client, method handleTransfer:

private void handleTransfer(Transfer transfer, Runnable heartbeat) {
    TransferProgress progress = transfer.getProgress();
    long totalBytesToTransfer = progress.getTotalBytesToTransfer();
    while (!transfer.isDone()) {
        try {
            heartbeat.run();
        } catch (Exception e) {
            logger.error("couldn't heartbeat", e);
        }
        logger.warn("{} / {} pct={} bytesTransferred={} totalBytesToTransfer={}", ByteUnitTool.byteCountToDisplaySize(progress.getBytesTransferred()), ByteUnitTool.byteCountToDisplaySize(totalBytesToTransfer), NumberFormatter.format(progress.getPercentTransferred(), 2), progress.getBytesTransferred(), totalBytesToTransfer);
        ThreadTool.sleepUnchecked(1000L);
    }
}
Also used: S3Exception (software.amazon.awssdk.services.s3.model.S3Exception), NoSuchBucketException (software.amazon.awssdk.services.s3.model.NoSuchBucketException), NoSuchKeyException (software.amazon.awssdk.services.s3.model.NoSuchKeyException), IOException (java.io.IOException), TransferProgress (com.amazonaws.services.s3.transfer.TransferProgress)
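
For context, a caller obtains the Transfer from the v1 TransferManager and supplies its own heartbeat callback. A minimal sketch (the helper method and its names are assumptions for illustration, not datarouter code):

// Hypothetical caller: start an upload and block in handleTransfer until it finishes.
// Assumes imports of com.amazonaws.services.s3.transfer.TransferManager, Upload,
// and com.amazonaws.services.s3.model.PutObjectRequest.
private void uploadWithHeartbeat(TransferManager transferManager, String bucket, String key, File file, Runnable heartbeat) {
    Upload upload = transferManager.upload(new PutObjectRequest(bucket, key, file));
    handleTransfer(upload, heartbeat);
}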

Example 2 with com.amazonaws.services.s3.transfer

Use of com.amazonaws.services.s3.transfer in project snowflake-jdbc by snowflakedb.

The class SnowflakeS3Client, method upload:

/**
 * Upload a file (or stream) to S3.
 *
 * @param session session object
 * @param command upload command
 * @param parallelism number of threads to use for parallel uploading
 * @param uploadFromStream true if upload source is stream
 * @param remoteStorageLocation s3 bucket name
 * @param srcFile source file if not uploading from a stream
 * @param destFileName file name on s3 after upload
 * @param inputStream stream used for uploading if fileBackedOutputStream is null
 * @param fileBackedOutputStream stream used for uploading if not null
 * @param meta object metadata
 * @param stageRegion region name where the stage persists
 * @param presignedUrl Not used in S3
 * @throws SnowflakeSQLException if upload failed even after retry
 */
@Override
public void upload(SFSession session, String command, int parallelism, boolean uploadFromStream,
        String remoteStorageLocation, File srcFile, String destFileName, InputStream inputStream,
        FileBackedOutputStream fileBackedOutputStream, StorageObjectMetadata meta, String stageRegion,
        String presignedUrl) throws SnowflakeSQLException {
    final long originalContentLength = meta.getContentLength();
    final List<FileInputStream> toClose = new ArrayList<>();
    SFPair<InputStream, Boolean> uploadStreamInfo = createUploadStream(srcFile, uploadFromStream, inputStream, fileBackedOutputStream, ((S3ObjectMetadata) meta).getS3ObjectMetadata(), originalContentLength, toClose);
    ObjectMetadata s3Meta;
    if (meta instanceof S3ObjectMetadata) {
        s3Meta = ((S3ObjectMetadata) meta).getS3ObjectMetadata();
    } else {
        throw new IllegalArgumentException("Unexpected metadata object type");
    }
    TransferManager tx = null;
    int retryCount = 0;
    do {
        try {
            logger.debug("Creating executor service for transfer" + "manager with {} threads", parallelism);
            // upload files to s3
            tx = TransferManagerBuilder.standard().withS3Client(amazonClient).withExecutorFactory(new ExecutorFactory() {

                @Override
                public ExecutorService newExecutor() {
                    return SnowflakeUtil.createDefaultExecutorService("s3-transfer-manager-uploader-", parallelism);
                }
            }).build();
            final Upload myUpload;
            if (!this.isClientSideEncrypted) {
                // since we're not client-side encrypting, make sure we're server-side encrypting with
                // SSE-S3
                s3Meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            }
            if (uploadStreamInfo.right) {
                myUpload = tx.upload(remoteStorageLocation, destFileName, uploadStreamInfo.left, s3Meta);
            } else {
                PutObjectRequest putRequest = new PutObjectRequest(remoteStorageLocation, destFileName, srcFile);
                putRequest.setMetadata(s3Meta);
                myUpload = tx.upload(putRequest);
            }
            myUpload.waitForCompletion();
            // success: close any streams opened for this upload and return
            for (FileInputStream is : toClose) IOUtils.closeQuietly(is);
            return;
        } catch (Exception ex) {
            handleS3Exception(ex, ++retryCount, "upload", session, command, this);
            if (uploadFromStream && fileBackedOutputStream == null) {
                throw new SnowflakeSQLException(ex, SqlState.SYSTEM_ERROR, ErrorCode.IO_ERROR.getMessageCode(), "Encountered exception during upload: " + ex.getMessage() + "\nCannot retry upload from stream.");
            }
            uploadStreamInfo = createUploadStream(srcFile, uploadFromStream, inputStream, fileBackedOutputStream, s3Meta, originalContentLength, toClose);
        } finally {
            if (tx != null) {
                tx.shutdownNow(false);
            }
        }
    } while (retryCount <= getMaxRetries());
    for (FileInputStream is : toClose) IOUtils.closeQuietly(is);
    throw new SnowflakeSQLLoggedException(session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected: upload unsuccessful without exception!");
}
Also used: TransferManager (com.amazonaws.services.s3.transfer.TransferManager), ArrayList (java.util.ArrayList), Upload (com.amazonaws.services.s3.transfer.Upload), SocketTimeoutException (java.net.SocketTimeoutException), SSLInitializationException (org.apache.http.conn.ssl.SSLInitializationException), AmazonServiceException (com.amazonaws.AmazonServiceException), KeyManagementException (java.security.KeyManagementException), NoSuchAlgorithmException (java.security.NoSuchAlgorithmException), InvalidKeyException (java.security.InvalidKeyException), AmazonClientException (com.amazonaws.AmazonClientException), ExecutorFactory (com.amazonaws.client.builder.ExecutorFactory), ExecutorService (java.util.concurrent.ExecutorService)
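
Stripped of the Snowflake-specific retry loop, stream handling, and encryption branching, the underlying v1 TransferManager flow is short. A minimal sketch with placeholder bucket and key names (an illustration, not snowflake-jdbc code):

// Minimal sketch of the TransferManager upload flow (placeholder names; retries and error handling elided).
static void uploadFile(AmazonS3 s3, File file) throws InterruptedException {
    TransferManager tx = TransferManagerBuilder.standard().withS3Client(s3).build();
    try {
        ObjectMetadata meta = new ObjectMetadata();
        // same SSE-S3 choice as above when client-side encryption is disabled
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        PutObjectRequest request = new PutObjectRequest("my-bucket", "my-key", file);
        request.setMetadata(meta);
        Upload upload = tx.upload(request);
        upload.waitForCompletion(); // blocks until the (possibly multipart) transfer finishes
    } finally {
        tx.shutdownNow(false); // false = leave the caller-owned AmazonS3 client open
    }
}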

Example 3 with com.amazonaws.services.s3.transfer

Use of com.amazonaws.services.s3.transfer in project geowebcache by GeoWebCache.

The class S3BlobStore, method checkBucketPolicy:

/**
 * Checks a {@link com.amazonaws.services.s3.AmazonS3Client} by getting the policy out of the
 * bucket, an operation implemented by S3 and MinIO but not, for example, by Cohesity.
 */
private void checkBucketPolicy(AmazonS3Client client, String bucketName) throws Exception {
    try {
        log.debug("Checking policy for bucket " + bucketName);
        BucketPolicy bucketPol = client.getBucketPolicy(bucketName);
        log.debug("Bucket " + bucketName + " policy: " + bucketPol.getPolicyText());
    } catch (AmazonServiceException se) {
        throw new StorageException("Server error getting bucket policy: " + se.getMessage(), se);
    }
}
Also used: BucketPolicy (com.amazonaws.services.s3.model.BucketPolicy), AmazonServiceException (com.amazonaws.AmazonServiceException), StorageException (org.geowebcache.storage.StorageException)
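
Because not every S3-compatible backend implements GetBucketPolicy, a more tolerant variant could downgrade the failure to a warning instead of aborting. This is a sketch of such an alternative, not GeoWebCache's actual behavior:

// Hypothetical lenient variant: warn and continue when the backend rejects GetBucketPolicy.
private void checkBucketPolicyLenient(AmazonS3Client client, String bucketName) {
    try {
        BucketPolicy bucketPol = client.getBucketPolicy(bucketName);
        log.debug("Bucket " + bucketName + " policy: " + bucketPol.getPolicyText()); // policyText may be null if no policy is set
    } catch (AmazonServiceException se) {
        // e.g. stores that do not implement the call, per the javadoc above
        log.warn("Could not read policy for bucket " + bucketName + ": " + se.getErrorCode());
    }
}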

Example 4 with com.amazonaws.services.s3.transfer

Use of com.amazonaws.services.s3.transfer in project alluxio by Alluxio.

The class S3AOutputStream, method close:

@Override
public void close() throws IOException {
    if (mClosed) {
        return;
    }
    mLocalOutputStream.close();
    String path = getUploadPath();
    try {
        // Generate the object metadata by setting server side encryption, md5 checksum, the file
        // length, and encoding as octet stream since no assumptions are made about the file type
        ObjectMetadata meta = new ObjectMetadata();
        if (mSseEnabled) {
            meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        if (mHash != null) {
            meta.setContentMD5(new String(Base64.encode(mHash.digest())));
        }
        meta.setContentLength(mFile.length());
        meta.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
        // Generate the put request and wait for the transfer manager to complete the upload
        PutObjectRequest putReq = new PutObjectRequest(mBucketName, path, mFile).withMetadata(meta);
        getTransferManager().upload(putReq).waitForUploadResult();
    } catch (Exception e) {
        LOG.error("Failed to upload {}", path, e);
        throw new IOException(e);
    } finally {
        // Delete the local temporary file whether the transfer manager completed the upload or the upload failed.
        if (!mFile.delete()) {
            LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
        }
        // Set the closed flag; close can be retried until mFile.delete is called successfully
        mClosed = true;
    }
}
Also used: IOException (java.io.IOException), ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata), PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest), NoSuchAlgorithmException (java.security.NoSuchAlgorithmException)
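
The mHash digest above is fed the same bytes that go to the local temporary file. A minimal sketch of producing the Content-MD5 value, assuming com.amazonaws.util.Base64 and illustrative names that are not part of Alluxio's wiring:

// Sketch of where a Content-MD5 value like the one above comes from (illustrative, not Alluxio code).
static String contentMd5For(byte[] bytesWrittenToLocalFile) throws NoSuchAlgorithmException {
    MessageDigest md5 = MessageDigest.getInstance("MD5");
    md5.update(bytesWrittenToLocalFile);             // the same bytes that were written to mLocalOutputStream
    return new String(Base64.encode(md5.digest()));  // Base64 of the raw 16-byte digest, as S3 expects
}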

Example 5 with com.amazonaws.services.s3.transfer

Use of com.amazonaws.services.s3.transfer in project emodb by bazaarvoice.

The class S3ScanWriterTest, method testWriteWithError:

@Test
public void testWriteWithError() throws Exception {
    URI baseUri = URI.create("s3://test-bucket/scan");
    ScheduledExecutorService uploadService = Executors.newScheduledThreadPool(2);
    AmazonS3 amazonS3 = mock(AmazonS3.class);
    final MetricRegistry metricRegistry = new MetricRegistry();
    when(amazonS3.putObject(any(PutObjectRequest.class))).thenThrow(new AmazonClientException("Simulated transfer failure"));
    AmazonS3Provider amazonS3Provider = mock(AmazonS3Provider.class);
    when(amazonS3Provider.getS3ClientForBucket("test-bucket")).thenReturn(amazonS3);
    S3ScanWriter scanWriter = new S3ScanWriter(1, baseUri, Optional.of(2), metricRegistry, amazonS3Provider, uploadService, new ObjectMapper());
    scanWriter.setRetryDelay(Duration.ofMillis(10));
    try {
        ScanDestinationWriter scanDestinationWriter = scanWriter.writeShardRows("testtable", "p0", 0, 1);
        scanDestinationWriter.writeDocument(ImmutableMap.of("type", "review", "rating", 5));
        scanDestinationWriter.closeAndTransferAsync(Optional.of(1));
        scanWriter.waitForAllTransfersComplete(Duration.ofSeconds(10));
        fail("No transfer exception thrown");
    } catch (IOException e) {
        assertTrue(e.getCause() instanceof AmazonClientException);
        assertEquals(e.getCause().getMessage(), "Simulated transfer failure");
    } finally {
        uploadService.shutdownNow();
    }
    // The transfer should have been attempted three times in total before giving up
    verify(amazonS3, times(3)).putObject(any(PutObjectRequest.class));
}
Also used: AmazonS3 (com.amazonaws.services.s3.AmazonS3), ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService), MetricRegistry (com.codahale.metrics.MetricRegistry), AmazonClientException (com.amazonaws.AmazonClientException), IOException (java.io.IOException), URI (java.net.URI), PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest), ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper), Test (org.testng.annotations.Test)
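
For contrast, a success-path test would stub putObject to return a result and then verify the call count. This is a sketch under the same mocking setup, an assumption rather than an actual emodb test:

// Hypothetical success path: stub putObject to succeed, then expect a single attempt with no retries.
when(amazonS3.putObject(any(PutObjectRequest.class))).thenReturn(new PutObjectResult());
ScanDestinationWriter scanDestinationWriter = scanWriter.writeShardRows("testtable", "p0", 0, 1);
scanDestinationWriter.writeDocument(ImmutableMap.of("type", "review", "rating", 5));
scanDestinationWriter.closeAndTransferAsync(Optional.of(1));
scanWriter.waitForAllTransfersComplete(Duration.ofSeconds(10));
verify(amazonS3, times(1)).putObject(any(PutObjectRequest.class));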

Aggregations

S3FileTransferRequestParamsDto (org.finra.herd.model.dto.S3FileTransferRequestParamsDto): 37
AmazonClientException (com.amazonaws.AmazonClientException): 30
Test (org.junit.Test): 30
AmazonServiceException (com.amazonaws.AmazonServiceException): 29
ByteArrayInputStream (java.io.ByteArrayInputStream): 24
File (java.io.File): 24
ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata): 23
Tag (com.amazonaws.services.s3.model.Tag): 23
S3ObjectSummary (com.amazonaws.services.s3.model.S3ObjectSummary): 21
IOException (java.io.IOException): 20
PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest): 19
TransferManager (com.amazonaws.services.s3.transfer.TransferManager): 18
S3ObjectTaggerRoleParamsDto (org.finra.herd.model.dto.S3ObjectTaggerRoleParamsDto): 16
AmazonS3Client (com.amazonaws.services.s3.AmazonS3Client): 15
AmazonS3 (com.amazonaws.services.s3.AmazonS3): 12
GetObjectTaggingRequest (com.amazonaws.services.s3.model.GetObjectTaggingRequest): 10
GetObjectTaggingResult (com.amazonaws.services.s3.model.GetObjectTaggingResult): 10
Transfer (com.amazonaws.services.s3.transfer.Transfer): 9
AbstractDaoTest (org.finra.herd.dao.AbstractDaoTest): 9
AmazonS3Exception (com.amazonaws.services.s3.model.AmazonS3Exception): 8