use of com.amazonaws.services.s3.transfer in project datarouter by hotpads.
the class BaseDatarouterS3Client method handleTransfer.
private void handleTransfer(Transfer transfer, Runnable heartbeat) {
    TransferProgress progress = transfer.getProgress();
    long totalBytesToTransfer = progress.getTotalBytesToTransfer();
    while (!transfer.isDone()) {
        try {
            heartbeat.run();
        } catch (Exception e) {
            logger.error("couldn't heartbeat", e);
        }
        logger.warn("{} / {} pct={} bytesTransferred={} totalBytesToTransfer={}",
                ByteUnitTool.byteCountToDisplaySize(progress.getBytesTransferred()),
                ByteUnitTool.byteCountToDisplaySize(totalBytesToTransfer),
                NumberFormatter.format(progress.getPercentTransferred(), 2),
                progress.getBytesTransferred(),
                totalBytesToTransfer);
        ThreadTool.sleepUnchecked(1000L);
    }
}
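For context, a caller inside the same class might drive this loop as follows. This is a minimal sketch, not datarouter source: the putFile method name, its parameters, and the TransferManager setup are assumptions, and the same AWS transfer imports as above are assumed.
private void putFile(AmazonS3 s3, String bucket, String key, File file, Runnable heartbeat) {
    // Hypothetical caller: start an async upload, then poll it with handleTransfer,
    // which keeps the heartbeat alive and logs progress until the transfer finishes.
    TransferManager transferManager = TransferManagerBuilder.standard()
            .withS3Client(s3)
            .build();
    try {
        Upload upload = transferManager.upload(bucket, key, file);
        handleTransfer(upload, heartbeat);
        // Surface any transfer failure as an exception once the loop exits
        upload.waitForCompletion();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    } finally {
        // false: shut down the transfer threads but keep the shared S3 client open
        transferManager.shutdownNow(false);
    }
}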
use of com.amazonaws.services.s3.transfer in project snowflake-jdbc by snowflakedb.
the class SnowflakeS3Client method upload.
/**
* Upload a file (or stream) to S3.
*
* @param session session object
* @param command upload command
* @param parallelism number of threads used for parallel uploading
* @param uploadFromStream true if upload source is stream
* @param remoteStorageLocation s3 bucket name
* @param srcFile source file if not uploading from a stream
* @param destFileName file name on s3 after upload
* @param inputStream stream used for uploading if fileBackedOutputStream is null
* @param fileBackedOutputStream stream used for uploading if not null
* @param meta object meta data
* @param stageRegion region name where the stage persists
* @param presignedUrl Not used in S3
* @throws SnowflakeSQLException if the upload failed even after retries
*/
@Override
public void upload(
        SFSession session,
        String command,
        int parallelism,
        boolean uploadFromStream,
        String remoteStorageLocation,
        File srcFile,
        String destFileName,
        InputStream inputStream,
        FileBackedOutputStream fileBackedOutputStream,
        StorageObjectMetadata meta,
        String stageRegion,
        String presignedUrl) throws SnowflakeSQLException {
    final long originalContentLength = meta.getContentLength();
    final List<FileInputStream> toClose = new ArrayList<>();
    // Validate the metadata type before the first cast, so a bad caller gets a
    // clear IllegalArgumentException rather than a ClassCastException
    ObjectMetadata s3Meta;
    if (meta instanceof S3ObjectMetadata) {
        s3Meta = ((S3ObjectMetadata) meta).getS3ObjectMetadata();
    } else {
        throw new IllegalArgumentException("Unexpected metadata object type");
    }
    SFPair<InputStream, Boolean> uploadStreamInfo = createUploadStream(
            srcFile, uploadFromStream, inputStream, fileBackedOutputStream,
            s3Meta, originalContentLength, toClose);
    TransferManager tx = null;
    int retryCount = 0;
    do {
        try {
            logger.debug("Creating executor service for transfer manager with {} threads", parallelism);
            // upload files to s3
            tx = TransferManagerBuilder.standard()
                    .withS3Client(amazonClient)
                    .withExecutorFactory(new ExecutorFactory() {
                        @Override
                        public ExecutorService newExecutor() {
                            return SnowflakeUtil.createDefaultExecutorService(
                                    "s3-transfer-manager-uploader-", parallelism);
                        }
                    })
                    .build();
            final Upload myUpload;
            if (!this.isClientSideEncrypted) {
                // since we're not client-side encrypting, make sure we're
                // server-side encrypting with SSE-S3
                s3Meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            }
            if (uploadStreamInfo.right) {
                myUpload = tx.upload(remoteStorageLocation, destFileName, uploadStreamInfo.left, s3Meta);
            } else {
                PutObjectRequest putRequest =
                        new PutObjectRequest(remoteStorageLocation, destFileName, srcFile);
                putRequest.setMetadata(s3Meta);
                myUpload = tx.upload(putRequest);
            }
            myUpload.waitForCompletion();
            // upload succeeded: close temp streams and return
            for (FileInputStream is : toClose) {
                IOUtils.closeQuietly(is);
            }
            return;
        } catch (Exception ex) {
            handleS3Exception(ex, ++retryCount, "upload", session, command, this);
            if (uploadFromStream && fileBackedOutputStream == null) {
                throw new SnowflakeSQLException(
                        ex,
                        SqlState.SYSTEM_ERROR,
                        ErrorCode.IO_ERROR.getMessageCode(),
                        "Encountered exception during upload: " + ex.getMessage()
                            + "\nCannot retry upload from stream.");
            }
            // Recreate the upload stream before retrying, since the previous one was consumed
            uploadStreamInfo = createUploadStream(
                    srcFile, uploadFromStream, inputStream, fileBackedOutputStream,
                    s3Meta, originalContentLength, toClose);
        } finally {
            if (tx != null) {
                tx.shutdownNow(false);
            }
        }
    } while (retryCount <= getMaxRetries());
    for (FileInputStream is : toClose) {
        IOUtils.closeQuietly(is);
    }
    throw new SnowflakeSQLLoggedException(
            session,
            ErrorCode.INTERNAL_ERROR.getMessageCode(),
            SqlState.INTERNAL_ERROR,
            "Unexpected: upload unsuccessful without exception!");
}
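The reusable pattern above is the caller-controlled thread pool: TransferManagerBuilder accepts an ExecutorFactory, which ties upload parallelism to the method's parallelism argument instead of the SDK default. A minimal standalone sketch of just that pattern, substituting a plain fixed-size pool for SnowflakeUtil's named executor (the buildTransferManager name is ours, not Snowflake's):
import com.amazonaws.client.builder.ExecutorFactory;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public static TransferManager buildTransferManager(AmazonS3 s3, int parallelism) {
    return TransferManagerBuilder.standard()
            .withS3Client(s3)
            // Supply our own executor so the thread count is explicit;
            // shutdownNow(false) later stops these threads without closing the client.
            .withExecutorFactory(new ExecutorFactory() {
                @Override
                public ExecutorService newExecutor() {
                    return Executors.newFixedThreadPool(parallelism);
                }
            })
            .build();
}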
use of com.amazonaws.services.s3.transfer in project geowebcache by GeoWebCache.
the class S3BlobStore method checkBucketPolicy.
/**
* Checks a {@link com.amazonaws.services.s3.AmazonS3Client} by reading the policy out of the
* bucket, an operation implemented by S3 and Minio but not, for example, by Cohesity.
*/
private void checkBucketPolicy(AmazonS3Client client, String bucketName) throws Exception {
    try {
        log.debug("Checking policy for bucket " + bucketName);
        BucketPolicy bucketPol = client.getBucketPolicy(bucketName);
        log.debug("Bucket " + bucketName + " policy: " + bucketPol.getPolicyText());
    } catch (AmazonServiceException se) {
        throw new StorageException("Server error getting bucket policy: " + se.getMessage(), se);
    }
}
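Because some S3-compatible backends omit GetBucketPolicy entirely, a caller that only wants to probe connectivity may prefer to distinguish "operation not implemented" from a genuine access failure. A hedged sketch of one way to do that; the 501 status check is an assumption about how such a backend reports the missing operation, not GeoWebCache code:
private boolean supportsBucketPolicy(AmazonS3Client client, String bucketName) {
    try {
        client.getBucketPolicy(bucketName);
        return true;
    } catch (AmazonServiceException se) {
        // Assumed: a backend without GetBucketPolicy answers 501 Not Implemented
        if (se.getStatusCode() == 501) {
            return false;
        }
        throw se;
    }
}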
use of com.amazonaws.services.s3.transfer in project alluxio by Alluxio.
the class S3AOutputStream method close.
@Override
public void close() throws IOException {
    if (mClosed) {
        return;
    }
    mLocalOutputStream.close();
    String path = getUploadPath();
    try {
        // Generate the object metadata by setting server side encryption, md5 checksum,
        // the file length, and encoding as octet stream since no assumptions are made
        // about the file type
        ObjectMetadata meta = new ObjectMetadata();
        if (mSseEnabled) {
            meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        if (mHash != null) {
            meta.setContentMD5(new String(Base64.encode(mHash.digest())));
        }
        meta.setContentLength(mFile.length());
        meta.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
        // Generate the put request and wait for the transfer manager to complete the upload
        PutObjectRequest putReq = new PutObjectRequest(mBucketName, path, mFile).withMetadata(meta);
        getTransferManager().upload(putReq).waitForUploadResult();
    } catch (Exception e) {
        LOG.error("Failed to upload {}", path, e);
        throw new IOException(e);
    } finally {
        // Delete the temporary file whether the upload succeeded or failed
        if (!mFile.delete()) {
            LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
        }
        // Set the closed flag; close can be retried until mFile.delete succeeds
        mClosed = true;
    }
}
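The mHash digest read in close() is typically accumulated while writing, by wrapping the temp-file stream in a DigestOutputStream. A minimal sketch of that constructor-time setup, using field names matching the snippet above (the exact Alluxio constructor may differ):
// Inside the constructor: every write to mLocalOutputStream also updates mHash,
// whose digest close() Base64-encodes into the Content-MD5 header.
try {
    mHash = MessageDigest.getInstance("MD5");
    mLocalOutputStream = new BufferedOutputStream(
            new DigestOutputStream(new FileOutputStream(mFile), mHash));
} catch (NoSuchAlgorithmException e) {
    // Fall back to uploading without Content-MD5 if MD5 is unavailable
    mHash = null;
    mLocalOutputStream = new BufferedOutputStream(new FileOutputStream(mFile));
}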
use of com.amazonaws.services.s3.transfer in project emodb by bazaarvoice.
the class S3ScanWriterTest method testWriteWithError.
@Test
public void testWriteWithError() throws Exception {
    URI baseUri = URI.create("s3://test-bucket/scan");
    ScheduledExecutorService uploadService = Executors.newScheduledThreadPool(2);
    AmazonS3 amazonS3 = mock(AmazonS3.class);
    final MetricRegistry metricRegistry = new MetricRegistry();
    when(amazonS3.putObject(any(PutObjectRequest.class)))
            .thenThrow(new AmazonClientException("Simulated transfer failure"));
    AmazonS3Provider amazonS3Provider = mock(AmazonS3Provider.class);
    when(amazonS3Provider.getS3ClientForBucket("test-bucket")).thenReturn(amazonS3);
    S3ScanWriter scanWriter = new S3ScanWriter(
            1, baseUri, Optional.of(2), metricRegistry, amazonS3Provider, uploadService,
            new ObjectMapper());
    scanWriter.setRetryDelay(Duration.ofMillis(10));
    try {
        ScanDestinationWriter scanDestinationWriter = scanWriter.writeShardRows("testtable", "p0", 0, 1);
        scanDestinationWriter.writeDocument(ImmutableMap.of("type", "review", "rating", 5));
        scanDestinationWriter.closeAndTransferAsync(Optional.of(1));
        scanWriter.waitForAllTransfersComplete(Duration.ofSeconds(10));
        fail("No transfer exception thrown");
    } catch (IOException e) {
        assertTrue(e.getCause() instanceof AmazonClientException);
        assertEquals(e.getCause().getMessage(), "Simulated transfer failure");
    } finally {
        uploadService.shutdownNow();
    }
    // The transfer should have been attempted three times: the initial attempt plus two retries
    verify(amazonS3, times(3)).putObject(any(PutObjectRequest.class));
}
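For contrast, a success-path variant of the same mock would stub putObject to return a result and expect exactly one call, since no retry fires when the transfer succeeds. A sketch under the same test setup (the single-call expectation assumes the writer only retries on failure):
// Stub a successful upload instead of throwing
when(amazonS3.putObject(any(PutObjectRequest.class))).thenReturn(new PutObjectResult());
// ... drive the writer exactly as above, without expecting an IOException ...
// Exactly one attempt: no retries on success
verify(amazonS3, times(1)).putObject(any(PutObjectRequest.class));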