use of software.amazon.awssdk.services.s3.model.CopyObjectRequest in project aws-doc-sdk-examples by awsdocs.
the class CopyObjectSingleOperation method main.
public static void main(String[] args) throws IOException {
    Regions clientRegion = Regions.DEFAULT_REGION;
    String bucketName = "*** Bucket name ***";
    String sourceKey = "*** Source object key ***";
    String destinationKey = "*** Destination object key ***";
    try {
        AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
                .withCredentials(new ProfileCredentialsProvider())
                .withRegion(clientRegion)
                .build();
        // Copy the object into a new object in the same bucket.
        CopyObjectRequest copyObjRequest = new CopyObjectRequest(bucketName, sourceKey, bucketName, destinationKey);
        s3Client.copyObject(copyObjRequest);
    } catch (AmazonServiceException e) {
        // The call was transmitted successfully, but Amazon S3 couldn't process
        // it, so it returned an error response.
        e.printStackTrace();
    } catch (SdkClientException e) {
        // Amazon S3 couldn't be contacted for a response, or the client
        // couldn't parse the response from Amazon S3.
        e.printStackTrace();
    }
}
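Note that although the headings on this page name the AWS SDK for Java 2.x package (software.amazon.awssdk.services.s3.model.CopyObjectRequest), the snippets themselves use the 1.x API, where CopyObjectRequest has a four-argument constructor. For comparison, here is a minimal sketch of the same single-object copy with the 2.x client; it assumes a recent 2.x release in which CopyObjectRequest.Builder exposes sourceBucket/sourceKey/destinationBucket/destinationKey (older releases only offered a URL-encoded copySource), and the region and key names are placeholders.

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CopyObjectRequest;

public class CopyObjectV2Sketch {
    public static void main(String[] args) {
        // Placeholder values, mirroring the v1 example above.
        String bucketName = "*** Bucket name ***";
        String sourceKey = "*** Source object key ***";
        String destinationKey = "*** Destination object key ***";

        try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
            CopyObjectRequest request = CopyObjectRequest.builder()
                    .sourceBucket(bucketName)
                    .sourceKey(sourceKey)
                    .destinationBucket(bucketName)
                    .destinationKey(destinationKey)
                    .build();
            // In 2.x, service and client errors surface as unchecked SdkException subtypes,
            // so no checked exception handling is required here.
            s3.copyObject(request);
        }
    }
}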
use of software.amazon.awssdk.services.s3.model.CopyObjectRequest in project alluxio by Alluxio.
the class S3AUnderFileSystem method copyObject.
@Override
protected boolean copyObject(String src, String dst) {
    LOG.debug("Copying {} to {}", src, dst);
    // Retry the copy a few times, in case transient AWS internal errors happen during the copy.
    int retries = 3;
    for (int i = 0; i < retries; i++) {
        try {
            CopyObjectRequest request = new CopyObjectRequest(mBucketName, src, mBucketName, dst);
            if (mUfsConf.getBoolean(PropertyKey.UNDERFS_S3_SERVER_SIDE_ENCRYPTION_ENABLED)) {
                ObjectMetadata meta = new ObjectMetadata();
                meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
                request.setNewObjectMetadata(meta);
            }
            mManager.copy(request).waitForCopyResult();
            return true;
        } catch (AmazonClientException | InterruptedException e) {
            LOG.error("Failed to copy file {} to {}", src, dst, e);
            if (i != retries - 1) {
                LOG.error("Retrying copying file {} to {}", src, dst);
            }
        }
    }
    LOG.error("Failed to copy file {} to {}, after {} retries", src, dst, retries);
    return false;
}
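In this Alluxio snippet, mManager is presumably an AWS TransferManager: copy(request) returns a Copy handle, and waitForCopyResult() blocks until the copy, which may be multipart for large objects, finishes. A minimal standalone sketch of the same pattern outside Alluxio's under-file-system plumbing follows; the client construction, bucket, and key names are placeholders.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.transfer.Copy;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;

public class TransferManagerCopySketch {
    public static void main(String[] args) throws InterruptedException {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        TransferManager manager = TransferManagerBuilder.standard().withS3Client(s3).build();
        try {
            CopyObjectRequest request = new CopyObjectRequest("my-bucket", "src-key", "my-bucket", "dst-key");
            // Ask S3 to encrypt the destination object with SSE-S3 (AES-256), as the Alluxio code does.
            ObjectMetadata meta = new ObjectMetadata();
            meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            request.setNewObjectMetadata(meta);

            Copy copy = manager.copy(request);
            copy.waitForCopyResult(); // blocks until the copy completes
        } finally {
            manager.shutdownNow(false); // keep the underlying S3 client open
        }
    }
}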
use of software.amazon.awssdk.services.s3.model.CopyObjectRequest in project stocator by SparkTC.
the class COSAPIClient method rename.
@Override
public boolean rename(String hostName, String srcPath, String dstPath) throws IOException {
    LOG.debug("Rename path {} to {}", srcPath, dstPath);
    Path src = new Path(srcPath);
    Path dst = new Path(dstPath);
    String srcKey = pathToKey(src);
    String dstKey = pathToKey(dst);
    if (srcKey.isEmpty()) {
        throw new IOException("Rename failed " + srcPath + " to " + dstPath + ": source is the root directory");
    }
    if (dstKey.isEmpty()) {
        throw new IOException("Rename failed " + srcPath + " to " + dstPath + ": dest is the root directory");
    }
    // Get the source file status; this raises a FileNotFoundException if there is no source file.
    FileStatus srcStatus = getFileStatus(hostName, src, "rename");
    if (srcKey.equals(dstKey)) {
        LOG.debug("rename: src and dest refer to the same file or directory: {}", dstPath);
        throw new IOException("source " + srcPath + " and dest " + dstPath + " refer to the same file or directory");
    }
    FileStatus dstStatus = null;
    try {
        dstStatus = getFileStatus(hostName, dst, "rename");
        // Check whether the existing destination can be the target of the rename.
        if (srcStatus.isDirectory()) {
            if (dstStatus.isFile()) {
                throw new IOException("source " + srcPath + " and dest " + dstPath + ": source is a directory and dest is a file");
            }
            // At this point the destination is an empty directory.
        } else {
            // Source is a file: the destination may be a directory (empty or not), but not an existing file.
            if (dstStatus.isFile()) {
                throw new IOException("source " + srcPath + " and dest " + dstPath + ": cannot rename onto an existing file");
            }
        }
    } catch (FileNotFoundException e) {
        LOG.debug("rename: destination path {} not found", dstPath);
    }
    if (srcStatus.isFile()) {
        LOG.debug("rename: renaming file {} to {}", src, dst);
        long length = srcStatus.getLen();
        if (dstStatus != null && dstStatus.isDirectory()) {
            String newDstKey = dstKey;
            if (!newDstKey.endsWith("/")) {
                newDstKey = newDstKey + "/";
            }
            String filename = srcKey.substring(pathToKey(src.getParent()).length() + 1);
            newDstKey = newDstKey + filename;
            copyFile(srcKey, newDstKey, length);
        } else {
            copyFile(srcKey, dstKey, srcStatus.getLen());
        }
        delete(hostName, src, false);
    } else {
        ObjectListing objects = mClient.listObjects(mBucket, srcKey);
        List<S3ObjectSummary> summaries = objects.getObjectSummaries();
        while (objects.isTruncated()) {
            objects = mClient.listNextBatchOfObjects(objects);
            summaries.addAll(objects.getObjectSummaries());
        }
        // Batch copy using TransferManager: build a list of copy requests.
        List<CopyObjectRequest> copyRequests = new ArrayList<>();
        List<KeyVersion> keysToDelete = new ArrayList<>();
        for (S3ObjectSummary objectSummary : summaries) {
            String newSrcKey = objectSummary.getKey();
            keysToDelete.add(new KeyVersion(newSrcKey));
            // Just in case there are still folders returned as objects.
            if (newSrcKey.endsWith("/")) {
                LOG.debug("rename: {} is a folder and will be ignored", newSrcKey);
                continue;
            }
            long length = objectSummary.getSize();
            if ((dstStatus != null && dstStatus.isDirectory()) || (dstStatus == null)) {
                String newDstKey = dstKey;
                if (!newDstKey.endsWith("/")) {
                    newDstKey = newDstKey + "/";
                }
                String filename = newSrcKey.substring(pathToKey(src).length() + 1);
                newDstKey = newDstKey + filename;
                CopyObjectRequest copyObjectRequest = new CopyObjectRequest(mBucket, newSrcKey, mBucket, newDstKey);
                ObjectMetadata srcmd = getObjectMetadata(newSrcKey);
                if (srcmd != null) {
                    copyObjectRequest.setNewObjectMetadata(srcmd);
                }
                copyRequests.add(copyObjectRequest);
            } else {
                throw new IOException("Unexpected dstStatus");
            }
        }
        // Submit the copy jobs to the TransferManager.
        CountDownLatch doneSignal = new CountDownLatch(copyRequests.size());
        List<Copy> copyJobs = new ArrayList<>();
        for (CopyObjectRequest request : copyRequests) {
            request.setGeneralProgressListener(new CopyCompleteListener(
                    request.getSourceBucketName() + "/" + request.getSourceKey(),
                    request.getDestinationBucketName() + "/" + request.getDestinationKey(),
                    doneSignal));
            copyJobs.add(transfers.copy(request));
        }
        try {
            doneSignal.await();
        } catch (AmazonClientException e) {
            throw new IOException("Couldn't wait for all copies to be finished");
        } catch (InterruptedException e) {
            throw new InterruptedIOException("Interrupted copying objects, cancelling");
        }
        // Delete the source objects and verify that they were successfully deleted.
        DeleteObjectsRequest keysDeleteRequest = new DeleteObjectsRequest(mBucket).withKeys(keysToDelete).withQuiet(false);
        DeleteObjectsResult deletedKeys = mClient.deleteObjects(keysDeleteRequest);
        int successfulDeletes = deletedKeys.getDeletedObjects().size();
        LOG.debug("{} objects successfully deleted", successfulDeletes);
    }
    if (!(src.getParent().equals(dst.getParent()))) {
        LOG.debug("{} is not equal to {}. Going to create directory {}", src.getParent(), dst.getParent(), src.getParent());
        createDirectoryIfNecessary(hostName, src.getParent());
    }
    return true;
}
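CopyCompleteListener is not shown on this page; presumably it is a ProgressListener that counts down the shared CountDownLatch once its copy finishes, so that rename() can await all of the batch copies. The following is a hypothetical reconstruction of such a listener, not the actual Stocator class; its name, fields, and event handling are assumptions.

import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressEventType;
import com.amazonaws.event.ProgressListener;
import java.util.concurrent.CountDownLatch;

// Hypothetical reconstruction; the real class lives in the Stocator code base and may differ.
class CopyCompleteListener implements ProgressListener {
    private final String source;
    private final String destination;
    private final CountDownLatch doneSignal;

    CopyCompleteListener(String source, String destination, CountDownLatch doneSignal) {
        this.source = source;
        this.destination = destination;
        this.doneSignal = doneSignal;
    }

    @Override
    public void progressChanged(ProgressEvent progressEvent) {
        // Count down once per finished copy so rename() can await all of them.
        if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT
                || progressEvent.getEventType() == ProgressEventType.TRANSFER_FAILED_EVENT) {
            doneSignal.countDown();
        }
    }
}

Counting down on TRANSFER_FAILED_EVENT as well keeps rename() from blocking forever when a copy fails; whether the real implementation does this is not visible here.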
use of software.amazon.awssdk.services.s3.model.CopyObjectRequest in project CollectiveOneWebapp by CollectiveOne.
the class FileService method copyImageAfterCreationToCard.
@Transactional
public UUID copyImageAfterCreationToCard(UUID userId, UUID fileId, UUID cardWrapperId) {
    FileStored fileStored = fileStoredRepository.findById(fileId);
    if (fileStored == null) {
        return null;
    }
    String sourceBucketName = fileStored.getBucket();
    String sourceKey = fileStored.getKey();
    String destinationBucketName = fileStored.getBucket();
    String destinationKey = "CardImages/" + cardWrapperId.toString();
    amazonS3Template.getAmazonS3Client().copyObject(
            new CopyObjectRequest(sourceBucketName, sourceKey, destinationBucketName, destinationKey)
                    .withCannedAccessControlList(CannedAccessControlList.PublicRead));
    FileStored newFileStored = new FileStored();
    newFileStored.setBucket(fileStored.getBucket());
    newFileStored.setKey(destinationKey);
    newFileStored.setUploadedBy(appUserRepository.findByC1Id(userId));
    newFileStored.setUrl(baseUrl + "/" + bucketName + "/" + destinationKey);
    newFileStored.setLastUpdated(new Timestamp(System.currentTimeMillis()));
    newFileStored.setInitiative(fileStored.getInitiative());
    newFileStored = fileStoredRepository.save(newFileStored);
    return newFileStored.getId();
}
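The S3-specific detail in this snippet is that the canned ACL travels with the CopyObjectRequest, so the copied card image becomes publicly readable in the same call that creates it. A minimal standalone sketch of just that pattern follows; the client construction, bucket, and keys are placeholders.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.CopyObjectRequest;

public class PublicCopySketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        // Copy within the same bucket and make the new object world-readable in one call.
        CopyObjectRequest request = new CopyObjectRequest(
                "my-bucket", "uploads/original.png",
                "my-bucket", "CardImages/1234")
                .withCannedAccessControlList(CannedAccessControlList.PublicRead);
        s3.copyObject(request);
        // The copy can now be served directly from the bucket, provided the bucket allows public ACLs.
    }
}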
use of software.amazon.awssdk.services.s3.model.CopyObjectRequest in project jackrabbit-oak by apache.
the class S3Backend method write.
/**
 * Uploads a file to Amazon S3. If the file size is greater than 5 MB, this
 * method uses parallel concurrent connections for the upload.
 */
@Override
public void write(DataIdentifier identifier, File file) throws DataStoreException {
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        // Check whether the same record already exists.
        try {
            objectMetaData = s3service.getObjectMetadata(bucket, key);
        } catch (AmazonServiceException ase) {
            if (!(ase.getStatusCode() == 404 || ase.getStatusCode() == 403)) {
                throw ase;
            }
        }
        if (objectMetaData != null) {
            long l = objectMetaData.getContentLength();
            if (l != file.length()) {
                throw new DataStoreException("Collision: " + key + " new length: " + file.length() + " old length: " + l);
            }
            LOG.debug("[{}] exists, lastmodified = [{}]", key, objectMetaData.getLastModified().getTime());
            CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
            copReq.setNewObjectMetadata(objectMetaData);
            Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
            try {
                copy.waitForCopyResult();
                LOG.debug("lastModified of [{}] updated successfully.", identifier);
            } catch (Exception e2) {
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
        if (objectMetaData == null) {
            try {
                // Start a multipart parallel upload using the Amazon SDK.
                Upload up = tmx.upload(s3ReqDecorator.decorate(new PutObjectRequest(bucket, key, file)));
                // Wait for the upload to finish.
                up.waitForUploadResult();
                LOG.debug("synchronous upload to identifier [{}] completed.", identifier);
            } catch (Exception e2) {
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("write of [{}], length=[{}], in [{}]ms", identifier, file.length(), (System.currentTimeMillis() - start));
}
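The notable CopyObjectRequest usage here is the copy-in-place: source and destination are the same bucket and key, and the existing metadata is re-attached via setNewObjectMetadata. Supplying new metadata makes the SDK send a REPLACE metadata directive, which S3 requires for a self-copy, and the effect is that the object's last-modified timestamp is refreshed without re-uploading the content. A minimal sketch of that "touch by self-copy" idiom, with a placeholder client, bucket, and key, might look like this:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class TouchObjectSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        String bucket = "my-bucket";
        String key = "datastore/record-0001";

        // Re-use the current metadata so the copy-in-place changes nothing but the timestamp.
        ObjectMetadata metadata = s3.getObjectMetadata(bucket, key);
        CopyObjectRequest touch = new CopyObjectRequest(bucket, key, bucket, key);
        touch.setNewObjectMetadata(metadata); // triggers the REPLACE metadata directive, which S3 requires for self-copies
        s3.copyObject(touch);
    }
}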