use of com.amazonaws.services.s3.model.DeleteObjectsRequest in project stocator by SparkTC.
The class COSAPIClient, method rename.
@Override
public boolean rename(String hostName, String srcPath, String dstPath) throws IOException {
  LOG.debug("Rename path {} to {}", srcPath, dstPath);
  Path src = new Path(srcPath);
  Path dst = new Path(dstPath);
  String srcKey = pathToKey(src);
  String dstKey = pathToKey(dst);
  if (srcKey.isEmpty()) {
    throw new IOException("Rename failed " + srcPath + " to " + dstPath + ": source is the root directory");
  }
  if (dstKey.isEmpty()) {
    throw new IOException("Rename failed " + srcPath + " to " + dstPath + ": dest is the root directory");
  }
  // Get the source file status; this raises a FileNotFoundException if there is no source file.
  FileStatus srcStatus = getFileStatus(hostName, src, "rename");
  if (srcKey.equals(dstKey)) {
    LOG.debug("rename: src and dest refer to the same file or directory: {}", dstPath);
    throw new IOException("source " + srcPath + " and dest " + dstPath + " refer to the same file or directory");
  }
  FileStatus dstStatus = null;
  try {
    dstStatus = getFileStatus(hostName, dst, "rename");
    // The destination exists: decide whether or not it can be the destination of the rename.
    if (srcStatus.isDirectory()) {
      if (dstStatus.isFile()) {
        throw new IOException("source " + srcPath + " and dest " + dstPath + ": source is a directory and dest is a file");
      }
      // At this point the destination is an empty directory.
    } else {
      // Source is a file; the destination must not be an existing file.
      if (dstStatus.isFile()) {
        throw new IOException("source " + srcPath + " and dest " + dstPath + ": cannot rename onto an existing file");
      }
    }
  } catch (FileNotFoundException e) {
    LOG.debug("rename: destination path {} not found", dstPath);
  }
  if (srcStatus.isFile()) {
    LOG.debug("rename: renaming file {} to {}", src, dst);
    long length = srcStatus.getLen();
    if (dstStatus != null && dstStatus.isDirectory()) {
      String newDstKey = dstKey;
      if (!newDstKey.endsWith("/")) {
        newDstKey = newDstKey + "/";
      }
      String filename = srcKey.substring(pathToKey(src.getParent()).length() + 1);
      newDstKey = newDstKey + filename;
      copyFile(srcKey, newDstKey, length);
    } else {
      copyFile(srcKey, dstKey, srcStatus.getLen());
    }
    delete(hostName, src, false);
  } else {
    // Directory rename: list every object under the source key, paging through the results.
    ObjectListing objects = mClient.listObjects(mBucket, srcKey);
    List<S3ObjectSummary> summaries = objects.getObjectSummaries();
    while (objects.isTruncated()) {
      objects = mClient.listNextBatchOfObjects(objects);
      summaries.addAll(objects.getObjectSummaries());
    }
    // Batch copy using the TransferManager: build the list of copy requests first.
    List<CopyObjectRequest> copyRequests = new ArrayList<>();
    List<KeyVersion> keysToDelete = new ArrayList<>();
    for (S3ObjectSummary objectSummary : summaries) {
      String newSrcKey = objectSummary.getKey();
      keysToDelete.add(new KeyVersion(newSrcKey));
      // Just in case there are still folders returned as objects.
      if (newSrcKey.endsWith("/")) {
        LOG.debug("rename: {} is a folder and will be ignored", newSrcKey);
        continue;
      }
      if (dstStatus == null || dstStatus.isDirectory()) {
        String newDstKey = dstKey;
        if (!newDstKey.endsWith("/")) {
          newDstKey = newDstKey + "/";
        }
        String filename = newSrcKey.substring(pathToKey(src).length() + 1);
        newDstKey = newDstKey + filename;
        CopyObjectRequest copyObjectRequest = new CopyObjectRequest(mBucket, newSrcKey, mBucket, newDstKey);
        ObjectMetadata srcmd = getObjectMetadata(newSrcKey);
        if (srcmd != null) {
          copyObjectRequest.setNewObjectMetadata(srcmd);
        }
        copyRequests.add(copyObjectRequest);
      } else {
        throw new IOException("Unexpected dstStatus");
      }
    }
    // Submit the copy jobs to the TransferManager and wait for all of them to complete.
    CountDownLatch doneSignal = new CountDownLatch(copyRequests.size());
    List<Copy> copyJobs = new ArrayList<>();
    for (CopyObjectRequest request : copyRequests) {
      request.setGeneralProgressListener(new CopyCompleteListener(
          request.getSourceBucketName() + "/" + request.getSourceKey(),
          request.getDestinationBucketName() + "/" + request.getDestinationKey(),
          doneSignal));
      copyJobs.add(transfers.copy(request));
    }
    try {
      doneSignal.await();
    } catch (AmazonClientException e) {
      throw new IOException("Couldn't wait for all copies to be finished");
    } catch (InterruptedException e) {
      throw new InterruptedIOException("Interrupted copying objects, cancelling");
    }
    // Delete the source objects and verify how many were successfully deleted.
    DeleteObjectsRequest keysDeleteRequest = new DeleteObjectsRequest(mBucket).withKeys(keysToDelete).withQuiet(false);
    DeleteObjectsResult deletedKeys = mClient.deleteObjects(keysDeleteRequest);
    int successfulDeletes = deletedKeys.getDeletedObjects().size();
    LOG.debug("{} objects successfully deleted", successfulDeletes);
  }
  if (!src.getParent().equals(dst.getParent())) {
    LOG.debug("{} is not equal to {}. Going to create directory {}", src.getParent(), dst.getParent(), src.getParent());
    createDirectoryIfNecessary(hostName, src.getParent());
  }
  return true;
}
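One caveat worth noting: the deleteObjects call above sends every accumulated key in a single request, but S3's multi-object delete accepts at most 1,000 keys per call, so renaming a very large directory could fail. A minimal sketch of chunking the delete, assuming the same mClient and mBucket fields (the helper name is illustrative, not part of Stocator):

// Sketch: split the accumulated keys into batches that respect the
// 1,000-key limit of a single S3 multi-object delete request.
private void deleteKeysInBatches(List<KeyVersion> keysToDelete) {
  final int maxKeysPerRequest = 1000; // hard S3 limit per DeleteObjects call
  for (int i = 0; i < keysToDelete.size(); i += maxKeysPerRequest) {
    List<KeyVersion> batch =
        keysToDelete.subList(i, Math.min(i + maxKeysPerRequest, keysToDelete.size()));
    DeleteObjectsRequest request =
        new DeleteObjectsRequest(mBucket).withKeys(batch).withQuiet(false);
    DeleteObjectsResult result = mClient.deleteObjects(request);
    LOG.debug("{} objects deleted in this batch", result.getDeletedObjects().size());
  }
}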
use of com.amazonaws.services.s3.model.DeleteObjectsRequest in project elasticsearch by elastic.
The class AbstractS3SnapshotRestoreTest, method cleanRepositoryFiles.
/**
 * Deletes the content of the repository files in the bucket.
 */
public void cleanRepositoryFiles(String basePath) {
  Settings settings = internalCluster().getInstance(Settings.class);
  Settings[] buckets = {
      settings.getByPrefix("repositories.s3."),
      settings.getByPrefix("repositories.s3.private-bucket."),
      settings.getByPrefix("repositories.s3.remote-bucket."),
      settings.getByPrefix("repositories.s3.external-bucket.") };
  for (Settings bucket : buckets) {
    String bucketName = bucket.get("bucket");
    // Check that the setting has been set in the elasticsearch.yml integration test file,
    // as described in the README.
    assertThat("Your settings in elasticsearch.yml are incorrect. Check the README file.", bucketName, notNullValue());
    AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(Settings.EMPTY, null, randomBoolean(), null);
    try {
      ObjectListing prevListing = null;
      // From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html:
      // we can do at most 1K objects per delete.
      // We don't know the bucket name until the first object listing.
      DeleteObjectsRequest multiObjectDeleteRequest = null;
      ArrayList<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<DeleteObjectsRequest.KeyVersion>();
      while (true) {
        ObjectListing list;
        if (prevListing != null) {
          list = client.listNextBatchOfObjects(prevListing);
        } else {
          list = client.listObjects(bucketName, basePath);
          multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
        }
        for (S3ObjectSummary summary : list.getObjectSummaries()) {
          keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
          // Batch the delete request once more than 500 keys have accumulated.
          if (keys.size() > 500) {
            multiObjectDeleteRequest.setKeys(keys);
            client.deleteObjects(multiObjectDeleteRequest);
            multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
            keys.clear();
          }
        }
        if (list.isTruncated()) {
          prevListing = list;
        } else {
          break;
        }
      }
      // Delete whatever remains from the last partial batch.
      if (!keys.isEmpty()) {
        multiObjectDeleteRequest.setKeys(keys);
        client.deleteObjects(multiObjectDeleteRequest);
      }
    } catch (Exception ex) {
      logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to delete S3 repository [{}]", bucketName), ex);
    }
  }
}
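The list-paginate-delete loop above is the same pattern every snippet on this page reimplements inline. A condensed sketch of it as a standalone helper, assuming a v1 AmazonS3 client (the method name is illustrative); each listing page returns at most 1,000 keys, so every batch also fits within the multi-object delete limit:

// Sketch: delete every object under a prefix, one listing page at a time.
static void deletePrefix(AmazonS3 client, String bucketName, String prefix) {
  ObjectListing listing = client.listObjects(bucketName, prefix);
  while (true) {
    List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<>();
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
      keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
    }
    if (!keys.isEmpty()) {
      client.deleteObjects(new DeleteObjectsRequest(bucketName).withKeys(keys));
    }
    if (!listing.isTruncated()) {
      break;
    }
    listing = client.listNextBatchOfObjects(listing);
  }
}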
use of com.amazonaws.services.s3.model.DeleteObjectsRequest in project jackrabbit-oak by apache.
The class S3DataStoreUtils, method deleteBucket.
public static void deleteBucket(String bucket, Date date) throws Exception {
  log.info("cleaning bucket [" + bucket + "]");
  Properties props = getS3Config();
  AmazonS3Client s3service = Utils.openService(props);
  TransferManager tmx = new TransferManager(s3service);
  if (s3service.doesBucketExist(bucket)) {
    // Make several passes: abort in-progress multipart uploads, then list and delete the remaining objects.
    for (int i = 0; i < 4; i++) {
      tmx.abortMultipartUploads(bucket, date);
      ObjectListing prevObjectListing = s3service.listObjects(bucket);
      while (prevObjectListing != null) {
        List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>();
        for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
          deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey()));
        }
        if (deleteList.size() > 0) {
          DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket);
          delObjsReq.setKeys(deleteList);
          s3service.deleteObjects(delObjsReq);
        }
        if (!prevObjectListing.isTruncated()) {
          break;
        }
        prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
      }
    }
    s3service.deleteBucket(bucket);
    log.info("bucket [" + bucket + "] cleaned");
  } else {
    log.info("bucket [" + bucket + "] doesn't exist");
  }
  tmx.shutdownNow();
  s3service.shutdown();
}
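A hypothetical call site for the utility above; the Date argument is passed through to abortMultipartUploads, which aborts only uploads initiated before that instant:

// Illustrative usage: abort uploads started before "now", then empty and drop the bucket.
Date cutoff = new Date();
S3DataStoreUtils.deleteBucket("my-test-bucket", cutoff); // bucket name is an example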
use of com.amazonaws.services.s3.model.DeleteObjectsRequest in project jackrabbit-oak by apache.
The class S3Backend, method renameKeys.
/**
 * Renames object keys in S3 concurrently. The number of concurrent threads is
 * defined by the 'maxConnections' property in aws.properties. As S3 has no
 * "move" operation, this method simulates a move by copying each object to its
 * new key and then deleting the old key.
 */
private void renameKeys() throws DataStoreException {
  long startTime = System.currentTimeMillis();
  ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
  long count = 0;
  try {
    Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
    ObjectListing prevObjectListing = s3service.listObjects(bucket);
    List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>();
    int nThreads = Integer.parseInt(properties.getProperty("maxConnections"));
    ExecutorService executor = Executors.newFixedThreadPool(nThreads, new NamedThreadFactory("s3-object-rename-worker"));
    boolean taskAdded = false;
    while (true) {
      for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
        executor.execute(new KeyRenameThread(s3ObjSumm.getKey()));
        taskAdded = true;
        count++;
        // Schedule the object for deletion if it follows the old key name format.
        if (s3ObjSumm.getKey().startsWith(KEY_PREFIX)) {
          deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey()));
        }
      }
      if (!prevObjectListing.isTruncated()) {
        break;
      }
      prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
    }
    // Make the executor accept no new tasks and finish the ones already queued.
    executor.shutdown();
    try {
      // Wait until all rename tasks are finished.
      while (taskAdded && !executor.awaitTermination(10, TimeUnit.SECONDS)) {
        LOG.info("Rename S3 keys tasks timed out. Waiting again.");
      }
    } catch (InterruptedException ie) {
      // Ignored; fall through and delete whatever has been renamed so far.
    }
    LOG.info("Renamed [{}] keys, time taken [{}]sec", count, ((System.currentTimeMillis() - startTime) / 1000));
    // Delete the old keys in batches of 500.
    if (deleteList.size() > 0) {
      DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket);
      int batchSize = 500, startIndex = 0, size = deleteList.size();
      int endIndex = Math.min(batchSize, size);
      while (endIndex <= size) {
        delObjsReq.setKeys(Collections.unmodifiableList(deleteList.subList(startIndex, endIndex)));
        DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq);
        LOG.info("Records[{}] deleted in datastore from index [{}] to [{}]", new Object[] { dobjs.getDeletedObjects().size(), startIndex, (endIndex - 1) });
        if (endIndex == size) {
          break;
        }
        startIndex = endIndex;
        endIndex = Math.min(startIndex + batchSize, size);
      }
    }
  } finally {
    if (contextClassLoader != null) {
      Thread.currentThread().setContextClassLoader(contextClassLoader);
    }
  }
}
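KeyRenameThread is referenced above but not shown on this page. A minimal sketch of what such a worker plausibly looks like, given the Javadoc's copy-then-delete description; the convertKey mapping is an assumption, not the Oak source:

// Assumed shape of the rename worker: copy the object under its new key.
// The old keys are removed later by the batched DeleteObjectsRequest above.
private class KeyRenameThread implements Runnable {
  private final String oldKey;

  KeyRenameThread(String oldKey) {
    this.oldKey = oldKey;
  }

  @Override
  public void run() {
    String newKey = convertKey(oldKey); // hypothetical old-format to new-format mapping
    s3service.copyObject(bucket, oldKey, bucket, newKey);
  }
}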
use of com.amazonaws.services.s3.model.DeleteObjectsRequest in project dataverse by IQSS.
The class S3AccessIO, method deleteAllAuxObjects.
@Override
public void deleteAllAuxObjects() throws IOException {
  if (!this.canWrite()) {
    open(DataAccessOption.WRITE_ACCESS);
  }
  String prefix = getDestinationKey("");
  List<S3ObjectSummary> storedAuxFilesSummary = null;
  try {
    // List every object under the aux prefix, paging through the results.
    ListObjectsRequest req = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix);
    ObjectListing storedAuxFilesList = s3.listObjects(req);
    storedAuxFilesSummary = storedAuxFilesList.getObjectSummaries();
    while (storedAuxFilesList.isTruncated()) {
      storedAuxFilesList = s3.listNextBatchOfObjects(storedAuxFilesList);
      storedAuxFilesSummary.addAll(storedAuxFilesList.getObjectSummaries());
    }
  } catch (AmazonClientException ace) {
    logger.warning("Caught an AmazonClientException: " + ace.getMessage());
    throw new IOException("S3AccessIO: Failed to list aux objects for deletion.");
  }
  List<KeyVersion> keys = new ArrayList<>();
  for (S3ObjectSummary item : storedAuxFilesSummary) {
    keys.add(new KeyVersion(item.getKey()));
  }
  // Nothing to delete if the data file has no auxiliary objects.
  if (keys.isEmpty()) {
    logger.fine("S3AccessIO: No auxiliary objects to delete.");
    return;
  }
  DeleteObjectsRequest multiObjectDeleteRequest = new DeleteObjectsRequest(bucketName);
  multiObjectDeleteRequest.setKeys(keys);
  logger.fine("Trying to delete auxiliary files...");
  try {
    s3.deleteObjects(multiObjectDeleteRequest);
  } catch (MultiObjectDeleteException e) {
    logger.warning("S3AccessIO: Unable to delete auxiliary objects: " + e.getMessage());
    throw new IOException("S3AccessIO: Failed to delete one or more auxiliary objects.");
  }
}
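MultiObjectDeleteException carries per-key detail that the catch block above discards. A sketch of a more informative handler using the same logger (the message wording is illustrative):

// Sketch: report which keys failed and how many were deleted before the failure.
try {
  s3.deleteObjects(multiObjectDeleteRequest);
} catch (MultiObjectDeleteException e) {
  for (MultiObjectDeleteException.DeleteError error : e.getErrors()) {
    logger.warning("S3AccessIO: failed to delete key " + error.getKey() + ": " + error.getMessage());
  }
  logger.fine(e.getDeletedObjects().size() + " objects were deleted before the failure.");
  throw new IOException("S3AccessIO: Failed to delete one or more auxiliary objects.");
}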