use of com.amazonaws.services.s3.model.ListObjectsRequest in project goobi-workflow by intranda.
the class S3FileUtils method listDirNames.
@Override
public List<String> listDirNames(String folder) {
    StorageType storageType = getPathStorageType(folder);
    if (storageType == StorageType.LOCAL) {
        return nio.list(folder, NIOFileUtils.folderFilter);
    }
    String folderPrefix = string2Prefix(folder);
    ListObjectsRequest req = new ListObjectsRequest().withBucketName(getBucket()).withPrefix(folderPrefix);
    ObjectListing listing = s3.listObjects(req);
    Set<String> objs = new HashSet<>();
    // collect the first path segment of every key below the folder prefix
    for (S3ObjectSummary os : listing.getObjectSummaries()) {
        String key = os.getKey().replace(folderPrefix, "");
        int idx = key.indexOf('/');
        if (idx >= 0) {
            objs.add(key.substring(0, idx));
        }
    }
    // page through the remaining results while the listing is truncated
    while (listing.isTruncated()) {
        listing = s3.listNextBatchOfObjects(listing);
        for (S3ObjectSummary os : listing.getObjectSummaries()) {
            String key = os.getKey().replace(folderPrefix, "");
            int idx = key.indexOf('/');
            if (idx >= 0) {
                objs.add(key.substring(0, idx));
            }
        }
    }
    List<String> folders = new ArrayList<>(objs);
    Collections.sort(folders);
    return folders;
}
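The method above derives folder names by scanning every key under the prefix and cutting off the first path segment. As a hedged alternative sketch (bucket and prefix handling are assumptions, not the project's code), S3 can return the immediate sub-folders directly as common prefixes when a delimiter is supplied; this assumes the prefix already ends with "/":
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import java.util.ArrayList;
import java.util.List;

public class ListDirNamesSketch {
    // Sketch: list immediate "sub-folders" below a prefix via common prefixes.
    public static List<String> listDirNames(String bucket, String folderPrefix) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        ListObjectsRequest req = new ListObjectsRequest()
                .withBucketName(bucket)
                .withPrefix(folderPrefix)   // assumed to end with "/"
                .withDelimiter("/");        // ask S3 to group keys at the next "/"
        List<String> folders = new ArrayList<>();
        ObjectListing listing = s3.listObjects(req);
        while (true) {
            for (String prefix : listing.getCommonPrefixes()) {
                // common prefixes look like "<folderPrefix><name>/"; strip both parts
                folders.add(prefix.substring(folderPrefix.length(), prefix.length() - 1));
            }
            if (!listing.isTruncated()) {
                break;
            }
            listing = s3.listNextBatchOfObjects(listing);
        }
        return folders;
    }
}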
use of com.amazonaws.services.s3.model.ListObjectsRequest in project spark-cassandra-bulkreader by jberragan.
the class S3Client method sstables.
/**
 * @return list of sstables found in the instance directory and size in bytes for all file types
 */
public Map<String, Map<DataLayer.FileType, Long>> sstables(String clusterName, String keyspace, String table, String dc, String token) {
    final String prefix = instanceKey(clusterName, keyspace, table, dc, token);
    final ListObjectsRequest req = new ListObjectsRequest().withPrefix(prefix).withDelimiter("/").withBucketName(this.bucket);
    final List<S3ObjectSummary> objects = s3.listObjects(req).getObjectSummaries();
    final Map<String, Map<DataLayer.FileType, Long>> sizes = new HashMap<>();
    for (final S3ObjectSummary object : objects) {
        // strip the instance prefix so only the sstable component file name remains
        final String key = object.getKey().replaceFirst(prefix, "");
        if (StringUtils.isNullOrEmpty(key)) {
            continue;
        }
        final DataLayer.FileType fileType = DataLayer.FileType.fromExtension(key.substring(key.lastIndexOf("-") + 1));
        final String name = key.replace(fileType.getFileSuffix(), "");
        // group each component file's size under its sstable name
        sizes.putIfAbsent(name, new HashMap<>(8));
        sizes.get(name).put(fileType, object.getSize());
    }
    return sizes;
}
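Note that a single listObjects call returns at most one page of results (up to 1,000 keys by default), and this method reads only that first page. A hedged sketch of collecting every summary under a prefix with pagination (names are illustrative, not from the project):
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import java.util.ArrayList;
import java.util.List;

// Sketch: gather all object summaries under a prefix, following truncated pages.
static List<S3ObjectSummary> listAll(AmazonS3 s3, String bucket, String prefix) {
    List<S3ObjectSummary> all = new ArrayList<>();
    ObjectListing listing = s3.listObjects(new ListObjectsRequest()
            .withBucketName(bucket)
            .withPrefix(prefix)
            .withDelimiter("/"));
    all.addAll(listing.getObjectSummaries());
    while (listing.isTruncated()) {
        listing = s3.listNextBatchOfObjects(listing);
        all.addAll(listing.getObjectSummaries());
    }
    return all;
}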
use of com.amazonaws.services.s3.model.ListObjectsRequest in project formkiq-core by formkiq.
the class S3Service method deleteAllFiles.
/**
 * Delete All Files in bucket.
 *
 * @param s3 {@link S3Client}
 * @param bucket {@link String}
 */
public void deleteAllFiles(final S3Client s3, final String bucket) {
    boolean isDone = false;
    while (!isDone) {
        ListObjectsRequest req = ListObjectsRequest.builder().bucket(bucket).build();
        ListObjectsResponse resp = s3.listObjects(req);
        for (S3Object s3Object : resp.contents()) {
            deleteObject(s3, bucket, s3Object.key());
        }
        isDone = !resp.isTruncated().booleanValue();
    }
}
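The loop re-issues the list request until a response is no longer truncated. A hedged alternative sketch with the same AWS SDK v2 client, using the ListObjectsV2 paginator to iterate all pages (method and variable names are illustrative):
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.model.S3Object;

// Sketch: delete every object in a bucket by iterating the ListObjectsV2 paginator.
static void deleteAllFiles(S3Client s3, String bucket) {
    ListObjectsV2Request req = ListObjectsV2Request.builder().bucket(bucket).build();
    for (S3Object s3Object : s3.listObjectsV2Paginator(req).contents()) {
        s3.deleteObject(DeleteObjectRequest.builder()
                .bucket(bucket)
                .key(s3Object.key())
                .build());
    }
}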
use of com.amazonaws.services.s3.model.ListObjectsRequest in project amazon-neptune-tools by awslabs.
the class NeptuneExportService method checkS3OutputIsEmpty.
private void checkS3OutputIsEmpty() {
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(outputS3Path);
    // maxKeys = 1: a single matching object is enough to prove the destination is not empty
    ObjectListing listing = s3.listObjects(new ListObjectsRequest(s3ObjectInfo.bucket(), s3ObjectInfo.key(), null, null, 1));
    if (!listing.getObjectSummaries().isEmpty()) {
        throw new IllegalStateException(String.format("S3 destination contains existing objects: %s. Set 'overwriteExisting' parameter to 'true' to allow overwriting existing objects.", outputS3Path));
    }
}
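The five-argument constructor above sets bucket, prefix, a null marker, a null delimiter, and maxKeys = 1. A hedged equivalent sketch using the fluent setters (bucket and prefix arguments are placeholders):
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ListObjectsRequest;

// Sketch: check whether any object already exists under a prefix.
static boolean s3PrefixIsEmpty(AmazonS3 s3, String bucket, String prefix) {
    ListObjectsRequest req = new ListObjectsRequest()
            .withBucketName(bucket)
            .withPrefix(prefix)
            .withMaxKeys(1); // one key is enough to know the prefix is non-empty
    return s3.listObjects(req).getObjectSummaries().isEmpty();
}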
use of com.amazonaws.services.s3.model.ListObjectsRequest in project extension-s3 by lucee.
the class S3 method get.
public S3Info get(String bucketName, final String objectName) throws S3Exception {
    if (Util.isEmpty(objectName)) {
        return get(bucketName);
    }
    bucketName = improveBucketName(bucketName);
    String nameFile = improveObjectName(objectName, false);
    String nameDir = improveObjectName(objectName, true);
    // cache
    S3Info info = cacheTimeout <= 0 ? null : exists.get(toKey(bucketName, nameFile));
    if (info != null && info.validUntil() >= System.currentTimeMillis()) {
        if (info instanceof NotExisting)
            return null;
        return info;
    }
    info = null;
    AmazonS3Client client = getAmazonS3(bucketName, null);
    try {
        long validUntil = System.currentTimeMillis() + cacheTimeout;
        ObjectListing objects = null;
        try {
            ListObjectsRequest lor = new ListObjectsRequest();
            lor.setBucketName(bucketName);
            lor.setPrefix(nameFile);
            lor.setMaxKeys(100);
            objects = client.listObjects(lor);
        } catch (Exception e) {
            if (log != null)
                log.error("s3", e);
            else
                e.printStackTrace();
        }
        // nothing found under this prefix
        if (objects == null || objects.getObjectSummaries() == null || objects.getObjectSummaries().size() == 0) {
            // we do not return this, we just store it to cache that it does not exist
            exists.put(toKey(bucketName, objectName), new NotExisting(bucketName, objectName, validUntil, log));
            return null;
        }
        String targetName;
        S3ObjectSummary stoObj = null;
        int count = 0;
        // while (true) {
        for (S3ObjectSummary summary : objects.getObjectSummaries()) {
            count++;
            // direct match
            targetName = summary.getKey();
            if (nameFile.equals(targetName) || nameDir.equals(targetName)) {
                exists.put(toKey(bucketName, nameFile), info = new StorageObjectWrapper(this, stoObj = summary, validUntil, log));
            }
            // pseudo directory?
            // if (info == null) {
            targetName = summary.getKey();
            if (nameDir.length() < targetName.length() && targetName.startsWith(nameDir)) {
                exists.put(toKey(bucketName, nameFile), info = new ParentObject(this, bucketName, nameDir, validUntil, log));
            }
            // set the value to exist when not a match
            if (!(stoObj != null && stoObj.equals(summary))) {
                exists.put(toKey(summary.getBucketName(), summary.getKey()), new StorageObjectWrapper(this, summary, validUntil, log));
            }
            // set all the parents when not exist
            // TODO handle that also a file with that name can exist at the same time
            String parent = nameFile;
            int index;
            while ((index = parent.lastIndexOf('/')) != -1) {
                parent = parent.substring(0, index);
                exists.put(toKey(bucketName, parent), new ParentObject(this, bucketName, parent, validUntil, log));
            }
        }
        // }
        if (info == null) {
            // we do not return this, we just store it to cache that it does not exist
            exists.put(toKey(bucketName, objectName), new NotExisting(bucketName, objectName, validUntil, log));
        }
        return info;
    } catch (AmazonServiceException ase) {
        throw toS3Exception(ase);
    } finally {
        client.release();
    }
}
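The loop above distinguishes a direct key match (an object stored as "name" or "name/") from a pseudo directory (some longer key starting with "name/"), caching what it finds along the way. A hedged, stripped-down sketch of just that classification step, with the caching omitted (type and method names are illustrative, not part of the extension):
import com.amazonaws.services.s3.model.S3ObjectSummary;
import java.util.List;

enum MatchKind { DIRECT_OBJECT, PSEUDO_DIRECTORY, NONE }

// Sketch: decide whether a prefix listing contains "name" as a stored object or as a pseudo directory.
static MatchKind classify(List<S3ObjectSummary> summaries, String nameFile, String nameDir) {
    for (S3ObjectSummary summary : summaries) {
        String key = summary.getKey();
        if (key.equals(nameFile) || key.equals(nameDir)) {
            return MatchKind.DIRECT_OBJECT;    // an object (file or explicit folder marker) with exactly that key
        }
        if (key.length() > nameDir.length() && key.startsWith(nameDir)) {
            return MatchKind.PSEUDO_DIRECTORY; // something exists below "name/"
        }
    }
    return MatchKind.NONE;
}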