use of com.amazonaws.services.s3.model.Filter in project teamcity-s3-artifact-storage-plugin by JetBrains.
the class S3CloudFrontDistributionCreationController method doPost.
@Override
protected void doPost(@NotNull HttpServletRequest request, @NotNull HttpServletResponse response, @NotNull Element xmlResponse) {
  final BasePropertiesBean bean = new BasePropertiesBean(null);
  PluginPropertiesUtil.bindPropertiesFromRequest(request, bean);
  Map<String, String> params = bean.getProperties();
  String projectId = request.getParameter("projectId");
  final ActionErrors errors = new ActionErrors();
  SProject project = myProjectManager.findProjectByExternalId(projectId);
  if (project == null) {
    errors.addError(S3_CLOUDFRONT_CREATE_DISTRIBUTION, String.format("Project %s not found", projectId));
  } else {
    myAccessChecker.checkCanEditProject(project);
    String projectName = project.getName();
    IOGuard.allowNetworkCall(() -> {
      try {
        KeyPair keyPair = generateKeyPair();
        String bucketName = S3Util.getBucketName(params);
        if (keyPair.getPrivate() != null && keyPair.getPublic() != null && bucketName != null) {
          String privateKey = toPemString("PRIVATE KEY", keyPair.getPrivate().getEncoded());
          String publicKey = toPemString("PUBLIC KEY", keyPair.getPublic().getEncoded());
          DistributionDTO distributionDTO = AWSCommonParams.withAWSClients(params, clients -> {
            AmazonCloudFront cloudFrontClient = clients.createCloudFrontClient();
            AmazonS3 s3Client = clients.createS3Client();
            String comment;
            long distrCount = cloudFrontClient.listDistributions(new ListDistributionsRequest())
                                              .getDistributionList()
                                              .getItems()
                                              .stream()
                                              .filter(d -> d.getComment().startsWith(String.format(COMMENT, projectName)))
                                              .count();
            if (distrCount > 0) {
              comment = String.format(NUMBERED_COMMENT, projectName, distrCount);
            } else {
              comment = String.format(COMMENT, projectName);
            }
            String name = "generated_" + UUID.randomUUID().toString().substring(0, 8);
            String publicKeyId = null;
            String keyGroupId = null;
            try {
              publicKeyId = uploadPublicKey(publicKey, name, comment, cloudFrontClient);
              keyGroupId = createKeyGroup(publicKeyId, name, comment, cloudFrontClient);
              Distribution distribution = createDistribution(keyGroupId, comment, bucketName, cloudFrontClient, s3Client);
              return new DistributionDTO(distribution.getId(), comment, publicKeyId, name, privateKey);
            } catch (SdkClientException e) {
              errors.addException(S3_CLOUDFRONT_CREATE_DISTRIBUTION, e);
              if (keyGroupId != null) {
                try {
                  cloudFrontClient.deleteKeyGroup(new DeleteKeyGroupRequest().withId(keyGroupId));
                } catch (SdkClientException clientException) {
                  LOG.warnAndDebugDetails("Encountered exception while trying to delete CloudFront key group", clientException);
                }
              }
              if (publicKeyId != null) {
                try {
                  cloudFrontClient.deletePublicKey(new DeletePublicKeyRequest().withId(publicKeyId));
                } catch (SdkClientException clientException) {
                  LOG.warnAndDebugDetails("Encountered exception while trying to delete CloudFront public key", clientException);
                }
              }
            }
            return null;
          });
          if (distributionDTO != null) {
            Element element = S3XmlSerializerFactory.getInstance().serializeAsElement(distributionDTO);
            xmlResponse.addContent(element);
          }
        }
      } catch (IllegalArgumentException | SdkClientException | IOException | NoSuchAlgorithmException e) {
        errors.addException(S3_CLOUDFRONT_CREATE_DISTRIBUTION, e);
      }
    });
  }
  errors.serialize(xmlResponse);
}
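The helper methods called above (generateKeyPair and toPemString) are not included in this snippet. Below is a minimal sketch of what they might look like, assuming a 2048-bit RSA key pair (CloudFront signed URLs require RSA keys) and plain Base64/PEM encoding via java.util.Base64; the class name CloudFrontKeyHelper is hypothetical.

import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

// Hypothetical stand-ins for the helpers used in doPost above; the real implementations are not shown here.
final class CloudFrontKeyHelper {

  // Assumes a 2048-bit RSA key pair, as required for CloudFront signed URLs and cookies.
  static KeyPair generateKeyPair() throws NoSuchAlgorithmException {
    KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA");
    generator.initialize(2048);
    return generator.generateKeyPair();
  }

  // Wraps DER-encoded key bytes into a PEM block, e.g. -----BEGIN PUBLIC KEY----- ... -----END PUBLIC KEY-----
  static String toPemString(String type, byte[] encoded) {
    String base64 = Base64.getMimeEncoder(64, "\n".getBytes()).encodeToString(encoded);
    return "-----BEGIN " + type + "-----\n" + base64 + "\n-----END " + type + "-----\n";
  }
}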
use of com.amazonaws.services.s3.model.Filter in project vividus by vividus-framework.
the class S3BucketSteps method collectObjectKeys.
/**
* <p>
* Collects a list of the S3 object keys in the specified bucket and saves it to the <b>scopes</b> variables
* with the name <b>variableName</b>.
* </p>
* <p>
* Because buckets can contain a virtually unlimited number of keys, the complete result set can be extremely large,
* so it is recommended to use filters to limit the retrieved dataset.
* </p>
*
* @param filters The ExamplesTable with filters to be applied to the objects to limit the resulting set.
* The supported filter types are:
* <ul>
* <li><code>KEY_PREFIX</code> - the prefix parameter, restricting to keys that begin with
* the specified value.</li>
* <li><code>KEY_SUFFIX</code> - the suffix parameter, restricting to keys that end with the
* specified value.</li>
* <li><code>OBJECT_MODIFIED_NOT_EARLIER_THAN</code> - the ISO-8601 date, restricting to objects
* with last modified date after the specified value.</li>
* </ul>
* The filters can be combined in any order and in any composition, e.g.<br>
* <code>
* |filterType                      |filterValue              |<br>
* |key suffix                      |.txt                     |<br>
* |object modified not earlier than|2021-01-15T19:00:00+00:00|<br>
* </code>
*
* @param bucketName The name of the S3 bucket whose object keys are to be collected
* @param scopes The set (comma-separated list, e.g.: STORY, NEXT_BATCHES) of variable scopes<br>
* <i>Available scopes:</i>
* <ul>
* <li><b>STEP</b> - the variable will be available only within the step,
* <li><b>SCENARIO</b> - the variable will be available only within the scenario,
* <li><b>STORY</b> - the variable will be available within the whole story,
* <li><b>NEXT_BATCHES</b> - the variable will be available starting from the next batch
* </ul>
* @param variableName The variable name to store the S3 object keys. The keys are accessible via a zero-based index,
* e.g. <code>${my-keys[0]}</code> will return the first found key.
*/
@When("I collect objects keys filtered by:$filters in S3 bucket `$bucketName` and save result to $scopes variable `$variableName`")
public void collectObjectKeys(List<S3ObjectFilter> filters, String bucketName, Set<VariableScope> scopes, String variableName) {
    Map<S3ObjectFilterType, String> filterParameters = filters.stream()
            .collect(toMap(S3ObjectFilter::getFilterType, S3ObjectFilter::getFilterValue));
    ListObjectsV2Request request = new ListObjectsV2Request().withBucketName(bucketName);
    Optional.ofNullable(filterParameters.get(S3ObjectFilterType.KEY_PREFIX)).ifPresent(request::setPrefix);
    Predicate<S3ObjectSummary> filter = buildFilter(filterParameters);
    List<String> keys = collectS3ObjectsKeys(request, filter);
    variableContext.putVariable(scopes, variableName, keys);
}
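The buildFilter method referenced above is not part of this snippet. Below is a minimal sketch of how such a predicate could be composed from the remaining filter parameters, assuming KEY_SUFFIX matches the end of the key and OBJECT_MODIFIED_NOT_EARLIER_THAN is an ISO-8601 date compared against the object's last-modified timestamp; the imports belong at class level.

import java.time.ZonedDateTime;
import java.util.Date;
import java.util.Map;
import java.util.function.Predicate;

import com.amazonaws.services.s3.model.S3ObjectSummary;

// Hypothetical sketch of buildFilter; the project's actual implementation is not shown here.
private Predicate<S3ObjectSummary> buildFilter(Map<S3ObjectFilterType, String> filterParameters) {
    Predicate<S3ObjectSummary> filter = summary -> true;
    String keySuffix = filterParameters.get(S3ObjectFilterType.KEY_SUFFIX);
    if (keySuffix != null) {
        filter = filter.and(summary -> summary.getKey().endsWith(keySuffix));
    }
    String notEarlierThan = filterParameters.get(S3ObjectFilterType.OBJECT_MODIFIED_NOT_EARLIER_THAN);
    if (notEarlierThan != null) {
        Date lowerBound = Date.from(ZonedDateTime.parse(notEarlierThan).toInstant());
        filter = filter.and(summary -> summary.getLastModified().after(lowerBound));
    }
    return filter;
}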
use of com.amazonaws.services.s3.model.Filter in project vividus by vividus-framework.
the class S3BucketSteps method collectS3ObjectsKeys.
private List<String> collectS3ObjectsKeys(ListObjectsV2Request request, Predicate<S3ObjectSummary> filter) {
    ListObjectsV2Result result;
    List<String> keys = new ArrayList<>();
    int totalNumberOfObjects = 0;
    do {
        result = amazonS3Client.listObjectsV2(request);
        List<S3ObjectSummary> objectSummaries = result.getObjectSummaries();
        totalNumberOfObjects += objectSummaries.size();
        objectSummaries.stream().filter(filter).map(S3ObjectSummary::getKey).forEach(keys::add);
        request.setContinuationToken(result.getNextContinuationToken());
    } while (result.isTruncated());
    LOGGER.info("The total number of S3 objects is {}", totalNumberOfObjects);
    LOGGER.atInfo().addArgument(keys::size).log("The number of S3 objects after filtering is {}");
    return keys;
}
use of com.amazonaws.services.s3.model.Filter in project chipster-web-server by chipster.
the class StorageBackup method listFiles.
/**
* Recursively list all files under the given directory.
*
* @param dir the directory to walk
* @param exclude exclude files under this path
* @throws IOException if walking the directory tree fails
*/
private Map<Path, InfoLine> listFiles(Path dir, Path exclude) throws IOException {
    HashMap<Path, InfoLine> fileMap = new HashMap<>();
    Files.walk(dir, FileVisitOption.FOLLOW_LINKS)
            .filter(Files::isRegularFile)
            .filter(path -> !path.startsWith(exclude))
            .map(path -> dir.relativize(path))
            .filter(file -> {
                // check that filenames don't contain delimiters of the info files
                if (file.toString().contains("\n") || file.toString().contains("\t")) {
                    logger.warn("file " + file + " is skipped because its filename contains a newline or tab character");
                    return false;
                }
                return true;
            })
            .map(file -> {
                try {
                    return new InfoLine(file, Files.size(storage.resolve(file)), null, null, -1, null, null);
                } catch (IOException e) {
                    throw new RuntimeException("failed to get the size of file " + file, e);
                }
            })
            .forEach(info -> fileMap.put(info.getPath(), info));
    return fileMap;
}
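A brief, hypothetical usage of listFiles during a backup run; the paths below are placeholders, not the project's actual configuration.

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Map;

// Hypothetical invocation; "/opt/chipster/storage" and "backup-tmp" are placeholder paths.
Path storage = Paths.get("/opt/chipster/storage");
Path backupTmp = storage.resolve("backup-tmp");
Map<Path, InfoLine> currentFiles = listFiles(storage, backupTmp);
logger.info(currentFiles.size() + " files found under " + storage);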
use of com.amazonaws.services.s3.model.Filter in project chipster-web-server by chipster.
the class BackupArchive method archive.
private void archive(TransferManager transferManager, String backupNamePrefix, Path archiveRootPath, String role, String backupName,
                     String bucket, List<S3ObjectSummary> objects) throws IOException, InterruptedException, ArchiveException {
    Path currentBackupPath = archiveRootPath.resolve(backupName);
    Path downloadPath = currentBackupPath.resolve("download");
    if (Files.exists(currentBackupPath)) {
        throw new ArchiveException("archive path " + currentBackupPath + " exists already. Is another process running?");
    }
    String key = backupName + "/" + BACKUP_INFO;
    Map<Path, InfoLine> backupInfoMap = BackupUtils.infoFileToMap(transferManager, bucket, key, currentBackupPath);
    List<String> backupObjects = objects.stream()
            .map(o -> o.getKey())
            .filter(name -> name.startsWith(backupName + "/"))
            .collect(Collectors.toList());
    List<InfoLine> newFileInfos = backupInfoMap.values().stream()
            .filter(info -> backupName.equals(info.getBackupName()))
            .collect(Collectors.toList());
    List<String> archiveNames = backupInfoMap.values().stream()
            .filter(info -> !backupName.equals(info.getBackupName()))
            .map(info -> info.getBackupName())
            .distinct()
            .collect(Collectors.toList());
    logger.info("the backup has " + newFileInfos.size() + " new files in " + (backupObjects.size() - 1) + " packages");
    if (archiveNames.size() == 0) {
        logger.info("no files will be moved from the old archives");
    } else if (archiveNames.size() == 1) {
        logger.info((backupInfoMap.size() - newFileInfos.size()) + " files will be moved from the archive " + archiveNames.get(0));
    } else {
        // this isn't used at the moment
        logger.warn("the backup is using files from several archive versions (current is " + backupName + "): " + archiveNames);
    }
    downloadPath.toFile().mkdirs();
    downloadFiles(backupObjects, bucket, transferManager, downloadPath);
    collectFiles(archiveRootPath, currentBackupPath, downloadPath, backupInfoMap, backupName);
    FileUtils.deleteDirectory(downloadPath.toFile());
    logger.info("upload archive info to " + bucket + "/" + backupName + "/" + ARCHIVE_INFO + " for the next incremental backup");
    Path archiveInfoPath = writeArchiveInfo(currentBackupPath, backupInfoMap);
    Upload upload = transferManager.upload(bucket, backupName + "/" + ARCHIVE_INFO, archiveInfoPath.toFile());
    upload.waitForCompletion();
    logger.info("backup archiving done");
}
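The downloadFiles helper invoked above is not included in this snippet. Below is a minimal sketch of what it might do with the AWS TransferManager API; the signature and the directory handling are assumptions, not the project's actual code.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

import com.amazonaws.services.s3.transfer.Download;
import com.amazonaws.services.s3.transfer.TransferManager;

// Hypothetical sketch of downloadFiles; the project's actual implementation is not shown here.
// Each backup object is downloaded under downloadPath, keeping the S3 key as the relative file path.
private void downloadFiles(List<String> keys, String bucket, TransferManager transferManager, Path downloadPath)
        throws IOException, InterruptedException {
    for (String key : keys) {
        Path target = downloadPath.resolve(key);
        Files.createDirectories(target.getParent());
        Download download = transferManager.download(bucket, key, target.toFile());
        download.waitForCompletion();
    }
}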