Example 6 with Filter

use of com.amazonaws.services.s3.model.Filter in project teamcity-s3-artifact-storage-plugin by JetBrains.

the class S3CloudFrontDistributionCreationController method doPost.

@Override
protected void doPost(@NotNull HttpServletRequest request, @NotNull HttpServletResponse response, @NotNull Element xmlResponse) {
    final BasePropertiesBean bean = new BasePropertiesBean(null);
    PluginPropertiesUtil.bindPropertiesFromRequest(request, bean);
    Map<String, String> params = bean.getProperties();
    String projectId = request.getParameter("projectId");
    final ActionErrors errors = new ActionErrors();
    SProject project = myProjectManager.findProjectByExternalId(projectId);
    if (project == null) {
        errors.addError(S3_CLOUDFRONT_CREATE_DISTRIBUTION, String.format("Project %s not found", projectId));
    } else {
        myAccessChecker.checkCanEditProject(project);
        String projectName = project.getName();
        IOGuard.allowNetworkCall(() -> {
            try {
                KeyPair keyPair = generateKeyPair();
                String bucketName = S3Util.getBucketName(params);
                if (keyPair.getPrivate() != null && keyPair.getPublic() != null && bucketName != null) {
                    String privateKey = toPemString("PRIVATE KEY", keyPair.getPrivate().getEncoded());
                    String publicKey = toPemString("PUBLIC KEY", keyPair.getPublic().getEncoded());
                    DistributionDTO distributionDTO = AWSCommonParams.withAWSClients(params, clients -> {
                        AmazonCloudFront cloudFrontClient = clients.createCloudFrontClient();
                        AmazonS3 s3Client = clients.createS3Client();
                        String comment;
                        long distrCount = cloudFrontClient.listDistributions(new ListDistributionsRequest())
                            .getDistributionList()
                            .getItems()
                            .stream()
                            .filter(d -> d.getComment().startsWith(String.format(COMMENT, projectName)))
                            .count();
                        if (distrCount > 0) {
                            comment = String.format(NUMBERED_COMMENT, projectName, distrCount);
                        } else {
                            comment = String.format(COMMENT, projectName);
                        }
                        String name = "generated_" + UUID.randomUUID().toString().substring(0, 8);
                        String publicKeyId = null;
                        String keyGroupId = null;
                        try {
                            publicKeyId = uploadPublicKey(publicKey, name, comment, cloudFrontClient);
                            keyGroupId = createKeyGroup(publicKeyId, name, comment, cloudFrontClient);
                            Distribution distribution = createDistribution(keyGroupId, comment, bucketName, cloudFrontClient, s3Client);
                            return new DistributionDTO(distribution.getId(), comment, publicKeyId, name, privateKey);
                        } catch (SdkClientException e) {
                            errors.addException(S3_CLOUDFRONT_CREATE_DISTRIBUTION, e);
                            if (keyGroupId != null) {
                                try {
                                    cloudFrontClient.deleteKeyGroup(new DeleteKeyGroupRequest().withId(keyGroupId));
                                } catch (SdkClientException clientException) {
                                    LOG.warnAndDebugDetails("Encountered exception while trying to delete CloudFront key group", clientException);
                                }
                            }
                            if (publicKeyId != null) {
                                try {
                                    cloudFrontClient.deletePublicKey(new DeletePublicKeyRequest().withId(publicKeyId));
                                } catch (SdkClientException clientException) {
                                    LOG.warnAndDebugDetails("Encountered exception while trying to delete CloudFront public key", clientException);
                                }
                            }
                        }
                        return null;
                    });
                    if (distributionDTO != null) {
                        Element element = S3XmlSerializerFactory.getInstance().serializeAsElement(distributionDTO);
                        xmlResponse.addContent(element);
                    }
                }
            } catch (IllegalArgumentException | SdkClientException | IOException | NoSuchAlgorithmException e) {
                errors.addException(S3_CLOUDFRONT_CREATE_DISTRIBUTION, e);
            }
        });
    }
    errors.serialize(xmlResponse);
}
Also used : Policy (com.amazonaws.auth.policy.Policy) Principal (com.amazonaws.auth.policy.Principal) BucketPolicy (com.amazonaws.services.s3.model.BucketPolicy) ZonedDateTime (java.time.ZonedDateTime) S3Util (jetbrains.buildServer.artifacts.s3.S3Util) S3ObjectResource (com.amazonaws.auth.policy.resources.S3ObjectResource) S3Actions (com.amazonaws.auth.policy.actions.S3Actions) IOGuard (jetbrains.buildServer.serverSide.IOGuard) AWSCommonParams (jetbrains.buildServer.util.amazon.AWSCommonParams) HttpServletRequest (javax.servlet.http.HttpServletRequest) Map (java.util.Map) BaseFormXmlController (jetbrains.buildServer.controllers.BaseFormXmlController) AmazonS3 (com.amazonaws.services.s3.AmazonS3) ProjectManager (jetbrains.buildServer.serverSide.ProjectManager) Statement (com.amazonaws.auth.policy.Statement) ZoneOffset (java.time.ZoneOffset) Logger (com.intellij.openapi.diagnostic.Logger) PemObject (org.bouncycastle.util.io.pem.PemObject) java.security (java.security) Used (jetbrains.buildServer.Used) AmazonCloudFront (com.amazonaws.services.cloudfront.AmazonCloudFront) Predicate (java.util.function.Predicate) StringWriter (java.io.StringWriter) Collection (java.util.Collection) HttpServletResponse (javax.servlet.http.HttpServletResponse) BasePropertiesBean (jetbrains.buildServer.controllers.BasePropertiesBean) AccessChecker (jetbrains.buildServer.serverSide.auth.AccessChecker) IOException (java.io.IOException) UUID (java.util.UUID) XmlRootElement (javax.xml.bind.annotation.XmlRootElement) Collectors (java.util.stream.Collectors) PluginDescriptor (jetbrains.buildServer.web.openapi.PluginDescriptor) BouncyCastleProvider (org.bouncycastle.jce.provider.BouncyCastleProvider) Nullable (org.jetbrains.annotations.Nullable) ModelAndView (org.springframework.web.servlet.ModelAndView) SdkClientException (com.amazonaws.SdkClientException) List (java.util.List) PemWriter (org.bouncycastle.util.io.pem.PemWriter) S3XmlSerializerFactory (jetbrains.buildServer.artifacts.s3.serialization.S3XmlSerializerFactory) PluginPropertiesUtil (jetbrains.buildServer.controllers.admin.projects.PluginPropertiesUtil) CloudFrontConstants (jetbrains.buildServer.artifacts.s3.cloudfront.CloudFrontConstants) ActionErrors (jetbrains.buildServer.controllers.ActionErrors) SProject (jetbrains.buildServer.serverSide.SProject) WebControllerManager (jetbrains.buildServer.web.openapi.WebControllerManager) NotNull (org.jetbrains.annotations.NotNull) Element (org.jdom.Element) com.amazonaws.services.cloudfront.model (com.amazonaws.services.cloudfront.model)
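The helper methods generateKeyPair and toPemString are referenced but not shown in this excerpt. Based on the BouncyCastle PemObject and PemWriter imports listed above, a PEM helper could look roughly like the following minimal sketch (the body is an assumption, not the plugin's actual code):

// Sketch (assumption): wrap DER-encoded key bytes in a PEM envelope such as
// "-----BEGIN PUBLIC KEY-----" ... "-----END PUBLIC KEY-----".
// Requires java.io.StringWriter, org.bouncycastle.util.io.pem.PemObject
// and org.bouncycastle.util.io.pem.PemWriter.
private static String toPemString(String type, byte[] encoded) throws IOException {
    StringWriter stringWriter = new StringWriter();
    try (PemWriter pemWriter = new PemWriter(stringWriter)) {
        pemWriter.writeObject(new PemObject(type, encoded));
    }
    return stringWriter.toString();
}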

Example 7 with Filter

use of com.amazonaws.services.s3.model.Filter in project vividus by vividus-framework.

the class S3BucketSteps method collectObjectKeys.

/**
 * <p>
 * Collects a list of the S3 object keys in the specified bucket and saves it to <b>scopes</b> variables
 * with the name <b>variableName</b>.
 * </p>
 * <p>
 * Because buckets can contain a virtually unlimited number of keys, the complete result set can be extremely
 * large, so it is recommended to apply filters to retrieve a reduced dataset.
 * </p>
 *
 * @param filters      The ExamplesTable with filters to be applied to the objects to limit the resulting set.
 *                     The supported filter types are:
 *                     <ul>
 *                     <li><code>KEY_PREFIX</code> - the prefix parameter, restricting to keys that begin with
 *                     the specified value.</li>
 *                     <li><code>KEY_SUFFIX</code> - the suffix parameter, restricting to keys that end with the
 *                     specified value.</li>
 *                     <li><code>OBJECT_MODIFIED_NOT_EARLIER_THAN</code> - the ISO-8601 date, restricting to objects
 *                     with last modified date after the specified value.</li>
 *                     </ul>
 *                     The filters can be combined in any order and in any composition, e.g.<br>
 *                     <code>
 *                     |filterType                      |filterValue               |<br>
 *                     |key suffix                      |.txt                      |<br>
 *                     |object modified not earlier than|2021-01-15T19:00:00+00:00 |<br>
 *                     </code>
 *
 * @param bucketName   The name of the S3 bucket whose object keys are to be collected
 * @param scopes       The set (comma-separated list of scopes, e.g.: STORY, NEXT_BATCHES) of variable scopes<br>
 *                     <i>Available scopes:</i>
 *                     <ul>
 *                     <li><b>STEP</b> - the variable will be available only within the step,
 *                     <li><b>SCENARIO</b> - the variable will be available only within the scenario,
 *                     <li><b>STORY</b> - the variable will be available within the whole story,
 *                     <li><b>NEXT_BATCHES</b> - the variable will be available starting from the next batch
 *                     </ul>
 * @param variableName the variable name to store the S3 objects keys. The keys are accessible via zero-based index,
 *                     e.g. <code>${my-keys[0]}</code> will return the first found key.
 */
@When("I collect objects keys filtered by:$filters in S3 bucket `$bucketName` and save result to $scopes variable " + "`$variableName`")
public void collectObjectKeys(List<S3ObjectFilter> filters, String bucketName, Set<VariableScope> scopes, String variableName) {
    Map<S3ObjectFilterType, String> filterParameters = filters.stream()
            .collect(toMap(S3ObjectFilter::getFilterType, S3ObjectFilter::getFilterValue));
    ListObjectsV2Request request = new ListObjectsV2Request().withBucketName(bucketName);
    Optional.ofNullable(filterParameters.get(S3ObjectFilterType.KEY_PREFIX)).ifPresent(request::setPrefix);
    Predicate<S3ObjectSummary> filter = buildFilter(filterParameters);
    List<String> keys = collectS3ObjectsKeys(request, filter);
    variableContext.putVariable(scopes, variableName, keys);
}
Also used : ListObjectsV2Request(com.amazonaws.services.s3.model.ListObjectsV2Request) S3ObjectSummary(com.amazonaws.services.s3.model.S3ObjectSummary) When(org.jbehave.core.annotations.When)
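The buildFilter helper called in this step is defined elsewhere in S3BucketSteps. Below is a hedged sketch of how the two remaining filters from the documentation above (KEY_PREFIX is already consumed as the request prefix) could be combined into one predicate; only the enum constants come from the excerpt, the method body is an assumption:

// Assumed sketch, not the actual vividus implementation.
// Requires java.util.Date and java.time.ZonedDateTime.
private Predicate<S3ObjectSummary> buildFilter(Map<S3ObjectFilterType, String> filterParameters) {
    Predicate<S3ObjectSummary> filter = summary -> true;
    String keySuffix = filterParameters.get(S3ObjectFilterType.KEY_SUFFIX);
    if (keySuffix != null) {
        filter = filter.and(summary -> summary.getKey().endsWith(keySuffix));
    }
    String notEarlierThan = filterParameters.get(S3ObjectFilterType.OBJECT_MODIFIED_NOT_EARLIER_THAN);
    if (notEarlierThan != null) {
        // S3ObjectSummary#getLastModified returns java.util.Date
        Date lowerBound = Date.from(ZonedDateTime.parse(notEarlierThan).toInstant());
        filter = filter.and(summary -> summary.getLastModified().after(lowerBound));
    }
    return filter;
}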

Example 8 with Filter

use of com.amazonaws.services.s3.model.Filter in project vividus by vividus-framework.

the class S3BucketSteps method collectS3ObjectsKeys.

private List<String> collectS3ObjectsKeys(ListObjectsV2Request request, Predicate<S3ObjectSummary> filter) {
    ListObjectsV2Result result;
    List<String> keys = new ArrayList<>();
    int totalNumberOfObjects = 0;
    do {
        result = amazonS3Client.listObjectsV2(request);
        List<S3ObjectSummary> objectSummaries = result.getObjectSummaries();
        totalNumberOfObjects += objectSummaries.size();
        objectSummaries.stream().filter(filter).map(S3ObjectSummary::getKey).forEach(keys::add);
        request.setContinuationToken(result.getNextContinuationToken());
    } while (result.isTruncated());
    LOGGER.info("The total number of S3 objects is {}", totalNumberOfObjects);
    LOGGER.atInfo().addArgument(keys::size).log("The number of S3 objects after filtering is {}");
    return keys;
}
Also used : ListObjectsV2Result(com.amazonaws.services.s3.model.ListObjectsV2Result) ArrayList(java.util.ArrayList) S3ObjectSummary(com.amazonaws.services.s3.model.S3ObjectSummary)
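listObjectsV2 returns at most 1,000 object summaries per call, so the do/while loop feeds each response's continuation token into the next request until isTruncated() becomes false. A hypothetical invocation of the helper (bucket name, prefix, and suffix are made up):

// Hypothetical usage: collect all ".txt" keys under the "reports/" prefix.
ListObjectsV2Request request = new ListObjectsV2Request()
        .withBucketName("my-bucket")
        .withPrefix("reports/");
Predicate<S3ObjectSummary> txtOnly = summary -> summary.getKey().endsWith(".txt");
List<String> keys = collectS3ObjectsKeys(request, txtOnly);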

Example 9 with Filter

use of com.amazonaws.services.s3.model.Filter in project chipster-web-server by chipster.

the class StorageBackup method listFiles.

/**
 * List recursively all files under dir
 *
 * @param dir
 * @param exclude exclude files under this path
 * @throws IOException
 */
private Map<Path, InfoLine> listFiles(Path dir, Path exclude) throws IOException {
    HashMap<Path, InfoLine> fileMap = new HashMap<>();
    // Files.walk() must be closed to release the open directory handles,
    // hence the try-with-resources block (requires java.util.stream.Stream).
    try (Stream<Path> paths = Files.walk(dir, FileVisitOption.FOLLOW_LINKS)) {
        paths.filter(Files::isRegularFile)
             .filter(path -> !path.startsWith(exclude))
             .map(dir::relativize)
             .filter(file -> {
                 // check that filenames don't contain the delimiters of the info files
                 if (file.toString().contains("\n") || file.toString().contains("\t")) {
                     logger.warn("file " + file + " is skipped because its filename contains a newline or tab character");
                     return false;
                 }
                 return true;
             })
             .map(file -> {
                 try {
                     return new InfoLine(file, Files.size(storage.resolve(file)), null, null, -1, null, null);
                 } catch (IOException e) {
                     throw new RuntimeException("failed to get the size of file " + file, e);
                 }
             })
             .forEach(info -> fileMap.put(info.getPath(), info));
    }
    return fileMap;
}
Also used : Path (java.nio.file.Path) TransferManager (com.amazonaws.services.s3.transfer.TransferManager) ScheduledFuture (java.util.concurrent.ScheduledFuture) Role (fi.csc.chipster.auth.model.Role) HashMap (java.util.HashMap) BackupUtils (fi.csc.chipster.archive.BackupUtils) Future (java.util.concurrent.Future) Calendar (java.util.Calendar) Charset (java.nio.charset.Charset) Map (java.util.Map) ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService) S3ObjectSummary (com.amazonaws.services.s3.model.S3ObjectSummary) AmazonServiceException (com.amazonaws.AmazonServiceException) BackupArchive (fi.csc.chipster.archive.BackupArchive) Files (java.nio.file.Files) StatusSource (fi.csc.chipster.rest.StatusSource) StandardOpenOption (java.nio.file.StandardOpenOption) Set (java.util.Set) IOException (java.io.IOException) FileUtils (org.apache.commons.io.FileUtils) Instant (java.time.Instant) Collectors (java.util.stream.Collectors) Executors (java.util.concurrent.Executors) TimeUnit (java.util.concurrent.TimeUnit) S3Util (fi.csc.chipster.rest.hibernate.S3Util) List (java.util.List) ChronoUnit (java.time.temporal.ChronoUnit) Logger (org.apache.logging.log4j.Logger) InfoLine (fi.csc.chipster.archive.InfoLine) FileVisitOption (java.nio.file.FileVisitOption) Paths (java.nio.file.Paths) Config (fi.csc.chipster.rest.Config) Entry (java.util.Map.Entry) ArchiveException (fi.csc.chipster.archive.ArchiveException) Collections (java.util.Collections) LogManager (org.apache.logging.log4j.LogManager) AmazonClientException (com.amazonaws.AmazonClientException)
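As a usage sketch (the directory name is hypothetical), the method can be pointed at the storage root while excluding a working directory nested inside it:

// Hypothetical invocation within StorageBackup, where "storage" is the
// class's storage root field used by listFiles() above: index all regular
// files, skipping a temporary backup working directory inside the root.
Path backupWorkDir = storage.resolve("backup-tmp");
Map<Path, InfoLine> fileMap = listFiles(storage, backupWorkDir);
logger.info("found " + fileMap.size() + " files");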

Example 10 with Filter

use of com.amazonaws.services.s3.model.Filter in project chipster-web-server by chipster.

the class BackupArchive method archive.

private void archive(TransferManager transferManager, String backupNamePrefix, Path archiveRootPath, String role, String backupName, String bucket, List<S3ObjectSummary> objects) throws IOException, InterruptedException, ArchiveException {
    Path currentBackupPath = archiveRootPath.resolve(backupName);
    Path downloadPath = currentBackupPath.resolve("download");
    if (Files.exists(currentBackupPath)) {
        throw new ArchiveException("archive path " + currentBackupPath + " exists already. Is other process running?");
    }
    String key = backupName + "/" + BACKUP_INFO;
    Map<Path, InfoLine> backupInfoMap = BackupUtils.infoFileToMap(transferManager, bucket, key, currentBackupPath);
    List<String> backupObjects = objects.stream()
            .map(S3ObjectSummary::getKey)
            .filter(name -> name.startsWith(backupName + "/"))
            .collect(Collectors.toList());
    List<InfoLine> newFileInfos = backupInfoMap.values().stream()
            .filter(info -> backupName.equals(info.getBackupName()))
            .collect(Collectors.toList());
    List<String> archiveNames = backupInfoMap.values().stream()
            .filter(info -> !backupName.equals(info.getBackupName()))
            .map(InfoLine::getBackupName)
            .distinct()
            .collect(Collectors.toList());
    logger.info("the backup has " + newFileInfos.size() + " new files in " + (backupObjects.size() - 1) + " packages");
    if (archiveNames.isEmpty()) {
        logger.info("no files will be moved from the old archives");
    } else if (archiveNames.size() == 1) {
        logger.info((backupInfoMap.size() - newFileInfos.size()) + " files will be moved from the archive " + archiveNames.get(0));
    } else {
        // this isn't used at the moment
        logger.warn("the backup is using files from several archive versions (current is " + backupName + "): " + archiveNames);
    }
    downloadPath.toFile().mkdirs();
    downloadFiles(backupObjects, bucket, transferManager, downloadPath);
    collectFiles(archiveRootPath, currentBackupPath, downloadPath, backupInfoMap, backupName);
    FileUtils.deleteDirectory(downloadPath.toFile());
    logger.info("upload archive info to " + bucket + "/" + backupName + "/" + ARCHIVE_INFO + " for next incremental backup");
    Path archiveInfoPath = writeArchiveInfo(currentBackupPath, backupInfoMap);
    Upload upload = transferManager.upload(bucket, backupName + "/" + ARCHIVE_INFO, archiveInfoPath.toFile());
    upload.waitForCompletion();
    logger.info("backup archiving done");
}
Also used : Path (java.nio.file.Path) TransferManager (com.amazonaws.services.s3.transfer.TransferManager) Role (fi.csc.chipster.auth.model.Role) Download (com.amazonaws.services.s3.transfer.Download) HashSet (java.util.HashSet) Charset (java.nio.charset.Charset) Map (java.util.Map) S3ObjectSummary (com.amazonaws.services.s3.model.S3ObjectSummary) ProcessUtils (fi.csc.chipster.rest.ProcessUtils) AmazonServiceException (com.amazonaws.AmazonServiceException) DbBackup (fi.csc.chipster.rest.hibernate.DbBackup) Files (java.nio.file.Files) StandardOpenOption (java.nio.file.StandardOpenOption) Upload (com.amazonaws.services.s3.transfer.Upload) Set (java.util.Set) IOException (java.io.IOException) FileUtils (org.apache.commons.io.FileUtils) Instant (java.time.Instant) Collectors (java.util.stream.Collectors) S3Util (fi.csc.chipster.rest.hibernate.S3Util) List (java.util.List) ChronoUnit (java.time.temporal.ChronoUnit) Stream (java.util.stream.Stream) Logger (org.apache.logging.log4j.Logger) TreeMap (java.util.TreeMap) Paths (java.nio.file.Paths) Config (fi.csc.chipster.rest.Config) Optional (java.util.Optional) BackupRotation2 (fi.csc.chipster.rest.hibernate.BackupRotation2) Collections (java.util.Collections) LogManager (org.apache.logging.log4j.LogManager) AmazonClientException (com.amazonaws.AmazonClientException)
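The downloadFiles, collectFiles, and writeArchiveInfo helpers are defined elsewhere in BackupArchive. Based on the TransferManager and Download imports listed above, the download step might look roughly like this sketch (the body is an assumption, not the actual chipster code):

// Assumed sketch: download each backup object sequentially and block until
// it has been written to disk. Requires java.io.File; the v1 TransferManager
// creates missing parent directories for the destination file.
private void downloadFiles(List<String> keys, String bucket,
        TransferManager transferManager, Path downloadPath) throws InterruptedException {
    for (String key : keys) {
        File targetFile = downloadPath.resolve(key).toFile();
        Download download = transferManager.download(bucket, key, targetFile);
        download.waitForCompletion();
    }
}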
