Use of com.amazonaws.services.s3.model.Filter in project esop by instaclustr.
The class BaseS3Restorer, method listBucket.
private List<S3ObjectSummary> listBucket(final String remotePrefix, final Predicate<String> keyFilter) {
    ObjectListing objectListing = amazonS3.listObjects(request.storageLocation.bucket, remotePrefix);
    boolean hasMoreContent = true;
    final List<S3ObjectSummary> summaryList = new ArrayList<>();
    while (hasMoreContent) {
        objectListing.getObjectSummaries().stream()
            // no dirs
            .filter(objectSummary -> !objectSummary.getKey().endsWith("/"))
            .filter(file -> keyFilter.test(file.getKey()))
            .collect(toCollection(() -> summaryList));
        if (objectListing.isTruncated()) {
            objectListing = amazonS3.listNextBatchOfObjects(objectListing);
        } else {
            hasMoreContent = false;
        }
    }
    return summaryList;
}
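For context, here is a minimal standalone sketch of the same paged-listing pattern using only the AWS SDK v1 client; the bucket name, prefix, and key filter are hypothetical placeholders, and the esop-specific request.storageLocation wrapper is omitted.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

public class PagedListingSketch {

    public static void main(String[] args) {
        // hypothetical bucket, prefix, and filter, for illustration only
        final String bucket = "my-backup-bucket";
        final String prefix = "cluster1/dc1/node1/manifests";
        final Predicate<String> keyFilter = key -> key.endsWith(".json");

        final AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        final List<S3ObjectSummary> summaries = new ArrayList<>();

        ObjectListing listing = s3.listObjects(bucket, prefix);
        while (true) {
            for (S3ObjectSummary summary : listing.getObjectSummaries()) {
                // skip directory placeholders and anything the caller's filter rejects
                if (!summary.getKey().endsWith("/") && keyFilter.test(summary.getKey())) {
                    summaries.add(summary);
                }
            }
            if (!listing.isTruncated()) {
                break;
            }
            // fetch the next page when the listing is truncated
            listing = s3.listNextBatchOfObjects(listing);
        }
        summaries.forEach(s -> System.out.println(s.getKey()));
    }
}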
Use of com.amazonaws.services.s3.model.Filter in project solarnetwork-central by SolarNetwork.
The class S3DatumExportDestinationServiceTests, method settingSpecifiers.
@Test
public void settingSpecifiers() {
    // given
    S3DatumExportDestinationService service = new S3DatumExportDestinationService();

    // when
    List<SettingSpecifier> specs = service.getSettingSpecifiers();

    // then
    assertThat("Setting specs provided", specs, hasSize(5));
    Set<String> keys = specs.stream()
        .filter(s -> s instanceof KeyedSettingSpecifier<?>)
        .map(s -> ((KeyedSettingSpecifier<?>) s).getKey())
        .collect(Collectors.toSet());
    assertThat("Setting keys", keys,
        containsInAnyOrder("accessKey", "secretKey", "path", "filenameTemplate", "storageClass"));
}
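The filter-then-cast stream idiom used in that test is general purpose; a minimal sketch with plain JDK types, not tied to SolarNetwork's SettingSpecifier hierarchy, is shown below.

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class FilterCastSketch {

    public static void main(String[] args) {
        // mixed list standing in for the heterogeneous List<SettingSpecifier>
        List<Object> items = List.of("accessKey", 42, "secretKey", 3.14, "path");

        // keep only the String elements, cast them, and collect their values into a Set
        Set<String> keys = items.stream()
                .filter(s -> s instanceof String)
                .map(s -> (String) s)
                .collect(Collectors.toSet());

        System.out.println(keys);
    }
}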
Use of com.amazonaws.services.s3.model.Filter in project trellis-extensions by trellis-ldp.
The class S3MementoService, method put.
@Override
public CompletionStage<Void> put(final Resource resource) {
    return runAsync(() -> {
        try {
            final File file = createTempFile("trellis-memento-", ".nq");
            file.deleteOnExit();
            // collect resource properties as S3 user metadata
            final Map<String, String> metadata = new HashMap<>();
            metadata.put(S3Resource.INTERACTION_MODEL, resource.getInteractionModel().getIRIString());
            metadata.put(S3Resource.MODIFIED, resource.getModified().toString());
            resource.getContainer().map(IRI::getIRIString).ifPresent(c -> metadata.put(S3Resource.CONTAINER, c));
            resource.getBinaryMetadata().ifPresent(b -> {
                metadata.put(S3Resource.BINARY_LOCATION, b.getIdentifier().getIRIString());
                b.getMimeType().ifPresent(m -> metadata.put(S3Resource.BINARY_TYPE, m));
            });
            resource.getMembershipResource().map(IRI::getIRIString).ifPresent(m -> metadata.put(S3Resource.MEMBERSHIP_RESOURCE, m));
            resource.getMemberRelation().map(IRI::getIRIString).ifPresent(m -> metadata.put(S3Resource.MEMBER_RELATION, m));
            resource.getMemberOfRelation().map(IRI::getIRIString).ifPresent(m -> metadata.put(S3Resource.MEMBER_OF_RELATION, m));
            resource.getInsertedContentRelation().map(IRI::getIRIString).ifPresent(m -> metadata.put(S3Resource.INSERTED_CONTENT_RELATION, m));
            // serialize the resource's quads to a locally buffered N-Quads file
            try (final Dataset dataset = rdf.createDataset();
                    final OutputStream output = Files.newOutputStream(file.toPath());
                    final Stream<? extends Quad> quads = resource.stream()) {
                quads.forEachOrdered(dataset::add);
                metadata.put(S3Resource.METADATA_GRAPHS, dataset.getGraphNames()
                        .filter(IRI.class::isInstance).map(IRI.class::cast)
                        .filter(graph -> !IGNORE.contains(graph))
                        .map(IRI::getIRIString).collect(joining(",")));
                RDFDataMgr.write(output, toJena(dataset), NQUADS);
            }
            // upload the file to S3 with the collected user metadata, then delete the local buffer
            final ObjectMetadata md = new ObjectMetadata();
            md.setContentType("application/n-quads");
            md.setUserMetadata(metadata);
            final PutObjectRequest req = new PutObjectRequest(bucketName,
                    getKey(resource.getIdentifier(), resource.getModified().truncatedTo(SECONDS)), file);
            client.putObject(req.withMetadata(md));
            Files.delete(file.toPath());
        } catch (final Exception ex) {
            throw new TrellisRuntimeException("Error persisting memento", ex);
        }
    });
}
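The S3 interaction at the heart of that method is a put of a local file together with user metadata. A minimal sketch of just that step, assuming a hypothetical bucket, key, local file, and metadata entry (the Trellis types and RDF serialization are left out), follows.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

import java.io.File;
import java.util.HashMap;
import java.util.Map;

public class MetadataUploadSketch {

    public static void main(String[] args) {
        // hypothetical bucket, key, file, and metadata entry, for illustration only
        final String bucket = "my-memento-bucket";
        final String key = "mementos/resource/1589000000";
        final File file = new File("/tmp/resource.nq");

        final Map<String, String> metadata = new HashMap<>();
        metadata.put("interactionModel", "http://www.w3.org/ns/ldp#RDFSource");

        final ObjectMetadata md = new ObjectMetadata();
        md.setContentType("application/n-quads");
        md.setUserMetadata(metadata);

        final AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        // attach the object metadata to the request before uploading
        s3.putObject(new PutObjectRequest(bucket, key, file).withMetadata(md));
    }
}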
Use of com.amazonaws.services.s3.model.Filter in project nrtsearch by Yelp.
The class BackupRestoreIndexRequestHandlerTest, method getFiles.
public List<String> getFiles(Path basePath) {
    List<String> result = new ArrayList<>();
    ImmutableList<File> childFiles = FileUtils.listFiles(basePath.toFile());
    for (File childFile : childFiles) {
        if (Files.isDirectory(childFile.toPath())) {
            result.addAll(getFiles(childFile.toPath()));
        } else if (Files.isRegularFile(childFile.toPath())) {
            result.add(childFile.getName());
        }
    }
    return result.stream()
        .filter(x -> !x.startsWith("snapshots") && !x.startsWith("stateRefCounts"))
        .collect(Collectors.toList());
}
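The same walk-and-filter behavior can be sketched with the JDK's Files.walk, avoiding the project-specific FileUtils helper; the excluded file-name prefixes mirror the test above, and the index directory path is a placeholder.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class IndexFileListingSketch {

    // collect regular-file names under basePath, skipping snapshot bookkeeping files
    public static List<String> getFiles(Path basePath) throws IOException {
        try (Stream<Path> paths = Files.walk(basePath)) {
            return paths.filter(Files::isRegularFile)
                    .map(p -> p.getFileName().toString())
                    .filter(name -> !name.startsWith("snapshots") && !name.startsWith("stateRefCounts"))
                    .collect(Collectors.toList());
        }
    }

    public static void main(String[] args) throws IOException {
        // hypothetical index directory, for illustration only
        System.out.println(getFiles(Path.of("/tmp/test_index")));
    }
}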
Use of com.amazonaws.services.s3.model.Filter in project datapull by homeaway.
The class DataPullRequestProcessor, method runDataPull.
private void runDataPull(String json, boolean isStart, boolean validateJson) throws ProcessingException {
    String originalInputJson = json;
    json = extractUserJsonFromS3IfProvided(json, isStart);
    final EMRProperties emrProperties = this.config.getEmrProperties();
    if (log.isDebugEnabled())
        log.debug("runDataPull -> json = " + json + " isStart = " + isStart);
    try {
        if (validateJson) {
            json = validateAndEnrich(json);
        }
        log.info("Running datapull for json : " + json + " cron expression = " + isStart + "env =" + env);
        // parse the request JSON and collect its top-level fields
        final ObjectNode node = new ObjectMapper().readValue(json, ObjectNode.class);
        List<Map.Entry<String, JsonNode>> result = new LinkedList<Map.Entry<String, JsonNode>>();
        Iterator<Map.Entry<String, JsonNode>> nodes = node.fields();
        while (nodes.hasNext()) {
            result.add(nodes.next());
        }
        // the "cluster" and "migrations" sections drive the EMR configuration
        JsonNode clusterNode = result.stream().filter(y -> y.getKey().equalsIgnoreCase("cluster")).map(x -> x.getValue()).findAny().get();
        JsonNode migrationsNode = result.stream().filter(y -> y.getKey().equalsIgnoreCase("migrations")).map(x -> x.getValue()).findAny().get();
        if (clusterNode == null)
            throw new ProcessingException("Invalid Json!!! Cluster properties cannot be null");
        String creator = node.has(CREATOR) ? node.findValue(CREATOR).asText() : "";
        ObjectMapper mapper = new ObjectMapper();
        ClusterProperties reader = mapper.treeToValue(clusterNode, ClusterProperties.class);
        Migration[] myObjects = mapper.treeToValue(migrationsNode, Migration[].class);
        String cronExp = Objects.toString(reader.getCronExpression(), "");
        if (!cronExp.isEmpty())
            cronExp = validateAndProcessCronExpression(cronExp);
        String pipeline = Objects.toString(reader.getPipelineName(), UUID.randomUUID().toString());
        String pipelineEnv = Objects.toString(reader.getAwsEnv(), env);
        // resolve S3 paths for pipeline history and bootstrap artifacts
        DataPullProperties dataPullProperties = config.getDataPullProperties();
        String applicationHistoryFolder = dataPullProperties.getApplicationHistoryFolder();
        String s3RepositoryBucketName = dataPullProperties.getS3BucketName();
        String jobName = pipelineEnv + PIPELINE_NAME_DELIMITER + EMR + PIPELINE_NAME_DELIMITER + pipeline + PIPELINE_NAME_DELIMITER + PIPELINE_NAME_SUFFIX;
        String applicationHistoryFolderPath = applicationHistoryFolder == null || applicationHistoryFolder.isEmpty()
                ? s3RepositoryBucketName + "/" + DATAPULL_HISTORY_FOLDER : applicationHistoryFolder;
        String bootstrapFilePath = s3RepositoryBucketName + "/" + BOOTSTRAP_FOLDER;
        String filePath = applicationHistoryFolderPath + "/" + jobName;
        String bootstrapFile = jobName + ".sh";
        String jksFilePath = bootstrapFilePath + "/" + bootstrapFile;
        String bootstrapActionStringFromUser = Objects.toString(reader.getBootstrapactionstring(), "");
        String defaultBootstrapString = emrProperties.getDefaultBootstrapString();
        Boolean haveBootstrapAction = createBootstrapScript(myObjects, bootstrapFile, bootstrapFilePath, bootstrapActionStringFromUser, defaultBootstrapString);
        DataPullTask task = createDataPullTask(filePath, jksFilePath, reader, jobName, creator, node.path("sparkjarfile").asText(), haveBootstrapAction);
        if (!isStart) {
            json = originalInputJson.equals(json) ? json : originalInputJson;
            saveConfig(applicationHistoryFolderPath, jobName + ".json", json);
        }
        if (!isStart && tasksMap.containsKey(jobName))
            cancelExistingTask(jobName);
        // schedule on the cron expression when present, otherwise run once, one second from now
        if (!(isStart && cronExp.isEmpty())) {
            Future<?> future = !cronExp.isEmpty()
                    ? scheduler.schedule(task, new CronTrigger(cronExp))
                    : scheduler.schedule(task, new Date(System.currentTimeMillis() + 1 * 1000));
            tasksMap.put(jobName, future);
        }
    } catch (IOException e) {
        throw new ProcessingException("exception while starting datapull " + e.getLocalizedMessage());
    }
    if (log.isDebugEnabled())
        log.debug("runDataPull <- return");
}
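The cron-or-immediate scheduling fallback at the end of runDataPull follows a common Spring pattern. A minimal standalone sketch, assuming a ThreadPoolTaskScheduler is available and using a placeholder task and cron expression, is below.

import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
import org.springframework.scheduling.support.CronTrigger;

import java.util.Date;
import java.util.concurrent.Future;

public class CronOrImmediateSketch {

    public static void main(String[] args) {
        ThreadPoolTaskScheduler scheduler = new ThreadPoolTaskScheduler();
        scheduler.initialize();

        // placeholder task and cron expression, for illustration only
        Runnable task = () -> System.out.println("datapull task running");
        String cronExp = "0 0/15 * * * ?";

        // recurring schedule when a cron expression is set, otherwise a one-off run a second from now
        Future<?> future = !cronExp.isEmpty()
                ? scheduler.schedule(task, new CronTrigger(cronExp))
                : scheduler.schedule(task, new Date(System.currentTimeMillis() + 1000));

        // the Future can later be cancelled to deschedule the job
        System.out.println("scheduled: " + !future.isCancelled());
    }
}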