Use of com.amazonaws.services.s3.model.Filter in project workflow-service by UKHomeOffice:
the class CasesApplicationService, method query.
/**
 * Query for cases that match a key. Each case is a collection of process instance pointers. No internal data
 * is returned.
 *
 * @param query        an Elasticsearch query-string expression used to locate matching cases
 * @param pageable     paging information (offset and page size) applied to the search
 * @param platformUser the user performing the search; used for audit logging and candidate-group filtering
 * @return a page of cases, each grouping the historic process instances that share a business key
 */
@AuditableCaseEvent
public Page<Case> query(String query, Pageable pageable, PlatformUser platformUser) {
    log.info("Performing search by {}", platformUser.getEmail());
    final SearchRequest searchRequest = new SearchRequest();
    SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
    sourceBuilder.query(QueryBuilders.queryStringQuery(query));
    sourceBuilder.from(Math.toIntExact(pageable.getOffset()));
    sourceBuilder.size(pageable.getPageSize());
    // Only the businessKey field is needed; suppress the rest of the document source.
    sourceBuilder.fetchSource(new String[] { "businessKey" }, null);
    searchRequest.source(sourceBuilder);
    try {
        RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
        builder.addHeader("Content-Type", "application/json");
        final SearchResponse results = elasticsearchClient.search(searchRequest, builder.build());
        // Collect the distinct business keys present in the search hits.
        final Set<String> keys = StreamSupport.stream(results.getHits().spliterator(), false)
                .filter(s -> s.getSourceAsMap().containsKey("businessKey"))
                .map(s -> s.getSourceAsMap().get("businessKey").toString())
                .collect(toSet());
        List<HistoricProcessInstance> historicProcessInstances = new ArrayList<>();
        if (!keys.isEmpty()) {
            final List<HistoricProcessInstance> instances = keys.stream()
                    .map(key -> historyService.createHistoricProcessInstanceQuery().processInstanceBusinessKey(key).list())
                    .flatMap(List::stream)
                    .collect(toList());
            historicProcessInstances.addAll(instances);
        }
        // Drop instances the user is not entitled to see, then group by business key.
        Map<String, List<HistoricProcessInstance>> groupedByBusinessKey = historicProcessInstances.stream()
                .filter(instance -> this.candidateGroupFilter(instance, platformUser))
                .collect(Collectors.groupingBy(HistoricProcessInstance::getBusinessKey));
        List<Case> cases = groupedByBusinessKey.entrySet().stream().map(entry -> {
            Case caseDto = new Case();
            caseDto.setBusinessKey(entry.getKey());
            caseDto.setProcessInstances(entry.getValue().stream()
                    .map(HistoricProcessInstanceDto::fromHistoricProcessInstance)
                    .collect(toList()));
            return caseDto;
        }).collect(toList());
        // The user may be entitled to none of the hits; report zero rather than the raw hit count.
        final long totalHits = cases.isEmpty() ? 0 : results.getHits().getTotalHits().value;
        log.info("Number of cases returned for '{}' is '{}'", query, totalHits);
        return new PageImpl<>(cases, PageRequest.of(pageable.getPageNumber(), pageable.getPageSize()), totalHits);
    } catch (Exception e) {
        log.error("Failed to perform search", e);
        throw new RuntimeException("Failed to perform search", e);
    }
}
Use of com.amazonaws.services.s3.model.Filter in project workflow-service by UKHomeOffice:
the class PdfService, method sendPDFs.
/**
 * Sends an email via AWS SES with the given PDF attachments.
 *
 * @param senderAddress the "from" address
 * @param recipients    candidate "to" addresses; blank and unparseable entries are skipped
 * @param body          HTML body of the email
 * @param subject       subject line
 * @param attachmentIds attachment identifiers: relative ids are treated as S3 object keys in the
 *                      configured PDF bucket, absolute URIs are fetched over HTTP
 */
public void sendPDFs(String senderAddress, List<String> recipients, String body, String subject, List<String> attachmentIds) {
    // Filter blanks BEFORE the empty check so an all-blank list is also rejected here.
    List<String> filteredRecipients = recipients.stream().filter(StringUtils::isNotBlank).collect(Collectors.toList());
    if (filteredRecipients.isEmpty()) {
        log.warn("No recipients defined so not sending email");
        return;
    }
    try {
        Session session = Session.getDefaultInstance(new Properties());
        MimeMessage mimeMessage = new MimeMessage(session);
        mimeMessage.setSubject(subject, "UTF-8");
        mimeMessage.setFrom(senderAddress);
        // Resolve addresses, dropping any that fail to parse so no null entries reach the message
        // (the original passed nulls straight into setRecipients).
        Address[] toAddresses = filteredRecipients.stream().map(recipient -> {
            try {
                return new InternetAddress(recipient);
            } catch (AddressException e) {
                log.error("Failed to resolve to address {} {}", recipient, e.getMessage());
                return null;
            }
        }).filter(Objects::nonNull).toArray(Address[]::new);
        if (toAddresses.length == 0) {
            log.warn("No valid recipient addresses so not sending email");
            return;
        }
        mimeMessage.setRecipients(Message.RecipientType.TO, toAddresses);
        MimeMultipart mp = new MimeMultipart();
        BodyPart part = new MimeBodyPart();
        part.setContent(body, "text/html");
        mp.addBodyPart(part);
        attachmentIds.forEach(id -> {
            try {
                MimeBodyPart attachment = new MimeBodyPart();
                DataSource dataSource;
                if (!new URI(id).isAbsolute()) {
                    // Relative ids are S3 object keys in the configured PDF bucket.
                    S3Object object = amazonS3.getObject(environment.getProperty("aws.s3.pdfs"), id);
                    dataSource = new ByteArrayDataSource(object.getObjectContent(), "application/pdf");
                    attachment.setFileName(id);
                } else {
                    // Absolute ids are URLs; stream the remote content, keeping its reported type.
                    dataSource = restTemplate.execute(id, HttpMethod.GET, null, (ResponseExtractor<DataSource>) response -> {
                        String type = Objects.requireNonNull(response.getHeaders().getContentType()).toString();
                        try {
                            attachment.setFileName(response.getHeaders().getContentDisposition().getFilename());
                        } catch (MessagingException e) {
                            log.error("Unable to set file name {}", e.getMessage());
                        }
                        return new ByteArrayDataSource(response.getBody(), type);
                    });
                }
                // The DataHandler supplies both the part content and its MIME type; the original's
                // setContent("Content-Type", ...) calls set the literal string "Content-Type" as the
                // part body and were overwritten here anyway, so they have been removed.
                attachment.setDataHandler(new DataHandler(dataSource));
                attachment.setHeader("Content-ID", "<" + UUID.randomUUID().toString() + ">");
                mp.addBodyPart(attachment);
            } catch (IOException | MessagingException | URISyntaxException e) {
                log.error("Failed to get attachment data {}", e.getMessage());
            }
        });
        mimeMessage.setContent(mp);
        ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
        mimeMessage.writeTo(outputStream);
        RawMessage rawMessage = new RawMessage(ByteBuffer.wrap(outputStream.toByteArray()));
        SendRawEmailRequest sendEmailRequest = new SendRawEmailRequest(rawMessage);
        SendRawEmailResult result = amazonSimpleEmailService.sendRawEmail(sendEmailRequest);
        log.info("SES send result {}", result.getMessageId());
    } catch (Exception e) {
        log.error("Failed to send SES", e);
        throw new BpmnError("FAILED_TO_SEND_SES", e.getMessage(), e);
    }
}
Use of com.amazonaws.services.s3.model.Filter in project druid by druid-io:
the class EC2AutoScaler, method terminate.
/**
 * Terminates the EC2 instances whose private IP addresses appear in {@code ips}.
 *
 * @param ips private IP addresses of the instances to terminate
 * @return the scaling data reported by {@code terminateWithIds}, an empty result when
 *         {@code ips} is empty, or {@code null} if termination throws
 */
@Override
public AutoScalingData terminate(List<String> ips) {
    if (ips.isEmpty()) {
        return new AutoScalingData(Lists.<String>newArrayList());
    }
    // Resolve the instances that own the requested private IPs.
    DescribeInstancesResult describeResult = amazonEC2Client.describeInstances(
        new DescribeInstancesRequest().withFilters(new Filter("private-ip-address", ips))
    );
    List<Instance> matchedInstances = Lists.newArrayList();
    for (Reservation reservation : describeResult.getReservations()) {
        matchedInstances.addAll(reservation.getInstances());
    }
    try {
        List<String> instanceIds = Lists.newArrayList();
        for (Instance instance : matchedInstances) {
            instanceIds.add(instance.getInstanceId());
        }
        return terminateWithIds(instanceIds);
    } catch (Exception e) {
        log.error(e, "Unable to terminate any instances.");
    }
    // Preserves the original contract: failure yields null rather than an empty result.
    return null;
}
Use of com.amazonaws.services.s3.model.Filter in project stocator by SparkTC:
the class COSAPIClient, method list.
/**
 * Lists objects under {@code path} directly against the object store and converts them to
 * {@link FileStatus} entries, de-duplicating output left behind by failed Spark task attempts.
 *
 * @param hostName    host prefix used to qualify keys into paths
 * @param path        the path to list
 * @param fullListing when true, zero-length objects and failed-attempt duplicates are included
 * @param prefixBased NOTE(review): this parameter is never referenced in the body — confirm whether
 *                    it is dead or handled by the caller
 * @param isDirectory when non-null and true, the key is normalised to a trailing-slash prefix
 * @param flatListing when true, lists recursively; otherwise stops at "/" and reports
 *                    sub-directories via common prefixes
 * @param filter      optional path filter applied to each candidate result
 * @return the matching file statuses
 * @throws FileNotFoundException declared for interface compatibility
 * @throws IOException on object store access failure
 */
@Override
public FileStatus[] list(String hostName, Path path, boolean fullListing, boolean prefixBased, Boolean isDirectory, boolean flatListing, PathFilter filter) throws FileNotFoundException, IOException {
    LOG.debug("Native direct list status for {}", path);
    ArrayList<FileStatus> tmpResult = new ArrayList<FileStatus>();
    String key = pathToKey(path);
    // Normalise a known-directory key to a trailing slash so only that directory's children match.
    if (isDirectory != null && isDirectory.booleanValue() && !key.endsWith("/") && !path.toString().equals(hostName)) {
        key = key + "/";
        LOG.debug("listNativeDirect modify key to {}", key);
    }
    // Zero-length objects are held back here; they surface later only as directory
    // placeholders when a matching common prefix is found.
    Map<String, FileStatus> emptyObjects = new HashMap<String, FileStatus>();
    ListObjectsRequest request = new ListObjectsRequest();
    request.setBucketName(mBucket);
    request.setMaxKeys(5000);
    request.setPrefix(key);
    // No delimiter means a flat (recursive) listing; "/" stops at one level.
    if (!flatListing) {
        request.setDelimiter("/");
    }
    ObjectListing objectList = mClient.listObjects(request);
    List<S3ObjectSummary> objectSummaries = objectList.getObjectSummaries();
    List<String> commonPrefixes = objectList.getCommonPrefixes();
    boolean objectScanContinue = true;
    // prevObj trails the loop by one element so each candidate can be compared with its
    // successor (for failed-attempt collision detection) before being emitted.
    S3ObjectSummary prevObj = null;
    // start FTA logic
    boolean stocatorOrigin = isSparkOrigin(key, path.toString());
    if (stocatorOrigin) {
        LOG.debug("Stocator origin is true for {}", key);
        // Output of a failed Spark job is skipped entirely (and optionally deleted).
        if (!isJobSuccessful(key)) {
            LOG.debug("{} created by failed Spark job. Skipped", key);
            if (fModeAutomaticDelete) {
                delete(hostName, new Path(key), true);
            }
            return new FileStatus[0];
        }
    }
    while (objectScanContinue) {
        for (S3ObjectSummary obj : objectSummaries) {
            if (prevObj == null) {
                prevObj = obj;
                prevObj.setKey(correctPlusSign(key, prevObj.getKey()));
                continue;
            }
            obj.setKey(correctPlusSign(key, obj.getKey()));
            String objKey = obj.getKey();
            String unifiedObjectName = extractUnifiedObjectName(objKey);
            LOG.trace("list candidate {}, unified name {}", objKey, unifiedObjectName);
            if (stocatorOrigin && !fullListing) {
                LOG.trace("{} created by Spark", unifiedObjectName);
                // we need to make sure there are no failed attempts
                if (nameWithoutTaskID(objKey).equals(nameWithoutTaskID(prevObj.getKey()))) {
                    // found failed that was not aborted.
                    // Two attempts wrote the same logical name: keep the larger object.
                    LOG.trace("Colisiion found between {} and {}", prevObj.getKey(), objKey);
                    if (prevObj.getSize() < obj.getSize()) {
                        LOG.trace("New candidate is {}. Removed {}", obj.getKey(), prevObj.getKey());
                        prevObj = obj;
                    }
                    continue;
                }
            }
            FileStatus fs = createFileStatus(prevObj, hostName, path);
            if (fs.getLen() > 0 || fullListing) {
                LOG.trace("Native direct list. Adding {} size {}", fs.getPath(), fs.getLen());
                // NOTE(review): unlike the tail and common-prefix branches below, entries accepted
                // here are NOT stored in memoryCache — confirm whether that is intentional.
                if (filter == null) {
                    tmpResult.add(fs);
                } else if (filter != null && filter.accept(fs.getPath())) {
                    tmpResult.add(fs);
                } else {
                    LOG.trace("{} rejected by path filter during list. Filter {}", fs.getPath(), filter);
                }
            } else {
                emptyObjects.put(fs.getPath().toString(), fs);
            }
            prevObj = obj;
        }
        // Page through the listing until the object store reports no more results.
        boolean isTruncated = objectList.isTruncated();
        if (isTruncated) {
            objectList = mClient.listNextBatchOfObjects(objectList);
            objectSummaries = objectList.getObjectSummaries();
        } else {
            objectScanContinue = false;
        }
    }
    // Emit the final trailing candidate, which the one-behind loop above never reached.
    if (prevObj != null) {
        FileStatus fs = createFileStatus(prevObj, hostName, path);
        LOG.trace("Adding the last object from the list {}", fs.getPath());
        if (fs.getLen() > 0 || fullListing) {
            LOG.trace("Native direct list. Adding {} size {}", fs.getPath(), fs.getLen());
            if (filter == null) {
                memoryCache.putFileStatus(fs.getPath().toString(), fs);
                tmpResult.add(fs);
            } else if (filter != null && filter.accept(fs.getPath())) {
                memoryCache.putFileStatus(fs.getPath().toString(), fs);
                tmpResult.add(fs);
            } else {
                LOG.trace("{} rejected by path filter during list. Filter {}", fs.getPath(), filter);
            }
        } else if (!fs.getPath().getName().equals(HADOOP_SUCCESS)) {
            // Empty non-_SUCCESS objects may still represent directory placeholders.
            emptyObjects.put(fs.getPath().toString(), fs);
        }
    }
    // get common prefixes
    for (String comPrefix : commonPrefixes) {
        LOG.trace("Common prefix is {}", comPrefix);
        // A common prefix becomes a directory entry when it matches a held-back empty
        // object, or when there were no empty objects at all.
        if (emptyObjects.containsKey(keyToQualifiedPath(hostName, comPrefix).toString()) || emptyObjects.isEmpty()) {
            FileStatus status = new COSFileStatus(true, false, keyToQualifiedPath(hostName, comPrefix));
            LOG.trace("Match between common prefix and empty object {}. Adding to result", comPrefix);
            if (filter == null) {
                memoryCache.putFileStatus(status.getPath().toString(), status);
                tmpResult.add(status);
            } else if (filter != null && filter.accept(status.getPath())) {
                memoryCache.putFileStatus(status.getPath().toString(), status);
                tmpResult.add(status);
            } else {
                LOG.trace("Common prefix {} rejected by path filter during list. Filter {}", status.getPath(), filter);
            }
        }
    }
    return tmpResult.toArray(new FileStatus[tmpResult.size()]);
}
Use of com.amazonaws.services.s3.model.Filter in project chassis by Kixeye:
the class AwsUtils, method getInstanceName.
/**
 * Fetches an instance's name tag value, or null if it does not have one.
 *
 * @param instanceId the EC2 instance id whose name tag should be looked up
 * @param amazonEC2  the EC2 client used to describe tags
 * @return the value of the instance's name tag, or {@code null} when the instance has
 *         no name tag or the tag value is blank
 */
public static String getInstanceName(String instanceId, AmazonEC2 amazonEC2) {
    DescribeTagsResult result = amazonEC2.describeTags(new DescribeTagsRequest().withFilters(
            new Filter().withName("resource-id").withValues(instanceId),
            new Filter().withName("resource-type").withValues("instance"),
            new Filter().withName("key").withValues(TAG_KEY_NAME)));
    if (result.getTags().isEmpty()) {
        return null;
    }
    String name = result.getTags().get(0).getValue();
    // Treat a missing or whitespace-only tag value as absent.
    return name == null || name.trim().isEmpty() ? null : name;
}
Aggregations