Use of org.elasticsearch.search.aggregations.bucket.terms.ParsedTerms in project incubator-inlong by apache.
The class AuditServiceImpl, method listByCondition.
/**
 * Query the audit data list by condition.
 *
 * @param request the audit request carrying the query conditions
 * @return the list of query results
 */
@Override
public List<AuditVO> listByCondition(AuditRequest request) throws IOException {
    LOGGER.info("begin to query audit list, request={}", request);
    Preconditions.checkNotNull(request, "request is null");
    List<AuditVO> result = new ArrayList<>();
    AuditQuerySource querySource = AuditQuerySource.valueOf(auditQuerySource);
    for (String auditId : request.getAuditIds()) {
        if (AuditQuerySource.MYSQL == querySource) {
            String format = "%Y-%m-%d %H:%i:00";
            // Only minute-level aggregation is supported for now
            DateTimeFormatter forPattern = DateTimeFormat.forPattern("yyyy-MM-dd");
            DateTime dtDate = forPattern.parseDateTime(request.getDt());
            String eDate = dtDate.plusDays(1).toString(forPattern);
            List<Map<String, Object>> sumList = auditEntityMapper.sumByLogTs(
                    request.getInlongGroupId(), request.getInlongStreamId(), auditId,
                    request.getDt(), eDate, format);
            List<AuditInfo> auditSet = sumList.stream().map(s -> {
                AuditInfo vo = new AuditInfo();
                vo.setLogTs((String) s.get("logTs"));
                vo.setCount(((BigDecimal) s.get("total")).longValue());
                return vo;
            }).collect(Collectors.toList());
            result.add(new AuditVO(auditId, auditSet));
        } else if (AuditQuerySource.ELASTICSEARCH == querySource) {
            String index = String.format("%s_%s", request.getDt().replaceAll("-", ""), auditId);
            if (elasticsearchApi.indexExists(index)) {
                SearchResponse response = elasticsearchApi.search(
                        toAuditSearchRequest(index, request.getInlongGroupId(), request.getInlongStreamId()));
                final List<Aggregation> aggregations = response.getAggregations().asList();
                if (CollectionUtils.isNotEmpty(aggregations)) {
                    ParsedTerms terms = (ParsedTerms) aggregations.get(0);
                    if (CollectionUtils.isNotEmpty(terms.getBuckets())) {
                        List<AuditInfo> auditSet = terms.getBuckets().stream().map(bucket -> {
                            AuditInfo vo = new AuditInfo();
                            vo.setLogTs(bucket.getKeyAsString());
                            vo.setCount((long) ((ParsedSum) bucket.getAggregations().asList().get(0)).getValue());
                            return vo;
                        }).collect(Collectors.toList());
                        result.add(new AuditVO(auditId, auditSet));
                    }
                }
            } else {
                LOGGER.warn("Elasticsearch index={} does not exist", index);
            }
        }
    }
    LOGGER.info("success to query audit list for request={}", request);
    return result;
}
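The toAuditSearchRequest helper that produces the ParsedTerms/ParsedSum structure parsed above is not part of this excerpt. As a minimal, hypothetical sketch (the field names log_ts, count, inlong_group_id and inlong_stream_id are assumptions, not taken from the project; the builders come from the standard org.elasticsearch.index.query and org.elasticsearch.search.aggregations packages), the request it builds would pair a terms aggregation on the log timestamp with a nested sum:

// Hypothetical sketch of toAuditSearchRequest, NOT the InLong implementation:
// one terms bucket per log timestamp, each carrying a sum that is later read as ParsedSum.
private SearchRequest toAuditSearchRequest(String index, String groupId, String streamId) {
    BoolQueryBuilder filter = QueryBuilders.boolQuery()
            .must(QueryBuilders.termQuery("inlong_group_id", groupId))    // assumed field name
            .must(QueryBuilders.termQuery("inlong_stream_id", streamId)); // assumed field name
    TermsAggregationBuilder terms = AggregationBuilders.terms("log_ts")   // parsed above as ParsedTerms
            .field("log_ts")                                              // assumed field name
            .size(1440)                                                   // at most one bucket per minute of the day
            .subAggregation(AggregationBuilders.sum("total").field("count")); // parsed above as ParsedSum
    SearchSourceBuilder source = new SearchSourceBuilder()
            .query(filter)
            .aggregation(terms)
            .size(0); // aggregation-only, no hits needed
    return new SearchRequest(index).source(source);
}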
Use of org.elasticsearch.search.aggregations.bucket.terms.ParsedTerms in project apiman by apiman.
The class EsRegistry, method listClientVersions.
@Override
@SuppressWarnings("nls")
public void listClientVersions(String organizationId, String clientId, int page, int pageSize, IAsyncResultHandler<List<String>> handler) {
    String query = "{"
            + " \"query\": {"
            + "   \"bool\": {"
            + "     \"filter\": ["
            + "       { \"term\": { \"organizationId\": \"{{organizationId}}\" } }," // organizationId
            + "       { \"term\": { \"clientId\": \"{{clientId}}\" } }"              // clientId
            + "     ]"
            + "   }"
            + " },"
            + " \"aggs\": {"
            + "   \"client_versions\": {"
            + "     \"terms\": { \"field\": \"version\" }" // only return the version field of clients
            + "   }"
            + " }"
            + "}";
    SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest();
    searchTemplateRequest.setRequest(new SearchRequest(getIndexPrefix() + EsConstants.INDEX_CLIENTS));
    searchTemplateRequest.setScriptType(ScriptType.INLINE);
    searchTemplateRequest.setScript(query);
    Map<String, Object> scriptParams = new HashMap<>();
    scriptParams.put("organizationId", organizationId);
    scriptParams.put("clientId", clientId);
    searchTemplateRequest.setScriptParams(scriptParams);
    try {
        SearchTemplateResponse response = getClient().searchTemplate(searchTemplateRequest, RequestOptions.DEFAULT);
        SearchResponse searchResponse = response.getResponse();
        List terms = ((ParsedTerms) searchResponse.getAggregations().asMap().get("client_versions")).getBuckets();
        // Grab only the key of each bucket (we don't care about the count)
        List<String> results = (List<String>) terms.stream().map(o -> ((ParsedTerms.ParsedBucket) o).getKey()).collect(Collectors.toList());
        handler.handle(AsyncResultImpl.create(results));
    } catch (IOException e) {
        LOGGER.error(e.getMessage(), e);
    }
}
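The bucket extraction above relies on raw types and a cast to ParsedTerms.ParsedBucket. A minimal alternative sketch (not apiman's code) reads the same "client_versions" aggregation through the Terms interface from org.elasticsearch.search.aggregations.bucket.terms, avoiding the unchecked casts:

// Sketch only: type-safe reading of the same terms aggregation,
// assuming the searchResponse obtained above.
Terms versions = searchResponse.getAggregations().get("client_versions");
List<String> results = versions.getBuckets().stream()
        .map(Terms.Bucket::getKeyAsString) // the bucket key is the client version
        .collect(Collectors.toList());
handler.handle(AsyncResultImpl.create(results));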
Use of org.elasticsearch.search.aggregations.bucket.terms.ParsedTerms in project apiman by apiman.
The class EsRegistry, method listOrgs.
@Override
@SuppressWarnings("nls")
public void listOrgs(IAsyncResultHandler<List<String>> handler) {
    try {
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        // i.e. only records containing an organizationId field:
        TermsAggregationBuilder aggregation = AggregationBuilders.terms("all_orgs").field("organizationId");
        // Search only the specific API management indices to avoid touching foreign indices
        String[] indices = { getIndexPrefix() + EsConstants.INDEX_APIS, getIndexPrefix() + EsConstants.INDEX_CLIENTS };
        SearchRequest searchRequest = new SearchRequest(indices).source(searchSourceBuilder.aggregation(aggregation));
        SearchResponse searchResponse = getClient().search(searchRequest, RequestOptions.DEFAULT);
        List terms = ((ParsedTerms) searchResponse.getAggregations().asMap().get("all_orgs")).getBuckets();
        // Grab only the key of each bucket (we don't care about the count)
        List<String> results = (List<String>) terms.stream().map(o -> ((ParsedTerms.ParsedBucket) o).getKey()).collect(Collectors.toList());
        handler.handle(AsyncResultImpl.create(results));
    } catch (IOException e) {
        LOGGER.error(e.getMessage(), e);
    }
}
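One caveat worth noting: a terms aggregation returns only its top 10 buckets by default, so a deployment with more organizations would see a truncated list. A hedged sketch of how the request above could be tuned (the size value is an arbitrary example, not a setting taken from apiman):

// Sketch only, not apiman's code: raise the bucket limit and skip document hits.
TermsAggregationBuilder allOrgs = AggregationBuilders.terms("all_orgs")
        .field("organizationId")
        .size(10000);                        // the default is 10 buckets
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder()
        .size(0)                             // aggregation-only; no hits are needed
        .aggregation(allOrgs);
SearchRequest searchRequest = new SearchRequest(indices).source(sourceBuilder);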
Use of org.elasticsearch.search.aggregations.bucket.terms.ParsedTerms in project vind by RBMHTechnology.
The class ResultUtils, method getStatsFacetResults.
private static Pair<String, StatsFacetResult> getStatsFacetResults(Map<String, Aggregation> aggregations, Facet.StatsFacet statsFacet) {
    final FieldDescriptor field = statsFacet.getField();
    Object min = null;
    Object max = null;
    Double sum = null;
    Long count = null;
    Long missing = null;
    Double sumOfSquares = null;
    Object mean = null;
    Double stddev = null;
    Map<Double, Double> percentiles = null;
    List distinctValues = null;
    Long countDistinct = null;
    Long cardinality = null;
    final Optional<ParsedExtendedStats> statsAggregation = aggregations.entrySet().stream()
            .filter(entry -> entry.getKey().endsWith(statsFacet.getFacetName()))
            .map(Map.Entry::getValue)
            .map(agg -> (ParsedExtendedStats) agg)
            .findFirst();
    if (statsAggregation.isPresent() && statsFacet.getSum()) {
        sum = statsAggregation.get().getSum();
    }
    if (statsAggregation.isPresent() && statsFacet.getMin()) {
        min = DocumentUtil.castForDescriptor(statsAggregation.get().getMin(), field, FieldDescriptor.UseCase.Facet);
    }
    if (statsAggregation.isPresent() && statsFacet.getMax()) {
        max = DocumentUtil.castForDescriptor(statsAggregation.get().getMax(), field, FieldDescriptor.UseCase.Facet);
    }
    if (statsAggregation.isPresent() && statsFacet.getCount()) {
        count = statsAggregation.get().getCount();
    }
    if (statsFacet.getMissing()) {
        final Optional<Aggregation> statsMissingAggregation = aggregations.entrySet().stream()
                .filter(entry -> entry.getKey().endsWith(statsFacet.getFacetName() + "_missing"))
                .map(Map.Entry::getValue)
                .findFirst();
        if (statsMissingAggregation.isPresent()) {
            missing = ((ParsedMissing) statsMissingAggregation.get()).getDocCount();
        }
    }
    if (statsAggregation.isPresent() && statsFacet.getSumOfSquares()) {
        sumOfSquares = statsAggregation.get().getSumOfSquares();
    }
    if (statsAggregation.isPresent() && statsFacet.getMean()) {
        mean = DocumentUtil.castForDescriptor(statsAggregation.get().getAvg(), field, FieldDescriptor.UseCase.Facet);
    }
    if (statsAggregation.isPresent() && statsFacet.getStddev()) {
        stddev = statsAggregation.get().getStdDeviation();
    }
    if (ArrayUtils.isNotEmpty(statsFacet.getPercentiles())) {
        final Optional<Aggregation> statsPercentilesAggregation = aggregations.entrySet().stream()
                .filter(entry -> entry.getKey().endsWith(statsFacet.getFacetName() + "_percentiles"))
                .map(Map.Entry::getValue)
                .findFirst();
        if (statsPercentilesAggregation.isPresent()) {
            percentiles = Streams.stream(((ParsedPercentiles) statsPercentilesAggregation.get()).iterator())
                    .collect(Collectors.toMap(
                            (Percentile p) -> Double.valueOf(p.getPercent()),
                            (Percentile p) -> Double.valueOf(p.getValue())));
        }
    }
    if (statsFacet.getDistinctValues()) {
        final Optional<Aggregation> statsValuesAggregation = aggregations.entrySet().stream()
                .filter(entry -> entry.getKey().endsWith(statsFacet.getFacetName() + "_values"))
                .map(Map.Entry::getValue)
                .findFirst();
        if (statsValuesAggregation.isPresent()) {
            distinctValues = ((ParsedTerms) statsValuesAggregation.get()).getBuckets().stream()
                    .filter(bucket -> bucket.getDocCount() > 0)
                    .map(MultiBucketsAggregation.Bucket::getKey)
                    .map(o -> DocumentUtil.castForDescriptor(o, field, FieldDescriptor.UseCase.Facet))
                    .collect(Collectors.toList());
        }
    }
    if (statsFacet.getCountDistinct()) {
        final Optional<Aggregation> statsValuesAggregation = aggregations.entrySet().stream()
                .filter(entry -> entry.getKey().endsWith(statsFacet.getFacetName() + "_values"))
                .map(Map.Entry::getValue)
                .findFirst();
        if (statsValuesAggregation.isPresent()) {
            countDistinct = ((ParsedTerms) statsValuesAggregation.get()).getBuckets().stream()
                    .filter(bucket -> bucket.getDocCount() > 0)
                    .count();
        }
    }
    if (statsFacet.getCardinality()) {
        final Optional<Aggregation> statsCardinalityAggregation = aggregations.entrySet().stream()
                .filter(entry -> entry.getKey().endsWith(statsFacet.getFacetName() + "_cardinality"))
                .map(Map.Entry::getValue)
                .findFirst();
        if (statsCardinalityAggregation.isPresent()) {
            cardinality = ((ParsedCardinality) statsCardinalityAggregation.get()).getValue();
        }
    }
    final StatsFacetResult statsFacetResult = new StatsFacetResult(field, min, max, sum, count, missing,
            sumOfSquares, mean, stddev, percentiles, distinctValues, countDistinct, cardinality);
    return Pair.of(statsFacet.getFacetName(), statsFacetResult);
}
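The parsing above implies a naming convention on the request side: an extended_stats aggregation named after the facet, plus optional companions suffixed _missing, _percentiles, _values and _cardinality. A rough sketch of how such a request could be assembled (the field name and percentile values are placeholders, and this is not vind's actual builder code):

// Sketch under assumed names: builds the aggregations whose parsed forms
// (ParsedExtendedStats, ParsedMissing, ParsedPercentiles, ParsedTerms, ParsedCardinality)
// are consumed by getStatsFacetResults above.
String facetName = statsFacet.getFacetName();
String fieldName = "price";  // hypothetical numeric field
SearchSourceBuilder source = new SearchSourceBuilder()
        .aggregation(AggregationBuilders.extendedStats(facetName).field(fieldName))
        .aggregation(AggregationBuilders.missing(facetName + "_missing").field(fieldName))
        .aggregation(AggregationBuilders.percentiles(facetName + "_percentiles")
                .field(fieldName).percentiles(25, 50, 95))   // example percentiles
        .aggregation(AggregationBuilders.terms(facetName + "_values").field(fieldName))
        .aggregation(AggregationBuilders.cardinality(facetName + "_cardinality").field(fieldName));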
Use of org.elasticsearch.search.aggregations.bucket.terms.ParsedTerms in project vind by RBMHTechnology.
The class ResultUtils, method getPivotFacetResults.
private static Pair<String, List<PivotFacetResult>> getPivotFacetResults(Aggregation aggregation, Facet.PivotFacet pivotFacet, Map<String, Facet> vindFacets) {
    final FieldDescriptor field = pivotFacet.getFieldDescriptors().get(0);
    if (Objects.nonNull(aggregation)) {
        final ParsedTerms rootPivot = (ParsedTerms) aggregation;
        final List<PivotFacetResult> pivotFacetResult = rootPivot.getBuckets().stream().map(bucket -> {
            final Map<String, Aggregation> aggMap = bucket.getAggregations().asMap();
            final Aggregation pivotAgg = aggMap.get(pivotFacet.getFacetName());
            final Map<String, RangeFacetResult<?>> rangeSubfacets = new HashMap<>();
            final Map<String, QueryFacetResult<?>> querySubfacets = new HashMap<>();
            final Map<String, StatsFacetResult<?>> statsSubfacets = new HashMap<>();
            Double score = null;
            if (!pivotFacet.getSortings().isEmpty()) {
                score = pivotFacet.getSortings().keySet().stream()
                        .map(aggMap::get)
                        .mapToDouble(sortAgg -> ((ParsedSingleValueNumericMetricsAggregation) sortAgg).value())
                        .sum();
            }
            aggMap.values().forEach(agg -> {
                if (ParsedExtendedStats.class.isAssignableFrom(agg.getClass())) {
                    final HashMap<String, Aggregation> statsMap = new HashMap<>();
                    statsMap.put(agg.getName(), agg);
                    statsSubfacets.put(agg.getName(),
                            ResultUtils.getStatsFacetResults(statsMap, (Facet.StatsFacet) vindFacets.get(agg.getName())).getValue());
                }
                if (ParsedQuery.class.isAssignableFrom(agg.getClass())) {
                    querySubfacets.put(agg.getName(),
                            ResultUtils.getQueryFacetResults(agg, (Facet.QueryFacet) vindFacets.get(agg.getName())).getValue());
                }
                if (ParsedRange.class.isAssignableFrom(agg.getClass())) {
                    rangeSubfacets.put(agg.getName(),
                            ResultUtils.getRangeFacetResults(agg, vindFacets.get(agg.getName())).getValue());
                }
            });
            final List<PivotFacetResult> subPivot = getPivotFacetResults(pivotAgg, pivotFacet, vindFacets).getValue();
            return new PivotFacetResult(subPivot, bucket.getKey(), field, Long.valueOf(bucket.getDocCount()).intValue(),
                    rangeSubfacets, querySubfacets, statsSubfacets, score);
        }).collect(Collectors.toList());
        return Pair.of(pivotFacet.getFacetName(), pivotFacetResult);
    }
    return Pair.of(null, null);
}
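The recursion works because each pivot level is a terms aggregation nested inside the previous one under the same facet name, which is why aggMap.get(pivotFacet.getFacetName()) inside a bucket yields the next level. A hedged sketch of the corresponding request shape (the field names are hypothetical, and this is not the builder code vind actually uses):

// Sketch only: two pivot levels as nested terms aggregations sharing the facet name,
// matching the recursive parsing in getPivotFacetResults above.
TermsAggregationBuilder pivot = AggregationBuilders.terms(pivotFacet.getFacetName())
        .field("category")                                   // hypothetical first pivot field
        .subAggregation(AggregationBuilders.terms(pivotFacet.getFacetName())
                .field("subCategory"));                       // hypothetical second pivot field
SearchSourceBuilder source = new SearchSourceBuilder().aggregation(pivot).size(0);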