use of org.opensearch.ad.model.AnomalyDetector in project anomaly-detection by opensearch-project.
the class EntityColdStarter method getEntityColdStartData.
/**
 * Get training data for an entity.
 *
 * We first note the maximum and minimum timestamp, and sample at most 24 points
 * (with 60 points apart between two neighboring samples) between those minimum
 * and maximum timestamps. Samples can be missing. We only interpolate points
 * between present neighboring samples. We then transform the samples and interpolated
 * points into shingles. Finally, full shingles will be used for cold start.
 *
 * @param detectorId detector Id
 * @param entity the entity's information
 * @param listener listener to return training data
 */
private void getEntityColdStartData(String detectorId, Entity entity, ActionListener<Optional<List<double[][]>>> listener) {
    ActionListener<Optional<AnomalyDetector>> getDetectorListener = ActionListener.wrap(detectorOp -> {
        if (!detectorOp.isPresent()) {
            listener.onFailure(new EndRunException(detectorId, "AnomalyDetector is not available.", false));
            return;
        }
        List<double[][]> coldStartData = new ArrayList<>();
        AnomalyDetector detector = detectorOp.get();
        ActionListener<Optional<Long>> minTimeListener = ActionListener.wrap(earliest -> {
            if (earliest.isPresent()) {
                long startTimeMs = earliest.get().longValue();
                nodeStateManager.getAnomalyDetectorJob(detectorId, ActionListener.wrap(jobOp -> {
                    if (!jobOp.isPresent()) {
                        listener.onFailure(new EndRunException(detectorId, "AnomalyDetector job is not available.", false));
                        return;
                    }
                    AnomalyDetectorJob job = jobOp.get();
                    // End time uses milliseconds as the start time is assumed to be in milliseconds.
                    // OpenSearch uses a set of preconfigured formats to recognize and parse these strings into a long value
                    // representing milliseconds-since-the-epoch in UTC.
                    // More on https://tinyurl.com/wub4fk92
                    // Existing samples either predate or coincide with cold start data. In either case,
                    // combining them without reordering based on timestamps is not ok. We might introduce
                    // anomalies in the process.
                    // An ideal solution would be to record timestamps of data points, combine existing
                    // samples and cold start samples, and do interpolation afterwards. Recording timestamps
                    // requires changes across the board, like bwc in checkpoints. A pragmatic solution is to use
                    // the job enabled time as the end time of the cold start period, as it is easier to combine
                    // existing samples with cold start data. We just need to append existing samples after
                    // cold start data, as existing samples all happen after the job enabled time. There might
                    // be some gaps between the last cold start sample and the first accumulated sample.
                    // We will need to accept that precision loss in the current solution.
                    long endTimeMs = job.getEnabledTime().toEpochMilli();
                    Pair<Integer, Integer> params = selectRangeParam(detector);
                    int stride = params.getLeft();
                    int numberOfSamples = params.getRight();
                    // we start with round 0
                    getFeatures(listener, 0, coldStartData, detector, entity, stride, numberOfSamples, startTimeMs, endTimeMs);
                }, listener::onFailure));
            } else {
                listener.onResponse(Optional.empty());
            }
        }, listener::onFailure);
        searchFeatureDao.getEntityMinDataTime(detector, entity,
            new ThreadedActionListener<>(logger, threadPool, AnomalyDetectorPlugin.AD_THREAD_POOL_NAME, minTimeListener, false));
    }, listener::onFailure);
    nodeStateManager.getAnomalyDetector(detectorId,
        new ThreadedActionListener<>(logger, threadPool, AnomalyDetectorPlugin.AD_THREAD_POOL_NAME, getDetectorListener, false));
}
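The stride and sample count returned by selectRangeParam drive how getFeatures slices the [startTimeMs, endTimeMs) window into one query range per sample. Below is a minimal sketch of that slicing idea, assuming the detector's interval is an IntervalTimeConfiguration; the helper name buildSampleRanges and its exact rounding behavior are illustrative and not the project's actual implementation.

// Illustrative only: derive one query range per sample, walking backwards from the
// cold start end time, with neighboring samples `stride` detector intervals apart.
private List<Entry<Long, Long>> buildSampleRanges(AnomalyDetector detector, long startTimeMs, long endTimeMs,
    int stride, int numberOfSamples) {
    long intervalMs = ((IntervalTimeConfiguration) detector.getDetectionInterval()).toDuration().toMillis();
    List<Entry<Long, Long>> ranges = new ArrayList<>();
    for (int i = 0; i < numberOfSamples; i++) {
        long end = endTimeMs - i * stride * intervalMs;
        long start = end - intervalMs;
        if (start < startTimeMs) {
            // stop once a sample would reach before the entity's earliest data point
            break;
        }
        ranges.add(new AbstractMap.SimpleImmutableEntry<>(start, end));
    }
    return ranges;
}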
use of org.opensearch.ad.model.AnomalyDetector in project anomaly-detection by opensearch-project.
the class SearchFeatureDao method getFeaturesForPeriodByBatch.
public void getFeaturesForPeriodByBatch(AnomalyDetector detector, Entity entity, long startTime, long endTime,
    ActionListener<Map<Long, Optional<double[]>>> listener) throws IOException {
    SearchSourceBuilder searchSourceBuilder = batchFeatureQuery(detector, entity, startTime, endTime, xContent);
    logger.debug("Batch query for detector {}: {} ", detector.getDetectorId(), searchSourceBuilder);
    SearchRequest searchRequest = new SearchRequest(detector.getIndices().toArray(new String[0])).source(searchSourceBuilder);
    client.search(searchRequest, ActionListener.wrap(response -> {
        listener.onResponse(parseBucketAggregationResponse(response, detector.getEnabledFeatureIds()));
    }, listener::onFailure));
}
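A hypothetical caller of this method could consume the returned timestamp-to-feature map as follows; the variables and log message are illustrative only and not taken from the project.

// Illustrative usage: fetch per-interval features for one entity over [startTime, endTime)
// and log how many intervals actually returned data.
searchFeatureDao.getFeaturesForPeriodByBatch(detector, entity, startTime, endTime, ActionListener.wrap(timestampToFeature -> {
    long present = timestampToFeature.values().stream().filter(Optional::isPresent).count();
    logger.debug("{} of {} intervals returned features", present, timestampToFeature.size());
}, e -> logger.error("Batch feature query failed", e)));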
use of org.opensearch.ad.model.AnomalyDetector in project anomaly-detection by opensearch-project.
the class SearchFeatureDao method getColdStartSamplesForPeriods.
public void getColdStartSamplesForPeriods(AnomalyDetector detector, List<Entry<Long, Long>> ranges, Entity entity,
    boolean includesEmptyBucket, ActionListener<List<Optional<double[]>>> listener) throws IOException {
    SearchRequest request = createColdStartFeatureSearchRequest(detector, ranges, entity);
    client.search(request, ActionListener.wrap(response -> {
        Aggregations aggs = response.getAggregations();
        if (aggs == null) {
            listener.onResponse(Collections.emptyList());
            return;
        }
        long docCountThreshold = includesEmptyBucket ? -1 : 0;
        // Extract buckets and order by from_as_string. Currently, the default order is ascending. Better not to assume it.
        // Example response from a date range bucket aggregation:
        // "aggregations":{"date_range":{"buckets":[{"key":"1598865166000-1598865226000","from":1.598865166E12,
        // "from_as_string":"1598865166000","to":1.598865226E12,"to_as_string":"1598865226000","doc_count":3,
        // "deny_max":{"value":154.0}},{"key":"1598869006000-1598869066000","from":1.598869006E12,
        // "from_as_string":"1598869006000","to":1.598869066E12,"to_as_string":"1598869066000","doc_count":3,
        // "deny_max":{"value":141.0}},
        // We don't want to use the default 0 for sum/count aggregations as it might cause false positives during scoring.
        // Terms aggregation only returns non-zero count values. If we use a lot of 0s during cold start,
        // we will raise alarms very easily.
        listener.onResponse(aggs.asList().stream()
            .filter(InternalDateRange.class::isInstance)
            .flatMap(agg -> ((InternalDateRange) agg).getBuckets().stream())
            .filter(bucket -> bucket.getFrom() != null && bucket.getFrom() instanceof ZonedDateTime)
            .filter(bucket -> bucket.getDocCount() > docCountThreshold)
            .sorted(Comparator.comparing((Bucket bucket) -> (ZonedDateTime) bucket.getFrom()))
            .map(bucket -> parseBucket(bucket, detector.getEnabledFeatureIds()))
            .collect(Collectors.toList()));
    }, listener::onFailure));
}
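parseBucket is expected to turn each surviving date range bucket into one optional feature vector. The following is a hedged sketch of what such parsing might look like, assuming each enabled feature id names a single-value metric sub-aggregation on the bucket; it is not the project's actual parseBucket.

// Illustrative sketch: read one value per enabled feature from the bucket's
// sub-aggregations; return empty if any feature is missing or non-numeric.
private Optional<double[]> parseBucketSketch(MultiBucketsAggregation.Bucket bucket, List<String> featureIds) {
    double[] values = new double[featureIds.size()];
    for (int i = 0; i < featureIds.size(); i++) {
        Aggregation agg = bucket.getAggregations().get(featureIds.get(i));
        if (!(agg instanceof NumericMetricsAggregation.SingleValue)) {
            return Optional.empty();
        }
        double value = ((NumericMetricsAggregation.SingleValue) agg).value();
        if (Double.isNaN(value) || Double.isInfinite(value)) {
            return Optional.empty();
        }
        values[i] = value;
    }
    return Optional.of(values);
}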
use of org.opensearch.ad.model.AnomalyDetector in project anomaly-detection by opensearch-project.
the class SearchFeatureDao method getHighestCountEntities.
/**
 * Get a list of entities with the highest doc counts, in descending order, within the specified time range.
 * @param detector detector config
 * @param startTime start time of the time range
 * @param endTime end time of the time range
 * @param maxEntitiesSize maximum number of top entities
 * @param minimumDocCount minimum doc count for top entities
 * @param pageSize page size when querying a multi-category HC detector's top entities
 * @param listener listener to return the entities
 */
public void getHighestCountEntities(AnomalyDetector detector, long startTime, long endTime, int maxEntitiesSize,
    int minimumDocCount, int pageSize, ActionListener<List<Entity>> listener) {
    if (!detector.isMultientityDetector()) {
        listener.onResponse(null);
        return;
    }
    RangeQueryBuilder rangeQuery = new RangeQueryBuilder(detector.getTimeField()).from(startTime).to(endTime)
        .format("epoch_millis").includeLower(true).includeUpper(false);
    BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery().filter(rangeQuery).filter(detector.getFilterQuery());
    AggregationBuilder bucketAggs = null;
    if (detector.getCategoryField().size() == 1) {
        bucketAggs = AggregationBuilders.terms(AGG_NAME_TOP).size(maxEntitiesSize).field(detector.getCategoryField().get(0));
    } else {
        /*
         * We don't have an efficient solution for terms aggregation on multiple fields.
         * Terms aggregation does not support collecting terms from multiple fields in the same document.
         * One workaround is to use a script to retrieve terms from multiple fields.
         * That workaround disables the global ordinals optimization and is therefore markedly
         * slower, because scripting puts pressure on memory and has to iterate through
         * all of the documents at least once to create runtime fields.
         *
         * We evaluated composite and terms aggregation using a generated data set with one
         * million entities. Each entity has two documents. Composite aggregation finishes in
         * around 40 seconds. Terms aggregation performs differently on different clusters.
         * On a 3 data node cluster, terms aggregation does not finish running within 2 hours
         * on a 5 primary shard index. On a 15 data node cluster, terms aggregation needs 217 seconds
         * on a 15 primary shard index. On a 30 data node cluster, terms aggregation needs 47 seconds
         * on a 30 primary shard index.
         *
         * Here we instead work around the problem using composite aggregation. Composite aggregation cannot
         * give top entities without collecting all aggregated results. Paginated results are returned
         * in the natural order of composite keys. This is fine for the Preview API. The Preview API needs the
         * top entities to make sure there is enough data for training and for showing results. We
         * can paginate entities and filter out entities that do not have enough docs (e.g., 256 docs).
         * As long as we have collected the desired number of entities (e.g., 5 entities), we can stop
         * pagination.
         *
         * Example composite query:
         * {
         *     "size": 0,
         *     "query": {
         *         "bool": {
         *             "filter": [{
         *                 "range": {
         *                     "@timestamp": {
         *                         "from": 1626118340000,
         *                         "to": 1626294912000,
         *                         "include_lower": true,
         *                         "include_upper": false,
         *                         "format": "epoch_millis",
         *                         "boost": 1.0
         *                     }
         *                 }
         *             }, {
         *                 "match_all": {
         *                     "boost": 1.0
         *                 }
         *             }],
         *             "adjust_pure_negative": true,
         *             "boost": 1.0
         *         }
         *     },
         *     "track_total_hits": -1,
         *     "aggregations": {
         *         "top_agg": {
         *             "composite": {
         *                 "size": 1,
         *                 "sources": [{
         *                     "service": {
         *                         "terms": {
         *                             "field": "service",
         *                             "missing_bucket": false,
         *                             "order": "asc"
         *                         }
         *                     }
         *                 }, {
         *                     "host": {
         *                         "terms": {
         *                             "field": "host",
         *                             "missing_bucket": false,
         *                             "order": "asc"
         *                         }
         *                     }
         *                 }]
         *             },
         *             "aggregations": {
         *                 "bucketSort": {
         *                     "bucket_sort": {
         *                         "sort": [{
         *                             "_count": {
         *                                 "order": "desc"
         *                             }
         *                         }],
         *                         "from": 0,
         *                         "size": 5,
         *                         "gap_policy": "SKIP"
         *                     }
         *                 }
         *             }
         *         }
         *     }
         * }
         */
        bucketAggs = AggregationBuilders
            .composite(AGG_NAME_TOP, detector.getCategoryField().stream().map(f -> new TermsValuesSourceBuilder(f).field(f)).collect(Collectors.toList()))
            .size(pageSize)
            .subAggregation(PipelineAggregatorBuilders.bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("_count").order(SortOrder.DESC))).size(maxEntitiesSize));
    }
    SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().query(boolQueryBuilder).aggregation(bucketAggs).trackTotalHits(false).size(0);
    SearchRequest searchRequest = new SearchRequest().indices(detector.getIndices().toArray(new String[0])).source(searchSourceBuilder);
    client.search(searchRequest, new TopEntitiesListener(listener, detector, searchSourceBuilder,
        // TODO: tune timeout for historical analysis based on performance test result
        clock.millis() + previewTimeoutInMilliseconds, maxEntitiesSize, minimumDocCount));
}
use of org.opensearch.ad.model.AnomalyDetector in project anomaly-detection by opensearch-project.
the class RestHandlerUtilsTests method testValidateAnomalyDetectorWithTooManyFeatures.
public void testValidateAnomalyDetectorWithTooManyFeatures() throws IOException {
    AnomalyDetector detector = TestHelpers.randomAnomalyDetector(ImmutableList.of(randomFeature(), randomFeature()));
    String error = RestHandlerUtils.checkAnomalyDetectorFeaturesSyntax(detector, 1);
    assertEquals("Can't create more than 1 anomaly features", error);
}