use of org.opensearch.ad.ratelimit.ResultWriteRequest in project anomaly-detection by opensearch-project.
the class ADResultBulkRequest method writeTo.
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    out.writeVInt(anomalyResults.size());
    for (ResultWriteRequest result : anomalyResults) {
        result.writeTo(out);
    }
}
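The stream written by writeTo is consumed by a matching constructor on the receiving node. A minimal sketch of that read side, assuming the usual OpenSearch Writeable pattern where ResultWriteRequest exposes a StreamInput constructor (the project's actual constructor may differ):

// Hypothetical read-side counterpart to writeTo above; illustration only.
public ADResultBulkRequest(StreamInput in) throws IOException {
    super(in);
    int size = in.readVInt(); // count written by writeVInt above
    this.anomalyResults = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
        // each element deserializes itself, mirroring result.writeTo(out)
        this.anomalyResults.add(new ResultWriteRequest(in));
    }
}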
use of org.opensearch.ad.ratelimit.ResultWriteRequest in project anomaly-detection by opensearch-project.
the class EntityResultTransportAction method onGetDetector.
private ActionListener<Optional<AnomalyDetector>> onGetDetector(
    ActionListener<AcknowledgedResponse> listener,
    String detectorId,
    EntityResultRequest request,
    Optional<Exception> prevException
) {
    return ActionListener.wrap(detectorOptional -> {
        if (!detectorOptional.isPresent()) {
            listener.onFailure(new EndRunException(detectorId, "AnomalyDetector is not available.", true));
            return;
        }

        AnomalyDetector detector = detectorOptional.get();

        if (request.getEntities() == null) {
            listener.onResponse(null);
            return;
        }

        Instant executionStartTime = Instant.now();
        Map<Entity, double[]> cacheMissEntities = new HashMap<>();
        for (Entry<Entity, double[]> entityEntry : request.getEntities().entrySet()) {
            Entity categoricalValues = entityEntry.getKey();

            if (isEntityFromOldNodeMsg(categoricalValues)
                && detector.getCategoryField() != null
                && detector.getCategoryField().size() == 1) {
                Map<String, String> attrValues = categoricalValues.getAttributes();
                // handle a request from a version before OpenSearch 1.1
                categoricalValues = Entity
                    .createSingleAttributeEntity(detector.getCategoryField().get(0), attrValues.get(CommonName.EMPTY_FIELD));
            }

            Optional<String> modelIdOptional = categoricalValues.getModelId(detectorId);
            if (false == modelIdOptional.isPresent()) {
                continue;
            }

            String modelId = modelIdOptional.get();
            double[] datapoint = entityEntry.getValue();
            ModelState<EntityModel> entityModel = cache.get().get(modelId, detector);
            if (entityModel == null) {
                // cache miss: defer to the hot/cold entity queues below
                cacheMissEntities.put(categoricalValues, datapoint);
                continue;
            }

            ThresholdingResult result = modelManager
                .getAnomalyResultForEntity(datapoint, entityModel, modelId, categoricalValues, detector.getShingleSize());
            // Writing every result regardless of score triggers many OpenSearchRejectedExecutionExceptions,
            // so only save results with a positive RCF score.
            if (result.getRcfScore() > 0) {
                AnomalyResult resultToSave = result
                    .toAnomalyResult(
                        detector,
                        Instant.ofEpochMilli(request.getStart()),
                        Instant.ofEpochMilli(request.getEnd()),
                        executionStartTime,
                        Instant.now(),
                        ParseUtils.getFeatureData(datapoint, detector),
                        categoricalValues,
                        indexUtil.getSchemaVersion(ADIndex.RESULT),
                        modelId,
                        null,
                        null
                    );
                resultWriteQueue
                    .put(
                        new ResultWriteRequest(
                            System.currentTimeMillis() + detector.getDetectorIntervalInMilliseconds(),
                            detectorId,
                            result.getGrade() > 0 ? RequestPriority.HIGH : RequestPriority.MEDIUM,
                            resultToSave,
                            detector.getResultIndex()
                        )
                    );
            }
        }

        // split cache misses into hot and cold entities
        Pair<List<Entity>, List<Entity>> hotColdEntities = cache
            .get()
            .selectUpdateCandidate(cacheMissEntities.keySet(), detectorId, detector);

        List<EntityFeatureRequest> hotEntityRequests = new ArrayList<>();
        List<EntityFeatureRequest> coldEntityRequests = new ArrayList<>();

        for (Entity hotEntity : hotColdEntities.getLeft()) {
            double[] hotEntityValue = cacheMissEntities.get(hotEntity);
            if (hotEntityValue == null) {
                LOG.error(new ParameterizedMessage("feature value should not be null: [{}]", hotEntity));
                continue;
            }
            // hot entities have MEDIUM priority
            hotEntityRequests
                .add(
                    new EntityFeatureRequest(
                        System.currentTimeMillis() + detector.getDetectorIntervalInMilliseconds(),
                        detectorId,
                        RequestPriority.MEDIUM,
                        hotEntity,
                        hotEntityValue,
                        request.getStart()
                    )
                );
        }

        for (Entity coldEntity : hotColdEntities.getRight()) {
            double[] coldEntityValue = cacheMissEntities.get(coldEntity);
            if (coldEntityValue == null) {
                LOG.error(new ParameterizedMessage("feature value should not be null: [{}]", coldEntity));
                continue;
            }
            // cold entities have LOW priority
            coldEntityRequests
                .add(
                    new EntityFeatureRequest(
                        System.currentTimeMillis() + detector.getDetectorIntervalInMilliseconds(),
                        detectorId,
                        RequestPriority.LOW,
                        coldEntity,
                        coldEntityValue,
                        request.getStart()
                    )
                );
        }

        checkpointReadQueue.putAll(hotEntityRequests);
        coldEntityQueue.putAll(coldEntityRequests);

        // respond back
        if (prevException.isPresent()) {
            listener.onFailure(prevException.get());
        } else {
            listener.onResponse(new AcknowledgedResponse(true));
        }
    }, exception -> {
        LOG.error(
            new ParameterizedMessage(
                "fail to get entity's anomaly grade for detector [{}]: start: [{}], end: [{}]",
                detectorId,
                request.getStart(),
                request.getEnd()
            ),
            exception
        );
        listener.onFailure(exception);
    });
}
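The method assigns three priority tiers that the rate-limiting queues use when deciding what to drop under pressure: HIGH for results with a positive anomaly grade, MEDIUM for hot (cache-update-candidate) entities, and LOW for cold ones. A hypothetical helper illustrating the mapping (priorityFor and its parameters are invented for this example, not part of the project):

// Illustration of the priority tiers used above; not project code.
static RequestPriority priorityFor(boolean anomalous, boolean hotEntity) {
    if (anomalous) {
        return RequestPriority.HIGH; // non-zero grade: keep even under pressure
    }
    return hotEntity ? RequestPriority.MEDIUM : RequestPriority.LOW;
}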
use of org.opensearch.ad.ratelimit.ResultWriteRequest in project anomaly-detection by opensearch-project.
the class MultiEntityResultHandlerTests method setUp.
@Override
public void setUp() throws Exception {
    super.setUp();

    handler = new MultiEntityResultHandler(client, settings, threadPool, anomalyDetectionIndices, clientUtil, indexUtil, clusterService);

    request = new ADResultBulkRequest();
    ResultWriteRequest resultWriteRequest = new ResultWriteRequest(
        Instant.now().plus(10, ChronoUnit.MINUTES).toEpochMilli(),
        detectorId,
        RequestPriority.MEDIUM,
        TestHelpers.randomAnomalyDetectResult(),
        null
    );
    request.add(resultWriteRequest);

    response = new ADResultBulkResponse();

    super.setUpLog4jForJUnit(MultiEntityResultHandler.class);

    doAnswer(invocation -> {
        ActionListener<ADResultBulkResponse> listener = invocation.getArgument(2);
        listener.onResponse(response);
        return null;
    }).when(client).execute(eq(ADResultBulkAction.INSTANCE), any(), ArgumentMatchers.<ActionListener<ADResultBulkResponse>>any());
}
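With the client stubbed this way, a test can drive the handler and assert that the prepared response comes back. A sketch, assuming the handler exposes a flush(request, listener) entry point; the project's actual test methods and handler API may differ:

// Hypothetical test built on the setUp above; illustration only.
public void testFlushDeliversStubbedResponse() throws InterruptedException {
    CountDownLatch verified = new CountDownLatch(1);
    handler.flush(request, ActionListener.wrap(resp -> {
        assertEquals(response, resp); // the mocked client answered with our prepared response
        verified.countDown();
    }, e -> fail("unexpected failure: " + e.getMessage())));
    assertTrue(verified.await(10, TimeUnit.SECONDS));
}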
use of org.opensearch.ad.ratelimit.ResultWriteRequest in project anomaly-detection by opensearch-project.
the class ADResultBulkTransportAction method doExecute.
@Override
protected void doExecute(Task task, ADResultBulkRequest request, ActionListener<ADResultBulkResponse> listener) {
    // Concurrent indexing memory limit = 10% of heap
    // indexing pressure = indexing bytes / indexing limit
    // Write all results while the global indexing memory pressure is below the soft limit. Otherwise, index
    // all non-zero anomaly grade requests and index zero anomaly grade requests with probability (1 - indexing pressure).
    long totalBytes = indexingPressure.getCurrentCombinedCoordinatingAndPrimaryBytes() + indexingPressure.getCurrentReplicaBytes();
    float indexingPressurePercent = (float) totalBytes / primaryAndCoordinatingLimits;
    List<ResultWriteRequest> results = request.getAnomalyResults();

    if (results == null || results.size() < 1) {
        listener.onResponse(new ADResultBulkResponse());
        // return early so the listener is not invoked a second time below
        return;
    }

    BulkRequest bulkRequest = new BulkRequest();

    if (indexingPressurePercent <= softLimit) {
        for (ResultWriteRequest resultWriteRequest : results) {
            addResult(bulkRequest, resultWriteRequest.getResult(), resultWriteRequest.getResultIndex());
        }
    } else if (indexingPressurePercent <= hardLimit) {
        // exceeds the soft limit (60% by default) but is below the hard limit (90% by default)
        float acceptProbability = 1 - indexingPressurePercent;
        for (ResultWriteRequest resultWriteRequest : results) {
            AnomalyResult result = resultWriteRequest.getResult();
            if (result.isHighPriority() || random.nextFloat() < acceptProbability) {
                addResult(bulkRequest, result, resultWriteRequest.getResultIndex());
            }
        }
    } else {
        // above the hard limit: only index high-priority (non-zero grade or error) results
        for (ResultWriteRequest resultWriteRequest : results) {
            AnomalyResult result = resultWriteRequest.getResult();
            if (result.isHighPriority()) {
                addResult(bulkRequest, result, resultWriteRequest.getResultIndex());
            }
        }
    }

    if (bulkRequest.numberOfActions() > 0) {
        client.execute(BulkAction.INSTANCE, bulkRequest, ActionListener.wrap(bulkResponse -> {
            List<IndexRequest> failedRequests = BulkUtil.getFailedIndexRequest(bulkRequest, bulkResponse);
            listener.onResponse(new ADResultBulkResponse(failedRequests));
        }, e -> {
            LOG.error("Failed to bulk index AD result", e);
            listener.onFailure(e);
        }));
    } else {
        listener.onResponse(new ADResultBulkResponse());
    }
}
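The middle branch implements simple probabilistic load shedding: as indexing pressure climbs from the soft toward the hard limit, the chance that a low-priority result is indexed falls linearly to zero. A self-contained toy sketch of the same idea (all names here are invented for illustration; only the limit values mirror the defaults above):

import java.util.Random;

// Toy demonstration of linear probabilistic load shedding, as in doExecute above.
public class LoadSheddingDemo {
    public static void main(String[] args) {
        Random random = new Random();
        float softLimit = 0.6f;
        float hardLimit = 0.9f;
        for (float pressure : new float[] { 0.5f, 0.7f, 0.95f }) {
            int accepted = 0;
            int trials = 10_000;
            for (int i = 0; i < trials; i++) {
                boolean accept;
                if (pressure <= softLimit) {
                    accept = true;                                // below soft limit: keep everything
                } else if (pressure <= hardLimit) {
                    accept = random.nextFloat() < (1 - pressure); // shed low-priority results probabilistically
                } else {
                    accept = false;                               // above hard limit: drop all low-priority results
                }
                if (accept) {
                    accepted++;
                }
            }
            System.out.printf("pressure=%.2f -> accepted %.1f%% of low-priority results%n",
                pressure, 100.0 * accepted / trials);
        }
    }
}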