Use of org.opensearch.ad.caching.CacheProvider in project anomaly-detection by opensearch-project.
From the class ADStatsNodesTransportActionTests, method setUp:
@Override
@Before
public void setUp() throws Exception {
super.setUp();
Client client = client();
Clock clock = mock(Clock.class);
Throttler throttler = new Throttler(clock);
ThreadPool threadPool = mock(ThreadPool.class);
IndexNameExpressionResolver indexNameResolver = mock(IndexNameExpressionResolver.class);
IndexUtils indexUtils = new IndexUtils(client, new ClientUtil(Settings.EMPTY, client, throttler, threadPool), clusterService(), indexNameResolver);
ModelManager modelManager = mock(ModelManager.class);
CacheProvider cacheProvider = mock(CacheProvider.class);
EntityCache cache = mock(EntityCache.class);
when(cacheProvider.get()).thenReturn(cache);
clusterStatName1 = "clusterStat1";
clusterStatName2 = "clusterStat2";
nodeStatName1 = "nodeStat1";
nodeStatName2 = "nodeStat2";
Settings settings = Settings.builder().put(MAX_MODEL_SIZE_PER_NODE.getKey(), 10).build();
ClusterService clusterService = mock(ClusterService.class);
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Collections.unmodifiableSet(new HashSet<>(Arrays.asList(MAX_MODEL_SIZE_PER_NODE))));
when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
statsMap = new HashMap<String, ADStat<?>>() {
    {
        put(nodeStatName1, new ADStat<>(false, new CounterSupplier()));
        put(nodeStatName2, new ADStat<>(false, new ModelsOnNodeSupplier(modelManager, cacheProvider, settings, clusterService)));
        put(clusterStatName1, new ADStat<>(true, new IndexStatusSupplier(indexUtils, "index1")));
        put(clusterStatName2, new ADStat<>(true, new IndexStatusSupplier(indexUtils, "index2")));
        put(InternalStatNames.JVM_HEAP_USAGE.getName(), new ADStat<>(true, new SettableSupplier()));
    }
};
adStats = new ADStats(statsMap);
JvmService jvmService = mock(JvmService.class);
JvmStats jvmStats = mock(JvmStats.class);
JvmStats.Mem mem = mock(JvmStats.Mem.class);
when(jvmService.stats()).thenReturn(jvmStats);
when(jvmStats.getMem()).thenReturn(mem);
when(mem.getHeapUsedPercent()).thenReturn(randomShort());
adTaskManager = mock(ADTaskManager.class);
action = new ADStatsNodesTransportAction(client().threadPool(), clusterService(), mock(TransportService.class), mock(ActionFilters.class), adStats, jvmService, adTaskManager);
}
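The setUp above hands the stats map a mocked CacheProvider rather than a real cache. Below is a minimal sketch of that stubbing pattern in isolation, assuming only Mockito and JUnit static imports; it exercises no AD API beyond the calls already shown in the test.
// Sketch only: restates the stub relationship from setUp, not a test of ADStatsNodesTransportAction itself.
CacheProvider provider = mock(CacheProvider.class);
EntityCache entityCache = mock(EntityCache.class);
when(provider.get()).thenReturn(entityCache);
// Consumers such as ModelsOnNodeSupplier receive the provider and resolve the cache lazily through get().
assertSame(entityCache, provider.get());
verify(provider, atLeastOnce()).get();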
Use of org.opensearch.ad.caching.CacheProvider in project anomaly-detection by opensearch-project.
From the class CheckpointReadWorkerTests, method setUp:
@Override
public void setUp() throws Exception {
super.setUp();
clusterService = mock(ClusterService.class);
clusterSettings = new ClusterSettings(Settings.EMPTY, Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_CONCURRENCY, AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_BATCH_SIZE))));
when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
state = MLUtil.randomModelState(new RandomModelStateConfig.Builder().fullModel(true).build());
checkpoint = mock(CheckpointDao.class);
Map.Entry<EntityModel, Instant> entry = new SimpleImmutableEntry<EntityModel, Instant>(state.getModel(), Instant.now());
when(checkpoint.processGetResponse(any(), anyString())).thenReturn(Optional.of(entry));
checkpointWriteQueue = mock(CheckpointWriteWorker.class);
modelManager = mock(ModelManager.class);
when(modelManager.processEntityCheckpoint(any(), any(), anyString(), anyString(), anyInt())).thenReturn(state);
when(modelManager.score(any(), anyString(), any())).thenReturn(new ThresholdingResult(0, 1, 0.7));
coldstartQueue = mock(EntityColdStartWorker.class);
resultWriteQueue = mock(ResultWriteWorker.class);
anomalyDetectionIndices = mock(AnomalyDetectionIndices.class);
cacheProvider = mock(CacheProvider.class);
entityCache = mock(EntityCache.class);
when(cacheProvider.get()).thenReturn(entityCache);
when(entityCache.hostIfPossible(any(), any())).thenReturn(true);
// Integer.MAX_VALUE makes a huge heap
worker = new CheckpointReadWorker(Integer.MAX_VALUE, AnomalyDetectorSettings.ENTITY_FEATURE_REQUEST_SIZE_IN_BYTES, AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, clusterService, new Random(42), mock(ADCircuitBreakerService.class), threadPool, Settings.EMPTY, AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, clock, AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, AnomalyDetectorSettings.QUEUE_MAINTENANCE, modelManager, checkpoint, coldstartQueue, resultWriteQueue, nodeStateManager, anomalyDetectionIndices, cacheProvider, AnomalyDetectorSettings.HOURLY_MAINTENANCE, checkpointWriteQueue);
request = new EntityFeatureRequest(Integer.MAX_VALUE, detectorId, RequestPriority.MEDIUM, entity, new double[] { 0 }, 0);
request2 = new EntityFeatureRequest(Integer.MAX_VALUE, detectorId, RequestPriority.MEDIUM, entity2, new double[] { 0 }, 0);
request3 = new EntityFeatureRequest(Integer.MAX_VALUE, detectorId, RequestPriority.MEDIUM, entity3, new double[] { 0 }, 0);
}
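The stub when(entityCache.hostIfPossible(any(), any())).thenReturn(true) makes the cache accept every model restored from a checkpoint. A hypothetical variation, sketched below and not taken from CheckpointReadWorkerTests, flips that stub so a test could cover the path where the cache is full and the worker cannot host the model in memory.
// Hypothetical negative-path stub (illustration only): the cache refuses the restored model,
// which is the opposite of the happy path configured in setUp above.
when(entityCache.hostIfPossible(any(), any())).thenReturn(false);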
Use of org.opensearch.ad.caching.CacheProvider in project anomaly-detection by opensearch-project.
From the class DeleteModelTransportActionTests, method setUp:
@Override
@Before
public void setUp() throws Exception {
super.setUp();
ThreadPool threadPool = mock(ThreadPool.class);
ClusterService clusterService = mock(ClusterService.class);
localNodeID = "foo";
when(clusterService.localNode()).thenReturn(new DiscoveryNode(localNodeID, buildNewFakeTransportAddress(), Version.CURRENT));
when(clusterService.getClusterName()).thenReturn(new ClusterName("test"));
TransportService transportService = mock(TransportService.class);
ActionFilters actionFilters = mock(ActionFilters.class);
NodeStateManager nodeStateManager = mock(NodeStateManager.class);
ModelManager modelManager = mock(ModelManager.class);
FeatureManager featureManager = mock(FeatureManager.class);
CacheProvider cacheProvider = mock(CacheProvider.class);
EntityCache entityCache = mock(EntityCache.class);
when(cacheProvider.get()).thenReturn(entityCache);
ADTaskCacheManager adTaskCacheManager = mock(ADTaskCacheManager.class);
NodeStateManager stateManager = mock(NodeStateManager.class);
action = new DeleteModelTransportAction(threadPool, clusterService, transportService, actionFilters, nodeStateManager, modelManager, featureManager, cacheProvider, adTaskCacheManager);
}
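DeleteModelTransportAction is a nodes-level action, so the mocked ClusterService must report a stable local node and cluster name. A quick sanity sketch of those stubs, assuming the mocks were kept as fields (in the class above they are locals in setUp):
// Sketch only: the DiscoveryNode and ClusterName stubbed in setUp report the values the test relies on.
assertEquals(localNodeID, clusterService.localNode().getId());
assertEquals("test", clusterService.getClusterName().value());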
Use of org.opensearch.ad.caching.CacheProvider in project anomaly-detection by opensearch-project.
From the class AnomalyDetectorPlugin, method createComponents:
@Override
public Collection<Object> createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<RepositoriesService> repositoriesServiceSupplier) {
EnabledSetting.getInstance().init(clusterService);
NumericSetting.getInstance().init(clusterService);
this.client = client;
this.threadPool = threadPool;
Settings settings = environment.settings();
Throttler throttler = new Throttler(getClock());
this.clientUtil = new ClientUtil(settings, client, throttler, threadPool);
this.indexUtils = new IndexUtils(client, clientUtil, clusterService, indexNameExpressionResolver);
this.nodeFilter = new DiscoveryNodeFilterer(clusterService);
this.anomalyDetectionIndices = new AnomalyDetectionIndices(client, clusterService, threadPool, settings, nodeFilter, AnomalyDetectorSettings.MAX_UPDATE_RETRY_TIMES);
this.clusterService = clusterService;
SingleFeatureLinearUniformInterpolator singleFeatureLinearUniformInterpolator = new IntegerSensitiveSingleFeatureLinearUniformInterpolator();
Interpolator interpolator = new LinearUniformInterpolator(singleFeatureLinearUniformInterpolator);
SearchFeatureDao searchFeatureDao = new SearchFeatureDao(client, xContentRegistry, interpolator, clientUtil, settings, clusterService, AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE);
JvmService jvmService = new JvmService(environment.settings());
RandomCutForestMapper mapper = new RandomCutForestMapper();
mapper.setSaveExecutorContextEnabled(true);
mapper.setSaveTreeStateEnabled(true);
mapper.setPartialTreeStateEnabled(true);
V1JsonToV2StateConverter converter = new V1JsonToV2StateConverter();
double modelMaxSizePercent = AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE.get(settings);
ADCircuitBreakerService adCircuitBreakerService = new ADCircuitBreakerService(jvmService).init();
MemoryTracker memoryTracker = new MemoryTracker(jvmService, modelMaxSizePercent, AnomalyDetectorSettings.DESIRED_MODEL_SIZE_PERCENTAGE, clusterService, adCircuitBreakerService);
NodeStateManager stateManager = new NodeStateManager(client, xContentRegistry, settings, clientUtil, getClock(), AnomalyDetectorSettings.HOURLY_MAINTENANCE, clusterService);
FeatureManager featureManager = new FeatureManager(searchFeatureDao, interpolator, getClock(), AnomalyDetectorSettings.MAX_TRAIN_SAMPLE, AnomalyDetectorSettings.MAX_SAMPLE_STRIDE, AnomalyDetectorSettings.TRAIN_SAMPLE_TIME_RANGE_IN_HOURS, AnomalyDetectorSettings.MIN_TRAIN_SAMPLES, AnomalyDetectorSettings.MAX_SHINGLE_PROPORTION_MISSING, AnomalyDetectorSettings.MAX_IMPUTATION_NEIGHBOR_DISTANCE, AnomalyDetectorSettings.PREVIEW_SAMPLE_RATE, AnomalyDetectorSettings.MAX_PREVIEW_SAMPLES, AnomalyDetectorSettings.HOURLY_MAINTENANCE, threadPool, AD_THREAD_POOL_NAME);
long heapSizeBytes = JvmInfo.jvmInfo().getMem().getHeapMax().getBytes();
serializeRCFBufferPool = AccessController.doPrivileged(new PrivilegedAction<GenericObjectPool<LinkedBuffer>>() {
    @Override
    public GenericObjectPool<LinkedBuffer> run() {
        return new GenericObjectPool<>(new BasePooledObjectFactory<LinkedBuffer>() {
            @Override
            public LinkedBuffer create() throws Exception {
                return LinkedBuffer.allocate(AnomalyDetectorSettings.SERIALIZATION_BUFFER_BYTES);
            }

            @Override
            public PooledObject<LinkedBuffer> wrap(LinkedBuffer obj) {
                return new DefaultPooledObject<>(obj);
            }
        });
    }
});
serializeRCFBufferPool.setMaxTotal(AnomalyDetectorSettings.MAX_TOTAL_RCF_SERIALIZATION_BUFFERS);
serializeRCFBufferPool.setMaxIdle(AnomalyDetectorSettings.MAX_TOTAL_RCF_SERIALIZATION_BUFFERS);
serializeRCFBufferPool.setMinIdle(0);
serializeRCFBufferPool.setBlockWhenExhausted(false);
serializeRCFBufferPool.setTimeBetweenEvictionRuns(AnomalyDetectorSettings.HOURLY_MAINTENANCE);
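// Editorial sketch (not plugin code): the pool configured above is later used in the usual
// commons-pool2 borrow/serialize/return fashion; borrowObject() and returnObject() are standard
// GenericObjectPool calls, and the try/finally shape below is an illustration only.
//
//     LinkedBuffer buffer = serializeRCFBufferPool.borrowObject();
//     try {
//         // write a serialized model into the buffer
//     } finally {
//         buffer.clear();
//         serializeRCFBufferPool.returnObject(buffer);
//     }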
CheckpointDao checkpoint = new CheckpointDao(client, clientUtil, CommonName.CHECKPOINT_INDEX_NAME, gson, mapper, converter, new ThresholdedRandomCutForestMapper(), AccessController.doPrivileged((PrivilegedAction<Schema<ThresholdedRandomCutForestState>>) () -> RuntimeSchema.getSchema(ThresholdedRandomCutForestState.class)), HybridThresholdingModel.class, anomalyDetectionIndices, AnomalyDetectorSettings.MAX_CHECKPOINT_BYTES, serializeRCFBufferPool, AnomalyDetectorSettings.SERIALIZATION_BUFFER_BYTES, 1 - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE);
Random random = new Random(42);
CheckpointWriteWorker checkpointWriteQueue = new CheckpointWriteWorker(heapSizeBytes, AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_SIZE_IN_BYTES, AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT, clusterService, random, adCircuitBreakerService, threadPool, settings, AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, getClock(), AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, AnomalyDetectorSettings.QUEUE_MAINTENANCE, checkpoint, CommonName.CHECKPOINT_INDEX_NAME, AnomalyDetectorSettings.HOURLY_MAINTENANCE, stateManager, AnomalyDetectorSettings.HOURLY_MAINTENANCE);
EntityCache cache = new PriorityCache(checkpoint, AnomalyDetectorSettings.DEDICATED_CACHE_SIZE.get(settings), AnomalyDetectorSettings.CHECKPOINT_TTL, AnomalyDetectorSettings.MAX_INACTIVE_ENTITIES, memoryTracker, AnomalyDetectorSettings.NUM_TREES, getClock(), clusterService, AnomalyDetectorSettings.HOURLY_MAINTENANCE, threadPool, checkpointWriteQueue, AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT);
CacheProvider cacheProvider = new CacheProvider(cache);
EntityColdStarter entityColdStarter = new EntityColdStarter(getClock(), threadPool, stateManager, AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE, AnomalyDetectorSettings.NUM_TREES, AnomalyDetectorSettings.TIME_DECAY, AnomalyDetectorSettings.NUM_MIN_SAMPLES, AnomalyDetectorSettings.MAX_SAMPLE_STRIDE, AnomalyDetectorSettings.MAX_TRAIN_SAMPLE, interpolator, searchFeatureDao, AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE, featureManager, settings, AnomalyDetectorSettings.HOURLY_MAINTENANCE, checkpointWriteQueue, AnomalyDetectorSettings.MAX_COLD_START_ROUNDS);
EntityColdStartWorker coldstartQueue = new EntityColdStartWorker(heapSizeBytes, AnomalyDetectorSettings.ENTITY_REQUEST_SIZE_IN_BYTES, AnomalyDetectorSettings.ENTITY_COLD_START_QUEUE_MAX_HEAP_PERCENT, clusterService, random, adCircuitBreakerService, threadPool, settings, AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, getClock(), AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, AnomalyDetectorSettings.QUEUE_MAINTENANCE, entityColdStarter, AnomalyDetectorSettings.HOURLY_MAINTENANCE, stateManager);
ModelManager modelManager = new ModelManager(checkpoint, getClock(), AnomalyDetectorSettings.NUM_TREES, AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE, AnomalyDetectorSettings.TIME_DECAY, AnomalyDetectorSettings.NUM_MIN_SAMPLES, AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE, AnomalyDetectorSettings.MIN_PREVIEW_SIZE, AnomalyDetectorSettings.HOURLY_MAINTENANCE, AnomalyDetectorSettings.HOURLY_MAINTENANCE, entityColdStarter, featureManager, memoryTracker);
MultiEntityResultHandler multiEntityResultHandler = new MultiEntityResultHandler(client, settings, threadPool, anomalyDetectionIndices, this.clientUtil, this.indexUtils, clusterService);
ResultWriteWorker resultWriteQueue = new ResultWriteWorker(heapSizeBytes, AnomalyDetectorSettings.RESULT_WRITE_QUEUE_SIZE_IN_BYTES, AnomalyDetectorSettings.RESULT_WRITE_QUEUE_MAX_HEAP_PERCENT, clusterService, random, adCircuitBreakerService, threadPool, settings, AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, getClock(), AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, AnomalyDetectorSettings.QUEUE_MAINTENANCE, multiEntityResultHandler, xContentRegistry, stateManager, AnomalyDetectorSettings.HOURLY_MAINTENANCE);
CheckpointReadWorker checkpointReadQueue = new CheckpointReadWorker(heapSizeBytes, AnomalyDetectorSettings.ENTITY_FEATURE_REQUEST_SIZE_IN_BYTES, AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, clusterService, random, adCircuitBreakerService, threadPool, settings, AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, getClock(), AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, AnomalyDetectorSettings.QUEUE_MAINTENANCE, modelManager, checkpoint, coldstartQueue, resultWriteQueue, stateManager, anomalyDetectionIndices, cacheProvider, AnomalyDetectorSettings.HOURLY_MAINTENANCE, checkpointWriteQueue);
ColdEntityWorker coldEntityQueue = new ColdEntityWorker(heapSizeBytes, AnomalyDetectorSettings.ENTITY_FEATURE_REQUEST_SIZE_IN_BYTES, AnomalyDetectorSettings.COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, clusterService, random, adCircuitBreakerService, threadPool, settings, AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, getClock(), AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, checkpointReadQueue, AnomalyDetectorSettings.HOURLY_MAINTENANCE, stateManager);
ADDataMigrator dataMigrator = new ADDataMigrator(client, clusterService, xContentRegistry, anomalyDetectionIndices);
HashRing hashRing = new HashRing(nodeFilter, getClock(), settings, client, clusterService, dataMigrator, modelManager);
anomalyDetectorRunner = new AnomalyDetectorRunner(modelManager, featureManager, AnomalyDetectorSettings.MAX_PREVIEW_RESULTS);
Map<String, ADStat<?>> stats = ImmutableMap.<String, ADStat<?>>builder()
    .put(StatNames.AD_EXECUTE_REQUEST_COUNT.getName(), new ADStat<>(false, new CounterSupplier()))
    .put(StatNames.AD_EXECUTE_FAIL_COUNT.getName(), new ADStat<>(false, new CounterSupplier()))
    .put(StatNames.AD_HC_EXECUTE_REQUEST_COUNT.getName(), new ADStat<>(false, new CounterSupplier()))
    .put(StatNames.AD_HC_EXECUTE_FAIL_COUNT.getName(), new ADStat<>(false, new CounterSupplier()))
    .put(StatNames.MODEL_INFORMATION.getName(), new ADStat<>(false, new ModelsOnNodeSupplier(modelManager, cacheProvider, settings, clusterService)))
    .put(StatNames.ANOMALY_DETECTORS_INDEX_STATUS.getName(), new ADStat<>(true, new IndexStatusSupplier(indexUtils, AnomalyDetector.ANOMALY_DETECTORS_INDEX)))
    .put(StatNames.ANOMALY_RESULTS_INDEX_STATUS.getName(), new ADStat<>(true, new IndexStatusSupplier(indexUtils, CommonName.ANOMALY_RESULT_INDEX_ALIAS)))
    .put(StatNames.MODELS_CHECKPOINT_INDEX_STATUS.getName(), new ADStat<>(true, new IndexStatusSupplier(indexUtils, CommonName.CHECKPOINT_INDEX_NAME)))
    .put(StatNames.ANOMALY_DETECTION_JOB_INDEX_STATUS.getName(), new ADStat<>(true, new IndexStatusSupplier(indexUtils, AnomalyDetectorJob.ANOMALY_DETECTOR_JOB_INDEX)))
    .put(StatNames.ANOMALY_DETECTION_STATE_STATUS.getName(), new ADStat<>(true, new IndexStatusSupplier(indexUtils, CommonName.DETECTION_STATE_INDEX)))
    .put(StatNames.DETECTOR_COUNT.getName(), new ADStat<>(true, new SettableSupplier()))
    .put(StatNames.SINGLE_ENTITY_DETECTOR_COUNT.getName(), new ADStat<>(true, new SettableSupplier()))
    .put(StatNames.MULTI_ENTITY_DETECTOR_COUNT.getName(), new ADStat<>(true, new SettableSupplier()))
    .put(StatNames.AD_EXECUTING_BATCH_TASK_COUNT.getName(), new ADStat<>(false, new CounterSupplier()))
    .put(StatNames.AD_CANCELED_BATCH_TASK_COUNT.getName(), new ADStat<>(false, new CounterSupplier()))
    .put(StatNames.AD_TOTAL_BATCH_TASK_EXECUTION_COUNT.getName(), new ADStat<>(false, new CounterSupplier()))
    .put(StatNames.AD_BATCH_TASK_FAILURE_COUNT.getName(), new ADStat<>(false, new CounterSupplier()))
    .put(StatNames.MODEL_COUNT.getName(), new ADStat<>(false, new ModelsOnNodeCountSupplier(modelManager, cacheProvider)))
    .build();
adStats = new ADStats(stats);
adTaskCacheManager = new ADTaskCacheManager(settings, clusterService, memoryTracker);
adTaskManager = new ADTaskManager(settings, clusterService, client, xContentRegistry, anomalyDetectionIndices, nodeFilter, hashRing, adTaskCacheManager, threadPool);
AnomalyResultBulkIndexHandler anomalyResultBulkIndexHandler = new AnomalyResultBulkIndexHandler(client, settings, threadPool, this.clientUtil, this.indexUtils, clusterService, anomalyDetectionIndices);
adBatchTaskRunner = new ADBatchTaskRunner(settings, threadPool, clusterService, client, adCircuitBreakerService, featureManager, adTaskManager, anomalyDetectionIndices, adStats, anomalyResultBulkIndexHandler, adTaskCacheManager, searchFeatureDao, hashRing, modelManager);
ADSearchHandler adSearchHandler = new ADSearchHandler(settings, clusterService, client);
// transport action handler constructors
return ImmutableList.of(anomalyDetectionIndices, anomalyDetectorRunner, searchFeatureDao, singleFeatureLinearUniformInterpolator, interpolator, gson, jvmService, hashRing, featureManager, modelManager, stateManager, new ADClusterEventListener(clusterService, hashRing), adCircuitBreakerService, adStats, new MasterEventListener(clusterService, threadPool, client, getClock(), clientUtil, nodeFilter), nodeFilter, multiEntityResultHandler, checkpoint, cacheProvider, adTaskManager, adBatchTaskRunner, adSearchHandler, coldstartQueue, resultWriteQueue, checkpointReadQueue, checkpointWriteQueue, coldEntityQueue, entityColdStarter, adTaskCacheManager);
}
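In this production wiring the CacheProvider wraps the PriorityCache and is then passed to the checkpoint read queue, the stats suppliers, and the returned component list. The sketch below illustrates the indirection using only the two calls visible above (the constructor and get()); the stated motivation is a reading of the wiring, not taken from the plugin's documentation.
// Sketch (illustrative, not plugin code): components hold the provider and call get() at use time
// instead of capturing the EntityCache directly, so construction order stays flexible.
CacheProvider provider = new CacheProvider(cache);   // 'cache' is the PriorityCache built in createComponents
EntityCache resolved = provider.get();               // what CheckpointReadWorker and ModelsOnNodeSupplier resolve
assert resolved == cache;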
Use of org.opensearch.ad.caching.CacheProvider in project anomaly-detection by opensearch-project.
From the class EntityProfileTests, method setUp:
@Override
public void setUp() throws Exception {
super.setUp();
state = new HashSet<EntityProfileName>();
state.add(EntityProfileName.STATE);
all = new HashSet<EntityProfileName>();
all.add(EntityProfileName.INIT_PROGRESS);
all.add(EntityProfileName.ENTITY_INFO);
all.add(EntityProfileName.MODELS);
model = new HashSet<EntityProfileName>();
model.add(EntityProfileName.MODELS);
hashRing = mock(HashRing.class);
actionFilters = mock(ActionFilters.class);
transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
settings = Settings.EMPTY;
modelId = "yecrdnUBqurvo9uKU_d8_entity_app_0";
clusterService = mock(ClusterService.class);
cacheProvider = mock(CacheProvider.class);
EntityCache cache = mock(EntityCache.class);
updates = 1L;
when(cache.getTotalUpdates(anyString(), anyString())).thenReturn(updates);
when(cache.isActive(anyString(), anyString())).thenReturn(isActive);
when(cache.getLastActiveMs(anyString(), anyString())).thenReturn(lastActiveTimestamp);
Map<String, Long> modelSizeMap = new HashMap<>();
modelSizeMap.put(modelId, modelSize);
when(cache.getModelSize(anyString())).thenReturn(modelSizeMap);
when(cacheProvider.get()).thenReturn(cache);
action = new EntityProfileTransportAction(actionFilters, transportService, settings, hashRing, clusterService, cacheProvider);
future = new PlainActionFuture<>();
transportAddress1 = new TransportAddress(new InetSocketAddress(InetAddress.getByName("1.2.3.4"), 9300));
entity = Entity.createSingleAttributeEntity(categoryName, entityValue);
request = new EntityProfileRequest(detectorId, entity, state);
normalTransportInterceptor = new TransportInterceptor() {
    @Override
    public AsyncSender interceptSender(AsyncSender sender) {
        return new AsyncSender() {
            @Override
            public <T extends TransportResponse> void sendRequest(Transport.Connection connection, String action, TransportRequest request, TransportRequestOptions options, TransportResponseHandler<T> handler) {
                if (EntityProfileAction.NAME.equals(action)) {
                    sender.sendRequest(connection, action, request, options, entityProfileHandler(handler));
                } else {
                    sender.sendRequest(connection, action, request, options, handler);
                }
            }
        };
    }
};
failureTransportInterceptor = new TransportInterceptor() {
    @Override
    public AsyncSender interceptSender(AsyncSender sender) {
        return new AsyncSender() {
            @Override
            public <T extends TransportResponse> void sendRequest(Transport.Connection connection, String action, TransportRequest request, TransportRequestOptions options, TransportResponseHandler<T> handler) {
                if (EntityProfileAction.NAME.equals(action)) {
                    sender.sendRequest(connection, action, request, options, entityFailureProfileandler(handler));
                } else {
                    sender.sendRequest(connection, action, request, options, handler);
                }
            }
        };
    }
};
}
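The stubs above define everything the profile action can learn from the cache. The sketch below only re-reads those stubbed values through the provider, assuming JUnit assertions and the same test fields; it does not assert which of the values EntityProfileTransportAction actually consults.
// Sketch only: resolves the stubbed EntityCache through the provider, as the transport action would.
EntityCache stubbedCache = cacheProvider.get();
long totalUpdates = stubbedCache.getTotalUpdates(detectorId, modelId);   // 1L, per the stub above
assertEquals(1L, totalUpdates);
assertTrue(stubbedCache.getModelSize(detectorId).containsKey(modelId));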