Example 56 with ClusterService

Use of org.opensearch.cluster.service.ClusterService in project k-NN by opensearch-project.

Class ModelCacheTests, method testRemoveAll:

public void testRemoveAll() throws ExecutionException, InterruptedException {
    int dimension = 2;
    String modelId1 = "test-model-id-1";
    int modelSize1 = BYTES_PER_KILOBYTES;
    Model mockModel1 = new Model(new ModelMetadata(KNNEngine.DEFAULT, SpaceType.DEFAULT, dimension, ModelState.CREATED, ZonedDateTime.now(ZoneOffset.UTC).toString(), "", ""), new byte[modelSize1], modelId1);
    String modelId2 = "test-model-id-2";
    int modelSize2 = BYTES_PER_KILOBYTES * 2;
    Model mockModel2 = new Model(new ModelMetadata(KNNEngine.DEFAULT, SpaceType.DEFAULT, dimension, ModelState.CREATED, ZonedDateTime.now(ZoneOffset.UTC).toString(), "", ""), new byte[modelSize2], modelId2);
    String cacheSize = "10%";
    ModelDao modelDao = mock(ModelDao.class);
    when(modelDao.get(modelId1)).thenReturn(mockModel1);
    when(modelDao.get(modelId2)).thenReturn(mockModel2);
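    // The model cache reads its size limit ("10%" here) through ClusterService, so both getSettings() and getClusterSettings() are stubbed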
    Settings settings = Settings.builder().put(MODEL_CACHE_SIZE_LIMIT_SETTING.getKey(), cacheSize).build();
    ClusterSettings clusterSettings = new ClusterSettings(settings, ImmutableSet.of(MODEL_CACHE_SIZE_LIMIT_SETTING));
    ClusterService clusterService = mock(ClusterService.class);
    when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
    when(clusterService.getSettings()).thenReturn(settings);
    ModelCache.initialize(modelDao, clusterService);
    ModelCache modelCache = new ModelCache();
    modelCache.get(modelId1);
    modelCache.get(modelId2);
    assertEquals(((modelSize1 + modelSize2) / BYTES_PER_KILOBYTES) + 2, modelCache.getTotalWeightInKB());
    modelCache.removeAll();
    assertEquals(0, modelCache.getTotalWeightInKB());
}
Also used: ClusterSettings (org.opensearch.common.settings.ClusterSettings), ClusterService (org.opensearch.cluster.service.ClusterService), Settings (org.opensearch.common.settings.Settings)
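
The same wiring appears in most of the examples that follow: mock ClusterService with Mockito, build a ClusterSettings that registers exactly the settings the component under test reads, and stub getClusterSettings() (plus getSettings() where node settings are needed). A stripped-down sketch of that pattern, where MY_SETTING stands in for whatever Setting the component consumes (a placeholder, not taken from the projects above):

// Register only the settings the component will look up; ClusterSettings only knows about settings registered with it.
Settings settings = Settings.builder().put(MY_SETTING.getKey(), "10%").build();
ClusterSettings clusterSettings = new ClusterSettings(settings, ImmutableSet.of(MY_SETTING));
ClusterService clusterService = mock(ClusterService.class);
when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
when(clusterService.getSettings()).thenReturn(settings);
// The component under test can now read MY_SETTING and register settings-update consumers as it would in production.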

Example 57 with ClusterService

Use of org.opensearch.cluster.service.ClusterService in project anomaly-detection by opensearch-project.

Class EntityColdStarterTests, method setUp:

@SuppressWarnings("unchecked")
@Override
public void setUp() throws Exception {
    super.setUp();
    numMinSamples = AnomalyDetectorSettings.NUM_MIN_SAMPLES;
    clock = mock(Clock.class);
    when(clock.instant()).thenReturn(Instant.now());
    threadPool = mock(ThreadPool.class);
    setUpADThreadPool(threadPool);
    settings = Settings.EMPTY;
    Client client = mock(Client.class);
    clientUtil = mock(ClientUtil.class);
    detector = TestHelpers.AnomalyDetectorBuilder.newInstance().setDetectionInterval(new IntervalTimeConfiguration(1, ChronoUnit.MINUTES)).setCategoryFields(ImmutableList.of(randomAlphaOfLength(5))).build();
    job = TestHelpers.randomAnomalyDetectorJob(true, Instant.ofEpochMilli(1602401500000L), null);
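    // Stub clientUtil: GET requests against the job index return the job, everything else returns the detector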
    doAnswer(invocation -> {
        GetRequest request = invocation.getArgument(0);
        ActionListener<GetResponse> listener = invocation.getArgument(2);
        if (request.index().equals(AnomalyDetectorJob.ANOMALY_DETECTOR_JOB_INDEX)) {
            listener.onResponse(TestHelpers.createGetResponse(job, detectorId, AnomalyDetectorJob.ANOMALY_DETECTOR_JOB_INDEX));
        } else {
            listener.onResponse(TestHelpers.createGetResponse(detector, detectorId, AnomalyDetector.ANOMALY_DETECTORS_INDEX));
        }
        return null;
    }).when(clientUtil).asyncRequest(any(GetRequest.class), any(), any(ActionListener.class));
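    // Register the node-state settings on top of the built-in cluster settings and build a real ClusterService around a fake local node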
    Set<Setting<?>> nodestateSetting = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    nodestateSetting.add(MAX_RETRY_FOR_UNRESPONSIVE_NODE);
    nodestateSetting.add(BACKOFF_MINUTES);
    ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, nodestateSetting);
    DiscoveryNode discoveryNode = new DiscoveryNode("node1", OpenSearchTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), DiscoveryNodeRole.BUILT_IN_ROLES, Version.CURRENT);
    ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, discoveryNode, clusterSettings);
    stateManager = new NodeStateManager(client, xContentRegistry(), settings, clientUtil, clock, AnomalyDetectorSettings.HOURLY_MAINTENANCE, clusterService);
    SingleFeatureLinearUniformInterpolator singleFeatureLinearUniformInterpolator = new IntegerSensitiveSingleFeatureLinearUniformInterpolator();
    interpolator = new LinearUniformInterpolator(singleFeatureLinearUniformInterpolator);
    searchFeatureDao = mock(SearchFeatureDao.class);
    checkpoint = mock(CheckpointDao.class);
    featureManager = new FeatureManager(
        searchFeatureDao,
        interpolator,
        clock,
        AnomalyDetectorSettings.MAX_TRAIN_SAMPLE,
        AnomalyDetectorSettings.MAX_SAMPLE_STRIDE,
        AnomalyDetectorSettings.TRAIN_SAMPLE_TIME_RANGE_IN_HOURS,
        AnomalyDetectorSettings.MIN_TRAIN_SAMPLES,
        AnomalyDetectorSettings.MAX_SHINGLE_PROPORTION_MISSING,
        AnomalyDetectorSettings.MAX_IMPUTATION_NEIGHBOR_DISTANCE,
        AnomalyDetectorSettings.PREVIEW_SAMPLE_RATE,
        AnomalyDetectorSettings.MAX_PREVIEW_SAMPLES,
        AnomalyDetectorSettings.HOURLY_MAINTENANCE,
        threadPool,
        AnomalyDetectorPlugin.AD_THREAD_POOL_NAME
    );
    checkpointWriteQueue = mock(CheckpointWriteWorker.class);
    rcfSeed = 2051L;
    entityColdStarter = new EntityColdStarter(
        clock,
        threadPool,
        stateManager,
        AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE,
        AnomalyDetectorSettings.NUM_TREES,
        AnomalyDetectorSettings.TIME_DECAY,
        numMinSamples,
        AnomalyDetectorSettings.MAX_SAMPLE_STRIDE,
        AnomalyDetectorSettings.MAX_TRAIN_SAMPLE,
        interpolator,
        searchFeatureDao,
        AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE,
        featureManager,
        settings,
        AnomalyDetectorSettings.HOURLY_MAINTENANCE,
        checkpointWriteQueue,
        rcfSeed,
        AnomalyDetectorSettings.MAX_COLD_START_ROUNDS
    );
    detectorId = "123";
    modelId = "123_entity_abc";
    entityName = "abc";
    priority = 0.3f;
    entity = Entity.createSingleAttributeEntity("field", entityName);
    released = new AtomicBoolean();
    inProgressLatch = new CountDownLatch(1);
    releaseSemaphore = () -> {
        released.set(true);
        inProgressLatch.countDown();
    };
    listener = ActionListener.wrap(releaseSemaphore);
    modelManager = new ModelManager(
        mock(CheckpointDao.class),
        mock(Clock.class),
        AnomalyDetectorSettings.NUM_TREES,
        AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE,
        AnomalyDetectorSettings.TIME_DECAY,
        AnomalyDetectorSettings.NUM_MIN_SAMPLES,
        AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE,
        AnomalyDetectorSettings.MIN_PREVIEW_SIZE,
        AnomalyDetectorSettings.HOURLY_MAINTENANCE,
        AnomalyDetectorSettings.HOURLY_MAINTENANCE,
        entityColdStarter,
        mock(FeatureManager.class),
        mock(MemoryTracker.class)
    );
}
Also used: DiscoveryNode (org.opensearch.cluster.node.DiscoveryNode), ClusterSettings (org.opensearch.common.settings.ClusterSettings), ClientUtil (org.opensearch.ad.util.ClientUtil), ThreadPool (org.opensearch.threadpool.ThreadPool), IntervalTimeConfiguration (org.opensearch.ad.model.IntervalTimeConfiguration), SearchFeatureDao (org.opensearch.ad.feature.SearchFeatureDao), Clock (java.time.Clock), NodeStateManager (org.opensearch.ad.NodeStateManager), IntegerSensitiveSingleFeatureLinearUniformInterpolator (org.opensearch.ad.dataprocessor.IntegerSensitiveSingleFeatureLinearUniformInterpolator), GetRequest (org.opensearch.action.get.GetRequest), SingleFeatureLinearUniformInterpolator (org.opensearch.ad.dataprocessor.SingleFeatureLinearUniformInterpolator), LinearUniformInterpolator (org.opensearch.ad.dataprocessor.LinearUniformInterpolator), Client (org.opensearch.client.Client), FeatureManager (org.opensearch.ad.feature.FeatureManager), HashSet (java.util.HashSet), Setting (org.opensearch.common.settings.Setting), CountDownLatch (java.util.concurrent.CountDownLatch), GetResponse (org.opensearch.action.get.GetResponse), CheckpointWriteWorker (org.opensearch.ad.ratelimit.CheckpointWriteWorker), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), ClusterService (org.opensearch.cluster.service.ClusterService), ActionListener (org.opensearch.action.ActionListener)
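
Example 57 takes the other route: rather than a Mockito mock, it builds a real ClusterService through the test framework's ClusterServiceUtils, backed by a fake local DiscoveryNode and a running ThreadPool. A minimal sketch of that setup with the matching teardown (assuming a test class that extends OpenSearchTestCase, with threadPool and clusterService as fields; imports such as java.util.concurrent.TimeUnit and org.opensearch.test.ClusterServiceUtils are omitted, in keeping with the listings above):

@Override
public void setUp() throws Exception {
    super.setUp();
    threadPool = new TestThreadPool(getTestName());
    DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), Collections.emptyMap(), DiscoveryNodeRole.BUILT_IN_ROLES, Version.CURRENT);
    ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    // createClusterService returns a started ClusterService wired to the given node and settings
    clusterService = ClusterServiceUtils.createClusterService(threadPool, localNode, clusterSettings);
}

@Override
public void tearDown() throws Exception {
    // Close the real service and terminate the thread pool, otherwise the test framework reports leaked threads
    clusterService.close();
    ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
    super.tearDown();
}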

Example 58 with ClusterService

Use of org.opensearch.cluster.service.ClusterService in project anomaly-detection by opensearch-project.

Class CheckpointReadWorkerTests, method setUp:

@Override
public void setUp() throws Exception {
    super.setUp();
    clusterService = mock(ClusterService.class);
    clusterSettings = new ClusterSettings(
        Settings.EMPTY,
        Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
            AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT,
            AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_CONCURRENCY,
            AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_BATCH_SIZE
        )))
    );
    when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
    state = MLUtil.randomModelState(new RandomModelStateConfig.Builder().fullModel(true).build());
    checkpoint = mock(CheckpointDao.class);
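    // Any checkpoint read is stubbed to restore a fully-initialized entity model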
    Map.Entry<EntityModel, Instant> entry = new SimpleImmutableEntry<EntityModel, Instant>(state.getModel(), Instant.now());
    when(checkpoint.processGetResponse(any(), anyString())).thenReturn(Optional.of(entry));
    checkpointWriteQueue = mock(CheckpointWriteWorker.class);
    modelManager = mock(ModelManager.class);
    when(modelManager.processEntityCheckpoint(any(), any(), anyString(), anyString(), anyInt())).thenReturn(state);
    when(modelManager.score(any(), anyString(), any())).thenReturn(new ThresholdingResult(0, 1, 0.7));
    coldstartQueue = mock(EntityColdStartWorker.class);
    resultWriteQueue = mock(ResultWriteWorker.class);
    anomalyDetectionIndices = mock(AnomalyDetectionIndices.class);
    cacheProvider = mock(CacheProvider.class);
    entityCache = mock(EntityCache.class);
    when(cacheProvider.get()).thenReturn(entityCache);
    when(entityCache.hostIfPossible(any(), any())).thenReturn(true);
    // Integer.MAX_VALUE makes a huge heap
    worker = new CheckpointReadWorker(
        Integer.MAX_VALUE,
        AnomalyDetectorSettings.ENTITY_FEATURE_REQUEST_SIZE_IN_BYTES,
        AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT,
        clusterService,
        new Random(42),
        mock(ADCircuitBreakerService.class),
        threadPool,
        Settings.EMPTY,
        AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO,
        clock,
        AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO,
        AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO,
        AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT,
        AnomalyDetectorSettings.QUEUE_MAINTENANCE,
        modelManager,
        checkpoint,
        coldstartQueue,
        resultWriteQueue,
        nodeStateManager,
        anomalyDetectionIndices,
        cacheProvider,
        AnomalyDetectorSettings.HOURLY_MAINTENANCE,
        checkpointWriteQueue
    );
    request = new EntityFeatureRequest(Integer.MAX_VALUE, detectorId, RequestPriority.MEDIUM, entity, new double[] { 0 }, 0);
    request2 = new EntityFeatureRequest(Integer.MAX_VALUE, detectorId, RequestPriority.MEDIUM, entity2, new double[] { 0 }, 0);
    request3 = new EntityFeatureRequest(Integer.MAX_VALUE, detectorId, RequestPriority.MEDIUM, entity3, new double[] { 0 }, 0);
}
Also used: ClusterSettings (org.opensearch.common.settings.ClusterSettings), EntityCache (org.opensearch.ad.caching.EntityCache), Instant (java.time.Instant), EntityModel (org.opensearch.ad.ml.EntityModel), ModelManager (org.opensearch.ad.ml.ModelManager), CacheProvider (org.opensearch.ad.caching.CacheProvider), ThresholdingResult (org.opensearch.ad.ml.ThresholdingResult), ClusterService (org.opensearch.cluster.service.ClusterService), CheckpointDao (org.opensearch.ad.ml.CheckpointDao), Random (java.util.Random), SimpleImmutableEntry (java.util.AbstractMap.SimpleImmutableEntry), AnomalyDetectionIndices (org.opensearch.ad.indices.AnomalyDetectionIndices), Map (java.util.Map)

Example 59 with ClusterService

Use of org.opensearch.cluster.service.ClusterService in project anomaly-detection by opensearch-project.

Class CheckpointWriteWorkerTests, method setUp:

@Override
@SuppressWarnings("unchecked")
public void setUp() throws Exception {
    super.setUp();
    clusterService = mock(ClusterService.class);
    ClusterSettings clusterSettings = new ClusterSettings(
        Settings.EMPTY,
        Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
            AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT,
            AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_CONCURRENCY,
            AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_BATCH_SIZE
        )))
    );
    when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
    checkpoint = mock(CheckpointDao.class);
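    // Serialize any model to a minimal single-field checkpoint source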
    Map<String, Object> checkpointMap = new HashMap<>();
    checkpointMap.put(CheckpointDao.FIELD_MODEL, "a");
    when(checkpoint.toIndexSource(any())).thenReturn(checkpointMap);
    // Integer.MAX_VALUE makes a huge heap
    worker = new CheckpointWriteWorker(
        Integer.MAX_VALUE,
        AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_SIZE_IN_BYTES,
        AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT,
        clusterService,
        new Random(42),
        mock(ADCircuitBreakerService.class),
        threadPool,
        Settings.EMPTY,
        AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO,
        clock,
        AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO,
        AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO,
        AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT,
        AnomalyDetectorSettings.QUEUE_MAINTENANCE,
        checkpoint,
        CommonName.CHECKPOINT_INDEX_NAME,
        AnomalyDetectorSettings.HOURLY_MAINTENANCE,
        nodeStateManager,
        AnomalyDetectorSettings.HOURLY_MAINTENANCE
    );
    state = MLUtil.randomModelState(new RandomModelStateConfig.Builder().build());
}
Also used: RandomModelStateConfig (test.org.opensearch.ad.util.RandomModelStateConfig), ClusterService (org.opensearch.cluster.service.ClusterService), ClusterSettings (org.opensearch.common.settings.ClusterSettings), CheckpointDao (org.opensearch.ad.ml.CheckpointDao), Random (java.util.Random), HashMap (java.util.HashMap), HashSet (java.util.HashSet)

Example 60 with ClusterService

Use of org.opensearch.cluster.service.ClusterService in project anomaly-detection by opensearch-project.

Class ModelManagerTests, method getRcfResult_throwToListener_whenHeapLimitExceed:

@Test
public void getRcfResult_throwToListener_whenHeapLimitExceed() {
    doAnswer(invocation -> {
        ActionListener<Optional<ThresholdedRandomCutForest>> listener = invocation.getArgument(1);
        listener.onResponse(Optional.of(rcf));
        return null;
    }).when(checkpointDao).getTRCFModel(eq(rcfModelId), any(ActionListener.class));
    when(jvmService.info().getMem().getHeapMax().getBytes()).thenReturn(1_000L);
    final Set<Setting<?>> settingsSet = Stream.concat(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), Sets.newHashSet(AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE).stream()).collect(Collectors.toSet());
    ClusterSettings clusterSettings = new ClusterSettings(settings, settingsSet);
    clusterService = new ClusterService(settings, clusterSettings, null);
    MemoryTracker memoryTracker = new MemoryTracker(jvmService, modelMaxSizePercentage, modelDesiredSizePercentage, clusterService, adCircuitBreakerService);
    ActionListener<ThresholdingResult> listener = mock(ActionListener.class);
    // use new memoryTracker
    modelManager = spy(new ModelManager(checkpointDao, clock, numTrees, numSamples, rcfTimeDecay, numMinSamples, thresholdMinPvalue, minPreviewSize, modelTtl, checkpointInterval, entityColdStarter, featureManager, memoryTracker));
    modelManager.getTRcfResult(detectorId, rcfModelId, new double[0], listener);
    verify(listener).onFailure(any(LimitExceededException.class));
}
Also used: ClusterSettings (org.opensearch.common.settings.ClusterSettings), ClusterService (org.opensearch.cluster.service.ClusterService), Optional (java.util.Optional), ActionListener (org.opensearch.action.ActionListener), Setting (org.opensearch.common.settings.Setting), LimitExceededException (org.opensearch.ad.common.exception.LimitExceededException), MemoryTracker (org.opensearch.ad.MemoryTracker), Test (org.junit.Test)
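
When a component works against a real ClusterService, the ClusterSettings handed to the service has to contain every setting the component will read or subscribe to (here, MODEL_MAX_SIZE_PERCENTAGE on top of the built-ins), otherwise looking the setting up or registering an update consumer for it fails. A short sketch of that pattern with a hypothetical setting key (plugin.my_ratio is a placeholder, not a real OpenSearch setting):

// Hypothetical dynamic node-scope setting standing in for a plugin setting such as MODEL_MAX_SIZE_PERCENTAGE
Setting<Double> myRatioSetting = Setting.doubleSetting("plugin.my_ratio", 0.1, 0.0, Setting.Property.NodeScope, Setting.Property.Dynamic);
Set<Setting<?>> registered = Stream.concat(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), Stream.of(myRatioSetting)).collect(Collectors.toSet());
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, registered);
// Passing null for the ThreadPool is fine in this sketch because the service is never started
ClusterService clusterService = new ClusterService(Settings.EMPTY, clusterSettings, null);
// Components typically subscribe to changes of their setting through the registered ClusterSettings
clusterService.getClusterSettings().addSettingsUpdateConsumer(myRatioSetting, ratio -> { });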

Aggregations

ClusterService (org.opensearch.cluster.service.ClusterService): 296
ThreadPool (org.opensearch.threadpool.ThreadPool): 123
Settings (org.opensearch.common.settings.Settings): 115
ClusterState (org.opensearch.cluster.ClusterState): 106
DiscoveryNode (org.opensearch.cluster.node.DiscoveryNode): 103
TestThreadPool (org.opensearch.threadpool.TestThreadPool): 93
TransportService (org.opensearch.transport.TransportService): 86
ClusterSettings (org.opensearch.common.settings.ClusterSettings): 76
ActionListener (org.opensearch.action.ActionListener): 75
Before (org.junit.Before): 66
ActionFilters (org.opensearch.action.support.ActionFilters): 66
CountDownLatch (java.util.concurrent.CountDownLatch): 65
HashSet (java.util.HashSet): 63
TimeValue (org.opensearch.common.unit.TimeValue): 61
IOException (java.io.IOException): 56
IndexMetadata (org.opensearch.cluster.metadata.IndexMetadata): 53
OpenSearchTestCase (org.opensearch.test.OpenSearchTestCase): 48
Collections (java.util.Collections): 47
ClusterName (org.opensearch.cluster.ClusterName): 47
Metadata (org.opensearch.cluster.metadata.Metadata): 47