Use of org.kie.kogito.explainability.model.Prediction in project kogito-apps by kiegroup.
The class CounterfactualExplainerTest, method testFinalUniqueIds:
@ParameterizedTest
@ValueSource(ints = { 0, 1, 2 })
void testFinalUniqueIds(int seed) throws ExecutionException, InterruptedException, TimeoutException {
    Random random = new Random();
    random.setSeed(seed);
    final List<Output> goal = new ArrayList<>();
    List<Feature> features = List.of(FeatureFactory.newNumericalFeature("f-num1", 10.0, NumericalFeatureDomain.create(0, 20)));
    PredictionProvider model = TestUtils.getFeaturePassModel(0);

    final TerminationConfig terminationConfig = new TerminationConfig().withScoreCalculationCountLimit(100_000L);
    final SolverConfig solverConfig = SolverConfigBuilder.builder().withTerminationConfig(terminationConfig).build();
    solverConfig.setRandomSeed((long) seed);
    solverConfig.setEnvironmentMode(EnvironmentMode.REPRODUCIBLE);

    final List<UUID> intermediateIds = new ArrayList<>();
    final List<UUID> executionIds = new ArrayList<>();
    final Consumer<CounterfactualResult> captureIntermediateIds = counterfactual -> {
        intermediateIds.add(counterfactual.getSolutionId());
    };
    final Consumer<CounterfactualResult> captureExecutionIds = counterfactual -> {
        executionIds.add(counterfactual.getExecutionId());
    };

    final CounterfactualConfig counterfactualConfig = new CounterfactualConfig().withSolverConfig(solverConfig);
    solverConfig.withEasyScoreCalculatorClass(MockCounterFactualScoreCalculator.class);
    final CounterfactualExplainer counterfactualExplainer = new CounterfactualExplainer(counterfactualConfig);

    PredictionInput input = new PredictionInput(features);
    PredictionOutput output = new PredictionOutput(goal);
    final UUID executionId = UUID.randomUUID();
    Prediction prediction = new CounterfactualPrediction(input, output, null, executionId, null);
    final CounterfactualResult counterfactualResult = counterfactualExplainer
            .explainAsync(prediction, model, captureIntermediateIds.andThen(captureExecutionIds))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());

    for (CounterfactualEntity entity : counterfactualResult.getEntities()) {
        logger.debug("Entity: {}", entity);
    }

    // All intermediate ids should be unique
    assertEquals((int) intermediateIds.stream().distinct().count(), intermediateIds.size());
    // There should be at least one intermediate id
    assertTrue(intermediateIds.size() > 0);
    // There should be at least one execution id
    assertTrue(executionIds.size() > 0);
    // We should have the same number of execution ids as intermediate ids (captured from intermediate results)
    assertEquals(executionIds.size(), intermediateIds.size());
    // All execution ids should be the same
    assertEquals(1, (int) executionIds.stream().distinct().count());
    // The last intermediate id must be different from the final result id
    assertNotEquals(intermediateIds.get(intermediateIds.size() - 1), counterfactualResult.getSolutionId());
    // Captured execution ids should be the same as the one provided
    assertEquals(executionIds.get(0), executionId);
}
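The two capture consumers above are chained with java.util.function.Consumer.andThen, which is why the test can assert that the two lists always have the same size: the composed consumer invokes both, in order, for every intermediate result. A minimal standalone sketch of that composition (the Result record here is an illustrative stand-in for CounterfactualResult, not code from the project):

import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.function.Consumer;

public class ConsumerCompositionSketch {
    // Illustrative stand-in for CounterfactualResult, which carries both ids
    record Result(UUID solutionId, UUID executionId) {}

    public static void main(String[] args) {
        List<UUID> solutionIds = new ArrayList<>();
        List<UUID> executionIds = new ArrayList<>();
        Consumer<Result> captureSolutionIds = r -> solutionIds.add(r.solutionId());
        Consumer<Result> captureExecutionIds = r -> executionIds.add(r.executionId());
        // andThen returns a consumer that runs the first consumer, then the
        // second, for each event it receives
        Consumer<Result> combined = captureSolutionIds.andThen(captureExecutionIds);
        combined.accept(new Result(UUID.randomUUID(), UUID.randomUUID()));
        System.out.println(solutionIds.size() + " " + executionIds.size()); // prints: 1 1
    }
}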
The class CounterfactualExplainerTest, method mockExplainerInvocation:
@SuppressWarnings("unchecked")
CounterfactualResult mockExplainerInvocation(Consumer<CounterfactualResult> intermediateResultsConsumer, Long maxRunningTimeSeconds) throws ExecutionException, InterruptedException, TimeoutException {
    // Mock SolverManager and SolverJob to guarantee deterministic test behaviour
    SolverJob<CounterfactualSolution, UUID> solverJob = mock(SolverJob.class);
    CounterfactualSolution solution = mock(CounterfactualSolution.class);
    BendableBigDecimalScore score = BendableBigDecimalScore.zero(0, 0);
    when(solverManager.solveAndListen(any(), any(), any(), any())).thenReturn(solverJob);
    when(solverJob.getFinalBestSolution()).thenReturn(solution);
    when(solution.getScore()).thenReturn(score);
    when(solverManagerFactory.apply(any())).thenReturn(solverManager);

    // Set up the explainer
    final CounterfactualConfig counterfactualConfig = new CounterfactualConfig().withSolverManagerFactory(solverManagerFactory);
    final CounterfactualExplainer counterfactualExplainer = new CounterfactualExplainer(counterfactualConfig);

    // Set up a mock model; what it does is not important
    Prediction prediction = new CounterfactualPrediction(new PredictionInput(Collections.emptyList()),
            new PredictionOutput(Collections.emptyList()), null, UUID.randomUUID(), maxRunningTimeSeconds);
    return counterfactualExplainer
            .explainAsync(prediction, (List<PredictionInput> inputs) -> CompletableFuture.completedFuture(Collections.emptyList()), intermediateResultsConsumer)
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
}
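A hypothetical caller of this helper might look like the following; the test name, the no-op consumer, and the 60-second limit are illustrative, not code from the project:

@Test
void testExplainWithMaxRunningTime() throws Exception {
    // Run the explainer against the mocked solver with an illustrative
    // running-time limit and a no-op intermediate-results consumer
    CounterfactualResult result = mockExplainerInvocation(intermediateResult -> {
    }, 60L);
    assertNotNull(result);
}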
The class CounterfactualExplainerTest, method testIntermediateUniqueIds:
@ParameterizedTest
@ValueSource(ints = { 0, 1, 2 })
void testIntermediateUniqueIds(int seed) throws ExecutionException, InterruptedException, TimeoutException {
    Random random = new Random();
    random.setSeed(seed);
    final List<Output> goal = new ArrayList<>();
    List<Feature> features = List.of(FeatureFactory.newNumericalFeature("f-num1", 10.0, NumericalFeatureDomain.create(0, 20)));
    PredictionProvider model = TestUtils.getFeaturePassModel(0);

    final TerminationConfig terminationConfig = new TerminationConfig().withScoreCalculationCountLimit(100_000L);
    final SolverConfig solverConfig = SolverConfigBuilder.builder().withTerminationConfig(terminationConfig).build();
    solverConfig.setRandomSeed((long) seed);
    solverConfig.setEnvironmentMode(EnvironmentMode.REPRODUCIBLE);

    final List<UUID> intermediateIds = new ArrayList<>();
    final List<UUID> executionIds = new ArrayList<>();
    final Consumer<CounterfactualResult> captureIntermediateIds = counterfactual -> {
        intermediateIds.add(counterfactual.getSolutionId());
    };
    final Consumer<CounterfactualResult> captureExecutionIds = counterfactual -> {
        executionIds.add(counterfactual.getExecutionId());
    };

    final CounterfactualConfig counterfactualConfig = new CounterfactualConfig().withSolverConfig(solverConfig);
    solverConfig.withEasyScoreCalculatorClass(MockCounterFactualScoreCalculator.class);
    final CounterfactualExplainer counterfactualExplainer = new CounterfactualExplainer(counterfactualConfig);

    PredictionInput input = new PredictionInput(features);
    PredictionOutput output = new PredictionOutput(goal);
    final UUID executionId = UUID.randomUUID();
    Prediction prediction = new CounterfactualPrediction(input, output, null, executionId, null);
    final CounterfactualResult counterfactualResult = counterfactualExplainer
            .explainAsync(prediction, model, captureIntermediateIds.andThen(captureExecutionIds))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());

    for (CounterfactualEntity entity : counterfactualResult.getEntities()) {
        logger.debug("Entity: {}", entity);
    }

    // All intermediate ids must be distinct
    assertEquals((int) intermediateIds.stream().distinct().count(), intermediateIds.size());
    assertEquals(1, (int) executionIds.stream().distinct().count());
    assertEquals(executionIds.get(0), executionId);
}
The class CounterfactualExplainerTest, method runCounterfactualSearch:
private CounterfactualResult runCounterfactualSearch(Long randomSeed, List<Output> goal, List<Feature> features, PredictionProvider model, double goalThreshold) throws InterruptedException, ExecutionException, TimeoutException {
    final TerminationConfig terminationConfig = new TerminationConfig().withScoreCalculationCountLimit(steps);
    final SolverConfig solverConfig = SolverConfigBuilder.builder().withTerminationConfig(terminationConfig).build();
    solverConfig.setRandomSeed(randomSeed);
    solverConfig.setEnvironmentMode(EnvironmentMode.REPRODUCIBLE);
    final CounterfactualConfig counterfactualConfig = new CounterfactualConfig();
    counterfactualConfig.withSolverConfig(solverConfig).withGoalThreshold(goalThreshold);
    final CounterfactualExplainer explainer = new CounterfactualExplainer(counterfactualConfig);
    final PredictionInput input = new PredictionInput(features);
    PredictionOutput output = new PredictionOutput(goal);
    Prediction prediction = new CounterfactualPrediction(input, output, null, UUID.randomUUID(), null);
    return explainer.explainAsync(prediction, model).get(predictionTimeOut, predictionTimeUnit);
}
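For reference, a hypothetical call site for this helper in the style of the surrounding tests; the goal, the threshold, and names such as TestUtils.getSumThresholdModel and the Output/Value constructor shapes are assumptions here, not code from this listing:

// Hypothetical usage (assumed names): search for a counterfactual that flips
// the "inside" decision to true, with f-num1 free to vary in [0, 100]
final List<Output> goal = List.of(new Output("inside", Type.BOOLEAN, new Value(true), 0.0d));
final List<Feature> features = List.of(
        FeatureFactory.newNumericalFeature("f-num1", 10.0, NumericalFeatureDomain.create(0.0, 100.0)));
final PredictionProvider model = TestUtils.getSumThresholdModel(50.0, 10.0); // assumed test model
final CounterfactualResult result = runCounterfactualSearch(0L, goal, features, model, 0.01);
assertTrue(result.isValid());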
The class ExplainabilityMetrics, method getLocalSaliencyRecall:
/**
 * Evaluate the recall of a local saliency explainer on a given model.
 * Get the predictions whose outputs have the highest score for the given decision and pair them with predictions
 * whose outputs have the lowest score for the same decision.
 * Get the top k (most important) features (according to the saliency) for the most important outputs and
 * "paste" them onto each paired input corresponding to an output with a low score (for the target decision).
 * Perform prediction on the "masked" input; if the output on the masked input is equal to the output for the
 * input the masked features were taken from, that is counted as a true positive, otherwise as a false negative.
 * See Section 3.2.1 of https://openreview.net/attachment?id=B1xBAA4FwH&name=original_pdf
 *
 * @param outputName the decision to evaluate recall for
 * @param predictionProvider the prediction provider to test
 * @param localExplainer the explainer to evaluate
 * @param dataDistribution the data distribution used to obtain inputs for evaluation
 * @param k the number of features to extract
 * @param chunkSize the size of the chunk of predictions to use for evaluation
 * @return the saliency recall
 */
public static double getLocalSaliencyRecall(String outputName, PredictionProvider predictionProvider, LocalExplainer<Map<String, Saliency>> localExplainer, DataDistribution dataDistribution, int k, int chunkSize) throws InterruptedException, ExecutionException, TimeoutException {
    // get all samples from the data distribution, sorted by the score of the target output
    List<Prediction> sorted = DataUtils.getScoreSortedPredictions(outputName, predictionProvider, dataDistribution);

    // get the top and bottom 'chunkSize' predictions
    List<Prediction> topChunk = new ArrayList<>(sorted.subList(0, chunkSize));
    List<Prediction> bottomChunk = new ArrayList<>(sorted.subList(sorted.size() - chunkSize, sorted.size()));

    double truePositives = 0;
    double falseNegatives = 0;
    int currentChunk = 0;
    // for each top-scored prediction, copy its most salient features onto the paired low-scored
    // input, then feed the model with this masked input and check whether the output equals the top-scored one.
    for (Prediction prediction : topChunk) {
        Optional<Output> optionalOutput = prediction.getOutput().getByName(outputName);
        if (optionalOutput.isPresent()) {
            Output output = optionalOutput.get();
            Map<String, Saliency> stringSaliencyMap = localExplainer.explainAsync(prediction, predictionProvider)
                    .get(Config.DEFAULT_ASYNC_TIMEOUT, Config.DEFAULT_ASYNC_TIMEUNIT);
            if (stringSaliencyMap.containsKey(outputName)) {
                Saliency saliency = stringSaliencyMap.get(outputName);
                List<FeatureImportance> topFeatures = saliency.getPerFeatureImportance().stream()
                        .sorted((f1, f2) -> Double.compare(f2.getScore(), f1.getScore()))
                        .limit(k)
                        .collect(Collectors.toList());
                PredictionInput input = bottomChunk.get(currentChunk).getInput();
                PredictionInput maskedInput = maskInput(topFeatures, input);
                List<PredictionOutput> predictionOutputList = predictionProvider.predictAsync(List.of(maskedInput))
                        .get(Config.DEFAULT_ASYNC_TIMEOUT, Config.DEFAULT_ASYNC_TIMEUNIT);
                if (!predictionOutputList.isEmpty()) {
                    PredictionOutput predictionOutput = predictionOutputList.get(0);
                    Optional<Output> optionalNewOutput = predictionOutput.getByName(outputName);
                    if (optionalNewOutput.isPresent()) {
                        Output newOutput = optionalNewOutput.get();
                        if (output.getValue().equals(newOutput.getValue())) {
                            truePositives++;
                        } else {
                            falseNegatives++;
                        }
                    }
                }
                currentChunk++;
            }
        }
    }
    if ((truePositives + falseNegatives) > 0) {
        return truePositives / (truePositives + falseNegatives);
    } else {
        // if topChunk is empty or the target output (by name) is not an output of the model
        return Double.NaN;
    }
}
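The maskInput helper called above is not included in this listing. A plausible sketch, assuming it replaces each feature of the low-score input with the identically named feature from the top-k list (the project's actual implementation may differ):

private static PredictionInput maskInput(List<FeatureImportance> topFeatures, PredictionInput input) {
    // names of the k most salient features taken from the high-score input
    Set<String> maskedNames = topFeatures.stream()
            .map(fi -> fi.getFeature().getName())
            .collect(Collectors.toSet());
    List<Feature> maskedFeatures = new ArrayList<>();
    for (Feature feature : input.getFeatures()) {
        if (maskedNames.contains(feature.getName())) {
            // "paste" the salient feature from the high-score prediction over this one
            topFeatures.stream()
                    .map(FeatureImportance::getFeature)
                    .filter(f -> f.getName().equals(feature.getName()))
                    .findFirst()
                    .ifPresent(maskedFeatures::add);
        } else {
            // features outside the top k are kept from the low-score input
            maskedFeatures.add(feature);
        }
    }
    return new PredictionInput(maskedFeatures);
}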