Use of org.kie.kogito.explainability.model.PerturbationContext in the kogito-apps project (by kiegroup), from class DataUtilsTest, method testPerturbFeaturesEmpty.
@Test
void testPerturbFeaturesEmpty() {
    // Perturbing an empty feature list must return an empty (but non-null) list.
    List<Feature> emptyFeatures = new LinkedList<>();
    List<Feature> perturbed = DataUtils.perturbFeatures(emptyFeatures, new PerturbationContext(random, 0));
    assertNotNull(perturbed);
    assertEquals(emptyFeatures.size(), perturbed.size());
}
Use of org.kie.kogito.explainability.model.PerturbationContext in the kogito-apps project (by kiegroup), from class DataUtilsTest, method assertPerturbDropNumeric.
/**
 * Asserts that perturbing numeric features changes a count of features that lies
 * between the requested number of perturbations and half the feature-vector size
 * (whichever ordering of those two bounds applies).
 */
private void assertPerturbDropNumeric(PredictionInput input, int noOfPerturbations) {
    List<Feature> originalFeatures = input.getFeatures();
    PerturbationContext context = new PerturbationContext(random, noOfPerturbations);
    List<Feature> perturbedFeatures = DataUtils.perturbFeatures(originalFeatures, context);
    int diffCount = 0;
    for (int idx = 0; idx < originalFeatures.size(); idx++) {
        double originalValue = originalFeatures.get(idx).getValue().asNumber();
        double perturbedValue = perturbedFeatures.get(idx).getValue().asNumber();
        if (originalValue != perturbedValue) {
            diffCount++;
        }
    }
    // Bounds: min/max of noOfPerturbations and half the feature count (double math, then truncated).
    double halfSize = originalFeatures.size() * 0.5;
    int lowerBound = (int) Math.min(noOfPerturbations, halfSize);
    int upperBound = (int) Math.max(noOfPerturbations, halfSize);
    assertThat(diffCount).isBetween(lowerBound, upperBound);
}
Use of org.kie.kogito.explainability.model.PerturbationContext in the kogito-apps project (by kiegroup), from class PmmlScorecardCategoricalLimeExplainerTest, method testExplanationImpactScoreWithOptimization.
@Test
void testExplanationImpactScoreWithOptimization() throws ExecutionException, InterruptedException {
    PredictionProvider model = getModel();
    List<PredictionInput> samples = getSamples();
    // Only the first five samples are scored, so the same sublist must be paired with the
    // outputs: passing the full sample list to getPredictions would misalign inputs and
    // outputs whenever samples.size() > 5.
    List<PredictionInput> scoredSamples = samples.subList(0, 5);
    List<PredictionOutput> predictionOutputs = model.predictAsync(scoredSamples).get();
    List<Prediction> predictions = DataUtils.getPredictions(scoredSamples, predictionOutputs);
    // Deterministic optimization (fixed seed) targeting the impact-score metric.
    long seed = 0;
    LimeConfigOptimizer limeConfigOptimizer = new LimeConfigOptimizer().withDeterministicExecution(true).forImpactScore();
    Random random = new Random();
    PerturbationContext perturbationContext = new PerturbationContext(seed, random, 1);
    LimeConfig initialConfig = new LimeConfig().withSamples(10).withPerturbationContext(perturbationContext);
    LimeConfig optimizedConfig = limeConfigOptimizer.optimize(initialConfig, predictions, model);
    // The optimizer is expected to produce a new config instance, not mutate the initial one.
    assertThat(optimizedConfig).isNotSameAs(initialConfig);
}
Use of org.kie.kogito.explainability.model.PerturbationContext in the kogito-apps project (by kiegroup), from class PmmlScorecardCategoricalLimeExplainerTest, method testExplanationWeightedStabilityWithOptimization.
@Test
void testExplanationWeightedStabilityWithOptimization() throws ExecutionException, InterruptedException, TimeoutException {
    PredictionProvider model = getModel();
    List<PredictionInput> samples = getSamples();
    // Only the first five samples are scored, so the same sublist must be paired with the
    // outputs: passing the full sample list to getPredictions would misalign inputs and
    // outputs whenever samples.size() > 5.
    List<PredictionInput> scoredSamples = samples.subList(0, 5);
    List<PredictionOutput> predictionOutputs = model.predictAsync(scoredSamples).get();
    List<Prediction> predictions = DataUtils.getPredictions(scoredSamples, predictionOutputs);
    // Deterministic optimization (fixed seed) targeting a weighted-stability metric (0.4/0.6 weights).
    long seed = 0;
    LimeConfigOptimizer limeConfigOptimizer = new LimeConfigOptimizer().withDeterministicExecution(true).withWeightedStability(0.4, 0.6);
    Random random = new Random();
    PerturbationContext perturbationContext = new PerturbationContext(seed, random, 1);
    LimeConfig initialConfig = new LimeConfig().withSamples(10).withPerturbationContext(perturbationContext);
    LimeConfig optimizedConfig = limeConfigOptimizer.optimize(initialConfig, predictions, model);
    assertThat(optimizedConfig).isNotSameAs(initialConfig);
    // The optimized config should still yield stable local saliency explanations on a fresh input.
    LimeExplainer limeExplainer = new LimeExplainer(optimizedConfig);
    PredictionInput testPredictionInput = getTestInput();
    List<PredictionOutput> testPredictionOutputs = model.predictAsync(List.of(testPredictionInput)).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction instance = new SimplePrediction(testPredictionInput, testPredictionOutputs.get(0));
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, instance, limeExplainer, 1, 0.5, 0.7));
}
Use of org.kie.kogito.explainability.model.PerturbationContext in the kogito-apps project (by kiegroup), from class PmmlScorecardCategoricalLimeExplainerTest, method testPMMLScorecardCategorical.
@Test
void testPMMLScorecardCategorical() throws Exception {
    // Build a LIME explainer with a fixed perturbation seed so the run is reproducible.
    PredictionInput testInput = getTestInput();
    PerturbationContext perturbationContext = new PerturbationContext(0L, new Random(), 1);
    LimeConfig limeConfig = new LimeConfig().withSamples(10).withPerturbationContext(perturbationContext);
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    PredictionProvider model = getModel();
    List<PredictionOutput> outputs = model.predictAsync(List.of(testInput)).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    assertThat(outputs).isNotNull().isNotEmpty();
    PredictionOutput firstOutput = outputs.get(0);
    assertThat(firstOutput).isNotNull();
    Prediction prediction = new SimplePrediction(testInput, firstOutput);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    // Each decision's saliency should yield a positive impact score for its top two features.
    for (Saliency saliency : saliencyMap.values()) {
        assertThat(saliency).isNotNull();
        double impactScore = ExplainabilityMetrics.impactScore(model, prediction, saliency.getTopFeatures(2));
        assertThat(impactScore).isGreaterThan(0d);
    }
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 1, 0.4, 0.4));
    // Local saliency F1 for the "score" decision (k=1, chunkSize=2) must be a valid [0, 1] score.
    List<PredictionInput> sampleInputs = getSamples();
    DataDistribution distribution = new PredictionInputsDataDistribution(sampleInputs);
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1("score", model, limeExplainer, distribution, 1, 2);
    AssertionsForClassTypes.assertThat(f1).isBetween(0d, 1d);
}
Aggregations