Use of org.kie.kogito.explainability.model.PredictionOutput in project kogito-apps by kiegroup.
From class RecordingLimeExplainerTest, method testExplainNonOptimized:
@Test
void testExplainNonOptimized() throws ExecutionException, InterruptedException, TimeoutException {
    RecordingLimeExplainer limeExplainer = new RecordingLimeExplainer(10);
    List<Feature> features = new ArrayList<>();
    for (int i = 0; i < 4; i++) {
        features.add(TestUtils.getMockedNumericFeature(i));
    }
    PredictionInput input = new PredictionInput(features);
    PredictionProvider model = TestUtils.getSumSkipModel(0);
    PredictionOutput output = model.predictAsync(List.of(input))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit())
            .get(0);
    Prediction prediction = new SimplePrediction(input, output);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model)
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    assertNotNull(saliencyMap);
}
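The input-to-Prediction idiom above (submit a single PredictionInput, wait with the Config-driven timeout, take the first PredictionOutput) recurs throughout these tests. A minimal sketch of extracting it into a helper; the name toPrediction is hypothetical, not part of the kogito-apps API:

private static Prediction toPrediction(PredictionInput input, PredictionProvider model)
        throws ExecutionException, InterruptedException, TimeoutException {
    // one input is submitted, so exactly one output is expected back
    PredictionOutput output = model.predictAsync(List.of(input))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit())
            .get(0);
    return new SimplePrediction(input, output);
}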
Use of org.kie.kogito.explainability.model.PredictionOutput in project kogito-apps by kiegroup.
From class RecordingLimeExplainerTest, method testAutomaticConfigOptimization:
@ParameterizedTest
@ValueSource(longs = { 0 })
void testAutomaticConfigOptimization(long seed) throws Exception {
    PredictionProvider model = TestUtils.getSumThresholdModel(10, 10);
    PerturbationContext pc = new PerturbationContext(seed, new Random(), 1);
    LimeConfig config = new LimeConfig().withPerturbationContext(pc);
    RecordingLimeExplainer limeExplainer = new RecordingLimeExplainer(2);
    for (int i = 0; i < 50; i++) {
        List<Feature> features = new LinkedList<>();
        features.add(TestUtils.getMockedNumericFeature(Type.NUMBER.randomValue(pc).asNumber()));
        features.add(TestUtils.getMockedNumericFeature(Type.NUMBER.randomValue(pc).asNumber()));
        features.add(TestUtils.getMockedNumericFeature(Type.NUMBER.randomValue(pc).asNumber()));
        PredictionInput input = new PredictionInput(features);
        List<PredictionOutput> outputs = model.predictAsync(List.of(input))
                .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
        Prediction prediction = new SimplePrediction(input, outputs.get(0));
        Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model)
                .toCompletableFuture()
                .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
        for (Saliency saliency : saliencyMap.values()) {
            assertNotNull(saliency);
        }
    }
    LimeConfig optimizedConfig = limeExplainer.getExecutionConfig();
    assertThat(optimizedConfig).isNotEqualTo(config);
}
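The three repeated feature lines in the loop differ only in the random value they draw; an equivalent sketch that builds them in an inner loop, using only calls already present in the test:

List<Feature> features = new LinkedList<>();
for (int j = 0; j < 3; j++) {
    // each feature gets an independent random numeric value from the same PerturbationContext
    features.add(TestUtils.getMockedNumericFeature(Type.NUMBER.randomValue(pc).asNumber()));
}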
Use of org.kie.kogito.explainability.model.PredictionOutput in project kogito-apps by kiegroup.
From class ShapKernelExplainerTest, method shapTestCase:
/*
 * given a specific model, config, background, explanations, ske, and expected SHAP values,
 * test that the computed SHAP values match the expected SHAP values
 */
private void shapTestCase(PredictionProvider model, ShapKernelExplainer ske,
        double[][] toExplainRaw, double[][][] expected)
        throws InterruptedException, TimeoutException, ExecutionException {
    // establish the data to explain
    List<PredictionInput> toExplain = createPIFromMatrix(toExplainRaw);
    // pair each input with the model's prediction for it
    List<PredictionOutput> predictionOutputs = model.predictAsync(toExplain).get(5, TimeUnit.SECONDS);
    List<Prediction> predictions = new ArrayList<>();
    for (int i = 0; i < predictionOutputs.size(); i++) {
        predictions.add(new SimplePrediction(toExplain.get(i), predictionOutputs.get(i)));
    }
    // evaluate whether the explanations match the expected values
    for (int i = 0; i < toExplain.size(); i++) {
        // explanations shape: outputSize x nFeatures
        Saliency[] explanationSaliencies = ske.explainAsync(predictions.get(i), model)
                .get(5, TimeUnit.SECONDS)
                .getSaliencies();
        RealMatrix explanations = saliencyToMatrix(explanationSaliencies)[0];
        for (int j = 0; j < explanations.getRowDimension(); j++) {
            assertArrayEquals(expected[i][j], explanations.getRow(j), 1e-6);
        }
    }
}
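saliencyToMatrix is a test helper that flattens the per-output saliencies into a matrix of per-feature scores. An illustrative sketch of such a conversion, assuming Saliency#getPerFeatureImportance() and FeatureImportance#getScore() from the kogito-apps model package and Apache Commons Math's MatrixUtils; this is a sketch, not the project's actual helper:

private static RealMatrix saliencyScores(Saliency[] saliencies) {
    // one row per model output, one column per feature score
    double[][] scores = new double[saliencies.length][];
    for (int i = 0; i < saliencies.length; i++) {
        List<FeatureImportance> perFeature = saliencies[i].getPerFeatureImportance();
        scores[i] = new double[perFeature.size()];
        for (int j = 0; j < perFeature.size(); j++) {
            scores[i][j] = perFeature.get(j).getScore();
        }
    }
    return MatrixUtils.createRealMatrix(scores);
}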
Use of org.kie.kogito.explainability.model.PredictionOutput in project kogito-apps by kiegroup.
From class ShapKernelExplainerTest, method testParallel:
@Test
void testParallel() throws InterruptedException, ExecutionException {
    // establish background data and desired data to explain
    double[][] largeBackground = new double[100][10];
    for (int i = 0; i < 100; i++) {
        for (int j = 0; j < 10; j++) {
            largeBackground[i][j] = i / 100. + j;
        }
    }
    double[][] toExplainLargeBackground = { { 0, 1., -2., 3.5, -4.1, 5.5, -12., .8, .11, 15. } };
    double[][][] expected = { { { -0.495, 0., -4.495, 0.005, -8.595, 0.005, -18.495, -6.695, -8.385, 5.505 } } };
    List<PredictionInput> background = createPIFromMatrix(largeBackground);
    List<PredictionInput> toExplain = createPIFromMatrix(toExplainLargeBackground);
    PredictionProvider model = TestUtils.getSumSkipModel(1);
    ShapConfig skConfig = testConfig.withBackground(background).build();
    // pair each input with the model's prediction for it
    List<PredictionOutput> predictionOutputs = model.predictAsync(toExplain).get();
    List<Prediction> predictions = new ArrayList<>();
    for (int i = 0; i < predictionOutputs.size(); i++) {
        predictions.add(new SimplePrediction(toExplain.get(i), predictionOutputs.get(i)));
    }
    // evaluate whether the explanation, consumed on another thread, matches the expected values
    ShapKernelExplainer ske = new ShapKernelExplainer(skConfig);
    CompletableFuture<ShapResults> explanationsCF = ske.explainAsync(predictions.get(0), model);
    ExecutorService executor = ForkJoinPool.commonPool();
    Future<?> assertion = executor.submit(() -> {
        Saliency[] explanationSaliencies = explanationsCF.join().getSaliencies();
        RealMatrix explanations = saliencyToMatrix(explanationSaliencies)[0];
        assertArrayEquals(expected[0][0], explanations.getRow(0), 1e-2);
    });
    // wait for the asynchronous assertion, so a failure inside it actually fails the test
    assertion.get();
}
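The explicit executor above can be avoided by composing directly on the future; a minimal equivalent sketch using only calls already shown in these tests (an assertion failure inside the stage surfaces as an ExecutionException from get()):

explanationsCF.thenAccept(shapResults -> {
    RealMatrix explanations = saliencyToMatrix(shapResults.getSaliencies())[0];
    assertArrayEquals(expected[0][0], explanations.getRow(0), 1e-2);
}).get(); // block until the stage completes so a failure propagates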
Use of org.kie.kogito.explainability.model.PredictionOutput in project kogito-apps by kiegroup.
From class ShapKernelExplainerTest, method testPredictionWrongSize:
// Test cases with prediction size mismatches ========================================================
@Test
void testPredictionWrongSize() throws InterruptedException, TimeoutException, ExecutionException {
    // establish background data and desired data to explain
    double[][] backgroundMat = new double[5][5];
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 5; j++) {
            backgroundMat[i][j] = i / 5. + j;
        }
    }
    double[][] toExplainTooSmall = { { 0, 1., 2., 3., 4. } };
    List<PredictionInput> background = createPIFromMatrix(backgroundMat);
    List<PredictionInput> toExplain = createPIFromMatrix(toExplainTooSmall);
    PredictionProvider modelForPredictions = TestUtils.getSumSkipTwoOutputModel(1);
    PredictionProvider modelForShap = TestUtils.getSumSkipModel(1);
    ShapConfig skConfig = testConfig.withBackground(background).build();
    // pair each input with a two-output prediction
    List<PredictionOutput> predictionOutputs = modelForPredictions.predictAsync(toExplain).get(5, TimeUnit.SECONDS);
    List<Prediction> predictions = new ArrayList<>();
    for (int i = 0; i < predictionOutputs.size(); i++) {
        predictions.add(new SimplePrediction(toExplain.get(i), predictionOutputs.get(i)));
    }
    // make sure we get an illegal-argument failure: the prediction to explain
    // has a different shape than the background predictions will
    Prediction p = predictions.get(0);
    ShapKernelExplainer ske = new ShapKernelExplainer(skConfig);
    assertThrows(ExecutionException.class, () -> ske.explainAsync(p, modelForShap).get());
}
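If the cause of the failure also matters, JUnit's assertThrows returns the thrown exception, so it can be unwrapped; a minimal sketch (that the cause is an IllegalArgumentException is an assumption here, not something this test asserts):

ExecutionException thrown = assertThrows(ExecutionException.class,
        () -> ske.explainAsync(p, modelForShap).get());
// assumption: the size mismatch surfaces as an IllegalArgumentException cause
assertTrue(thrown.getCause() instanceof IllegalArgumentException);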