Search in sources :

Example 16 with Value

Use of org.kie.kogito.explainability.model.Value in the project kogito-apps by kiegroup.

From the class FairnessMetrics, method individualConsistency:

/**
 * Calculate individual fairness in terms of consistency of predictions across similar inputs.
 * <p>
 * Starts from a perfect consistency of {@code 1} and subtracts a normalized penalty for every
 * neighbor prediction that disagrees with the original prediction on a given output.
 *
 * @param proximityFunction a function that finds the top k similar inputs, given a reference input and a list of inputs
 * @param samples a list of inputs to be tested for consistency
 * @param predictionProvider the model under inspection
 * @return the consistency measure
 * @throws ExecutionException if any error occurs during model prediction
 * @throws InterruptedException if timeout or other interruption issues occur during model prediction
 */
public static double individualConsistency(BiFunction<PredictionInput, List<PredictionInput>, List<PredictionInput>> proximityFunction, List<PredictionInput> samples, PredictionProvider predictionProvider) throws ExecutionException, InterruptedException {
    double consistency = 1;
    for (PredictionInput input : samples) {
        List<PredictionOutput> predictionOutputs = predictionProvider.predictAsync(List.of(input)).get();
        PredictionOutput predictionOutput = predictionOutputs.get(0);
        // find the k most similar inputs to the current one
        List<PredictionInput> neighbors = proximityFunction.apply(input, samples);
        List<PredictionOutput> neighborsOutputs = predictionProvider.predictAsync(neighbors).get();
        for (Output output : predictionOutput.getOutputs()) {
            Value originalValue = output.getValue();
            for (PredictionOutput neighborOutput : neighborsOutputs) {
                Output currentOutput = neighborOutput.getByName(output.getName()).orElse(null);
                if (currentOutput != null && !originalValue.equals(currentOutput.getValue())) {
                    // penalize each disagreement by 1 / (neighbors * outputs * samples).
                    // Use double-precision division: the previous "1f / (int product)" was
                    // evaluated in float precision (losing accuracy in the running double sum)
                    // and the all-int denominator product could overflow for large datasets.
                    consistency -= 1d / ((double) neighbors.size() * predictionOutput.getOutputs().size() * samples.size());
                }
            }
        }
    }
    return consistency;
}
Also used : PredictionInput(org.kie.kogito.explainability.model.PredictionInput) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) Output(org.kie.kogito.explainability.model.Output) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) Value(org.kie.kogito.explainability.model.Value)

Example 17 with Value

Use of org.kie.kogito.explainability.model.Value in the project kogito-apps by kiegroup.

From the class PartialDependencePlotExplainer, method updateValueCounts:

/**
 * Increment the occurrence count of {@code output}'s value in the histogram associated with
 * the given feature-value index, creating the histogram on first observation.
 *
 * @param valueCounts per-feature-value histograms, one {@code Map<Value, Long>} per index
 * @param featureValueIndex index of the feature value whose histogram should be updated
 * @param output the observed model output whose value is counted
 */
private void updateValueCounts(List<Map<Value, Long>> valueCounts, int featureValueIndex, Output output) {
    Value categoricalOutput = output.getValue();
    if (valueCounts.size() <= featureValueIndex) {
        // first observation at this index: start a new histogram with a single count
        Map<Value, Long> classCount = new HashMap<>();
        classCount.put(categoricalOutput, 1L);
        valueCounts.add(classCount);
    } else {
        // Map.merge replaces the containsKey/get/put sequence; the former
        // valueCounts.set(...) was redundant because get() returns the same
        // map instance that is already stored in the list.
        valueCounts.get(featureValueIndex).merge(categoricalOutput, 1L, Long::sum);
    }
}
Also used : HashMap(java.util.HashMap) Value(org.kie.kogito.explainability.model.Value)

Example 18 with Value

Use of org.kie.kogito.explainability.model.Value in the project kogito-apps by kiegroup.

From the class DatasetEncoder, method getColumnData:

/**
 * Build per-feature encoded column data for a set of perturbed inputs.
 * For each target input feature, the corresponding perturbed values are gathered
 * across all inputs and encoded according to the feature's type.
 *
 * @param perturbedInputs the perturbed prediction inputs to encode
 * @param params encoding parameters forwarded to the type-specific encoder
 * @return one encoded column (list of double arrays) per target input feature
 */
private List<List<double[]>> getColumnData(List<PredictionInput> perturbedInputs, EncodingParams params) {
    List<List<double[]>> columnData = new LinkedList<>();
    for (int featureIndex = 0; featureIndex < targetInputFeatures.size(); featureIndex++) {
        final int column = featureIndex;
        Feature targetFeature = targetInputFeatures.get(column);
        // gather this feature's perturbed values across every input
        Value[] perturbedValues = perturbedInputs.stream()
                .map(input -> input.getFeatures().get(column).getValue())
                .toArray(Value[]::new);
        // encode all inputs with respect to the target, based on their type
        columnData.add(targetFeature.getType().encode(params, targetFeature.getValue(), perturbedValues));
    }
    return columnData;
}
Also used : Value(org.kie.kogito.explainability.model.Value) List(java.util.List) LinkedList(java.util.LinkedList) Feature(org.kie.kogito.explainability.model.Feature) LinkedList(java.util.LinkedList)

Example 19 with Value

Use of org.kie.kogito.explainability.model.Value in the project kogito-apps by kiegroup.

From the class LimeExplainer, method prepareInputs:

/**
 * Check the perturbed inputs so that the dataset of perturbed input / outputs contains more than just one output
 * class, otherwise it would be impossible to linearly separate it, and hence learn meaningful weights to be used as
 * feature importance scores.
 * The check can be {@code strict} or not, if so it will throw a {@code DatasetNotSeparableException} when the dataset
 * for a given output is not separable.
 *
 * @param perturbedInputs the perturbed prediction inputs forming the local dataset
 * @param perturbedOutputs the model outputs for the perturbed inputs (one per input)
 * @param linearizedTargetInputFeatures the flattened features of the original input
 * @param o index of the output under analysis within each {@code PredictionOutput}
 * @param currentOutput the original prediction's output at index {@code o}
 * @param strict whether a non-separable dataset should raise {@code DatasetNotSeparableException}
 *        instead of just logging a warning
 * @return the inputs/outputs bundle used to train the local linear model; empty when the
 *         original output value is missing
 */
private LimeInputs prepareInputs(List<PredictionInput> perturbedInputs, List<PredictionOutput> perturbedOutputs, List<Feature> linearizedTargetInputFeatures, int o, Output currentOutput, boolean strict) {
    // a null / missing output value cannot be explained: return empty inputs
    if (currentOutput.getValue() != null && currentOutput.getValue().getUnderlyingObject() != null) {
        Map<Double, Long> rawClassesBalance;
        // calculate the no. of samples belonging to each output class
        Value fv = currentOutput.getValue();
        rawClassesBalance = getClassBalance(perturbedOutputs, fv, o);
        // size of the biggest class relative to the whole dataset
        Long max = rawClassesBalance.values().stream().max(Long::compareTo).orElse(1L);
        double separationRatio = (double) max / (double) perturbedInputs.size();
        List<Output> outputs = perturbedOutputs.stream().map(po -> po.getOutputs().get(o)).collect(Collectors.toList());
        // exactly two observed classes -> treat as a (binary) classification task
        boolean classification = rawClassesBalance.size() == 2;
        if (strict) {
            // separable only if more than one class is present AND no single class dominates
            // beyond the configured ratio; also decides regressor vs. classifier for the linear model
            if (rawClassesBalance.size() > 1 && separationRatio < limeConfig.getSeparableDatasetRatio()) {
                // dataset is separable: use it to train the linear model
                return new LimeInputs(classification, linearizedTargetInputFeatures, currentOutput, perturbedInputs, outputs);
            } else {
                throw new DatasetNotSeparableException(currentOutput, rawClassesBalance);
            }
        } else {
            // non-strict mode: proceed anyway but warn that the dataset is hardly separable
            LOGGER.warn("Using an hardly separable dataset for output '{}' of type '{}' with value '{}' ({})", currentOutput.getName(), currentOutput.getType(), currentOutput.getValue(), rawClassesBalance);
            return new LimeInputs(classification, linearizedTargetInputFeatures, currentOutput, perturbedInputs, outputs);
        }
    } else {
        return new LimeInputs(false, linearizedTargetInputFeatures, currentOutput, emptyList(), emptyList());
    }
}
Also used : Arrays(java.util.Arrays) PerturbationContext(org.kie.kogito.explainability.model.PerturbationContext) Feature(org.kie.kogito.explainability.model.Feature) Prediction(org.kie.kogito.explainability.model.Prediction) CompletableFuture.completedFuture(java.util.concurrent.CompletableFuture.completedFuture) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) Value(org.kie.kogito.explainability.model.Value) DataDistribution(org.kie.kogito.explainability.model.DataDistribution) Saliency(org.kie.kogito.explainability.model.Saliency) ArrayList(java.util.ArrayList) LinearModel(org.kie.kogito.explainability.utils.LinearModel) Pair(org.apache.commons.lang3.tuple.Pair) Map(java.util.Map) FeatureDistribution(org.kie.kogito.explainability.model.FeatureDistribution) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) DataUtils(org.kie.kogito.explainability.utils.DataUtils) Logger(org.slf4j.Logger) LocalExplainer(org.kie.kogito.explainability.local.LocalExplainer) Collections.emptyList(java.util.Collections.emptyList) FeatureImportance(org.kie.kogito.explainability.model.FeatureImportance) LocalExplanationException(org.kie.kogito.explainability.local.LocalExplanationException) Collectors(java.util.stream.Collectors) Type(org.kie.kogito.explainability.model.Type) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) Objects(java.util.Objects) Consumer(java.util.function.Consumer) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) List(java.util.List) Output(org.kie.kogito.explainability.model.Output) Optional(java.util.Optional) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) Output(org.kie.kogito.explainability.model.Output) Value(org.kie.kogito.explainability.model.Value)

Example 20 with Value

Use of org.kie.kogito.explainability.model.Value in the project kogito-apps by kiegroup.

From the class RemotePredictionProviderTest, method toMap:

@Test
void toMap() {
    // plain features: simple/undefined/composite should each surface as a map entry
    Feature numberFeature = new Feature("simple", Type.NUMBER, new Value(10));
    Feature undefinedFeature = new Feature("undefined", Type.UNDEFINED, new Value(numberFeature));
    Feature compositeFeature = new Feature("composite", Type.COMPOSITE, new Value(asList(numberFeature, undefinedFeature)));
    List<Feature> flatFeatures = asList(numberFeature, undefinedFeature, compositeFeature);
    Map<String, Object> flatMap = predictionProvider.toMap(flatFeatures);
    assertNotNull(flatMap);
    assertEquals(flatFeatures.size(), flatMap.size());
    assertTrue(flatMap.containsKey("simple"));
    assertEquals(10, flatMap.get("simple"));
    assertTrue(flatMap.containsKey("undefined"));
    assertTrue(flatMap.containsKey("composite"));
    assertTrue(flatMap.get("composite") instanceof Map);
    // a feature named "context" is flattened: only its children appear, siblings are dropped
    Feature contextFeature = new Feature("context", Type.COMPOSITE, new Value(singletonList(numberFeature)));
    Feature booleanFeature = new Feature("simple2", Type.BOOLEAN, new Value(true));
    List<Feature> contextFeatures = asList(booleanFeature, contextFeature);
    Map<String, Object> contextMap = predictionProvider.toMap(contextFeatures);
    assertNotNull(contextMap);
    assertEquals(1, contextMap.size());
    assertTrue(contextMap.containsKey("simple"));
    assertFalse(contextMap.containsKey("simple2"));
    // composites nested inside composites are converted to nested maps
    Feature nestedFeature = new Feature("nestedComposite", Type.COMPOSITE, new Value(asList(numberFeature, compositeFeature)));
    List<Feature> nestedFeatures = asList(numberFeature, nestedFeature);
    Map<String, Object> nestedMap = predictionProvider.toMap(nestedFeatures);
    assertNotNull(nestedMap);
    assertEquals(nestedFeatures.size(), nestedMap.size());
    assertTrue(nestedMap.containsKey("simple"));
    assertEquals(10, nestedMap.get("simple"));
    assertTrue(nestedMap.containsKey("nestedComposite"));
    assertTrue(nestedMap.get("nestedComposite") instanceof Map);
    @SuppressWarnings("unchecked") Map<String, Object> innerMap = (Map<String, Object>) nestedMap.get("nestedComposite");
    assertEquals(2, innerMap.size());
    assertTrue(innerMap.containsKey("simple"));
    assertEquals(10, innerMap.get("simple"));
    assertTrue(innerMap.containsKey("composite"));
    assertTrue(innerMap.get("composite") instanceof Map);
}
Also used : Value(org.kie.kogito.explainability.model.Value) JsonObject(io.vertx.core.json.JsonObject) Feature(org.kie.kogito.explainability.model.Feature) Collections.emptyMap(java.util.Collections.emptyMap) Map(java.util.Map) Collections.singletonMap(java.util.Collections.singletonMap) Test(org.junit.jupiter.api.Test)

Aggregations

Value (org.kie.kogito.explainability.model.Value)80 Feature (org.kie.kogito.explainability.model.Feature)69 Output (org.kie.kogito.explainability.model.Output)59 PredictionOutput (org.kie.kogito.explainability.model.PredictionOutput)54 PredictionInput (org.kie.kogito.explainability.model.PredictionInput)49 ArrayList (java.util.ArrayList)42 PredictionProvider (org.kie.kogito.explainability.model.PredictionProvider)42 LinkedList (java.util.LinkedList)36 Type (org.kie.kogito.explainability.model.Type)36 Test (org.junit.jupiter.api.Test)35 List (java.util.List)33 Prediction (org.kie.kogito.explainability.model.Prediction)33 Random (java.util.Random)31 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)23 Arrays (java.util.Arrays)16 Map (java.util.Map)16 Optional (java.util.Optional)16 CounterfactualEntity (org.kie.kogito.explainability.local.counterfactual.entities.CounterfactualEntity)16 FeatureFactory (org.kie.kogito.explainability.model.FeatureFactory)16 Collectors (java.util.stream.Collectors)15