Example use of uk.gov.gchq.koryphe.tuple.n.Tuple3 in the Gaffer project by gchq:
from the class ElementAggregatorTest, method shouldAggregatePropertiesWithMultipleSelection.
@Test
public void shouldAggregatePropertiesWithMultipleSelection() {
    // Given: an aggregator that folds ("max", "min", "range") triples together,
    // keeping the running maximum, the running minimum, and their difference.
    final BinaryOperator<Tuple3<Integer, Integer, Integer>> maxMinRange = (state, next) -> {
        final int newMax = Math.max(state.get0(), next.get0());
        final int newMin = Math.min(state.get1(), next.get1());
        return new Tuple3<>(newMax, newMin, newMax - newMin);
    };
    final ElementAggregator aggregator = new ElementAggregator.Builder()
            .select("max", "min", "range")
            .execute(maxMinRange)
            .build();

    final Properties properties1 = new Properties();
    properties1.put("max", 10);
    properties1.put("min", 10);

    final Properties properties2 = new Properties();
    properties2.put("max", 100);
    properties2.put("min", 100);

    final Properties properties3 = new Properties();
    properties3.put("max", 1000);
    properties3.put("min", 1000);

    // When: the three property sets are aggregated pairwise.
    Properties state = aggregator.apply(properties1, properties2);
    state = aggregator.apply(state, properties3);

    // Then: max/min span all inputs and range is their difference.
    assertThat(state)
            .hasFieldOrPropertyWithValue("max", 1000)
            .hasFieldOrPropertyWithValue("min", 10)
            .hasFieldOrPropertyWithValue("range", 990);
}
Example use of uk.gov.gchq.koryphe.tuple.n.Tuple3 in the Gaffer project by gchq:
from the class SampleDataForSplitPoints, method validate.
@Override
public ValidationResult validate() {
    // Start from the default operation-level validation, then layer on the
    // field checks specific to this operation.
    final ValidationResult validationResult = Operation.super.validate();
    final Tuple3<String, Object, IsMoreThan> proportionCheck = new Tuple3<>(
            "proportionToSample must be greater than 0",
            proportionToSample,
            new IsMoreThan(0f));
    validationResult.add(FieldUtil.validateRequiredFields(proportionCheck));
    return validationResult;
}
Example use of uk.gov.gchq.koryphe.tuple.n.Tuple3 in the Gaffer project by gchq:
from the class QueryGenerator, method getPathsAndFiltersForGetElements.
/**
 * Builds the {@link ParquetQuery} for a seeded {@link GetElements} operation:
 * derives the relevant groups from the view, builds a Parquet predicate per
 * group, maps each seed to the files it may touch, and combines seed and view
 * predicates per file.
 *
 * @param getElements the operation supplying the view, seeds and directed type
 * @return the query to execute, empty if there are no seeds
 * @throws SerialisationException if a seed cannot be serialised
 * @throws OperationException if the view cannot be converted to a predicate
 */
private ParquetQuery getPathsAndFiltersForGetElements(final GetElements getElements) throws SerialisationException, OperationException {
    final Iterable<? extends ElementId> seeds = getElements.getInput();
    if (null == seeds || !seeds.iterator().hasNext()) {
        // No seeds means nothing can match; return an empty query.
        return new ParquetQuery();
    }
    // Stage 1: Use the view to identify all groups that might contain data
    final Set<String> allRelevantGroups = getRelevantGroups(getElements.getView());
    // Stage 2: For each of the above groups, create a Parquet predicate from the view and directedType
    final Map<String, Pair<FilterPredicate, Boolean>> groupToPredicate = new HashMap<>();
    for (final String group : allRelevantGroups) {
        final Pair<FilterPredicate, Boolean> filter = getPredicateFromView(
                getElements.getView(), group, schemaUtils.getEntityGroups().contains(group));
        if (schemaUtils.getEdgeGroups().contains(group)) {
            // Edge groups additionally need the directed-type restriction ANDed in.
            final FilterPredicate directedTypeFilter = getPredicateFromDirectedType(getElements.getDirectedType());
            filter.setFirst(FilterPredicateUtils.and(filter.getFirst(), directedTypeFilter));
        }
        groupToPredicate.put(group, filter);
    }
    // Stage 3: Convert seeds to ParquetElementSeeds and create Stream of <group, ParquetElementSeed> pairs where
    // each seed appears once for each of the relevant groups
    final Stream<Pair<String, ParquetElementSeed>> groupAndSeeds = StreamSupport.stream(seeds.spliterator(), false).flatMap(seed -> {
        try {
            return seedToParquetObject(seed, allRelevantGroups).stream();
        } catch (final SerialisationException e) {
            // Checked exceptions cannot escape a stream lambda; preserve the cause.
            throw new RuntimeException("SerialisationException converting seed into a Parquet object", e);
        }
    });
    // Stage 4: Convert stream of <group, ParquetElementSeed> pairs to stream of tuples
    // <group, ParquetElementSeed, Set<PathInfo>>
    final Stream<Tuple3<String, ParquetElementSeed, Set<PathInfo>>> groupSeedsAndPaths =
            groupAndSeeds.map(pair -> getRelevantFiles(pair.getFirst(), pair.getSecond()));
    // Stage 5: Create map from path to list of <group, reversed edge flag, Parquet seeds>
    // TODO: Currently this consumes the entire stream - need to do this in batches
    final List<Tuple3<String, ParquetElementSeed, Set<PathInfo>>> groupSeedsAndPathsList =
            groupSeedsAndPaths.collect(Collectors.toList());
    final Map<PathInfo, List<Tuple3<String, Boolean, ParquetElementSeed>>> pathToSeeds = new HashMap<>();
    for (final Tuple3<String, ParquetElementSeed, Set<PathInfo>> tuple : groupSeedsAndPathsList) {
        for (final PathInfo pathInfo : tuple.get2()) {
            pathToSeeds
                    .computeIfAbsent(pathInfo, k -> new ArrayList<>())
                    .add(new Tuple3<>(tuple.get0(), pathInfo.isReversed(), tuple.get1()));
        }
    }
    // Stage 6: Create ParquetQuery
    final SeededGraphFilters.IncludeIncomingOutgoingType includeIncomingOutgoingType = getElements.getIncludeIncomingOutGoing();
    final SeedMatching.SeedMatchingType seedMatchingType = getElements.getSeedMatching();
    final ParquetQuery parquetQuery = new ParquetQuery();
    for (final Map.Entry<PathInfo, List<Tuple3<String, Boolean, ParquetElementSeed>>> entry : pathToSeeds.entrySet()) {
        final PathInfo pathInfo = entry.getKey();
        FilterPredicate filterPredicate = seedsToPredicate(entry.getValue(), includeIncomingOutgoingType, seedMatchingType);
        if (null != filterPredicate) {
            final String group = pathInfo.getGroup();
            final Pair<FilterPredicate, Boolean> viewFilterPredicate = groupToPredicate.get(group);
            // NOTE(review): the flag is assumed to mean "apply full validation after
            // filtering" for this group's view predicate; default to false when no
            // view predicate exists - TODO confirm against ParquetFileQuery's contract.
            boolean viewFlag = false;
            if (null != viewFilterPredicate) {
                // Put view predicate first as filter for checking whether it matches one of many seeds could be complex
                filterPredicate = FilterPredicateUtils.and(viewFilterPredicate.getFirst(), filterPredicate);
                viewFlag = viewFilterPredicate.getSecond();
            }
            // Bug fix: the original called viewFilterPredicate.getSecond() outside the
            // null guard above, throwing an NPE in exactly the case the guard anticipates.
            final ParquetFileQuery fileQuery = new ParquetFileQuery(pathInfo.getPath(), filterPredicate, viewFlag);
            parquetQuery.add(group, fileQuery);
        }
    }
    LOGGER.info("Created ParquetQuery of {}", parquetQuery);
    return parquetQuery;
}
Aggregations