Use of org.apache.commons.collections4.multimap.ArrayListValuedHashMap in the project herd by FINRAOS:
class StorageFileDaoImpl, method getStorageFilePathsByStorageUnitIds.
@Override
public MultiValuedMap<Integer, String> getStorageFilePathsByStorageUnitIds(List<Integer> storageUnitIds) {
// Create a map that can hold a collection of values against each key.
MultiValuedMap<Integer, String> result = new ArrayListValuedHashMap<>();
// Retrieve the pagination size for the storage file paths query configured in the system.
Integer paginationSize = configurationHelper.getProperty(ConfigurationValue.STORAGE_FILE_PATHS_QUERY_PAGINATION_SIZE, Integer.class);
// Create the criteria builder and the criteria.
CriteriaBuilder builder = entityManager.getCriteriaBuilder();
CriteriaQuery<Tuple> criteria = builder.createTupleQuery();
// The criteria root is the storage file.
Root<StorageFileEntity> storageFileEntity = criteria.from(StorageFileEntity.class);
// Get the columns.
Path<Integer> storageUnitIdColumn = storageFileEntity.get(StorageFileEntity_.storageUnitId);
Path<String> storageFilePathColumn = storageFileEntity.get(StorageFileEntity_.path);
// Create the standard restrictions (i.e. the standard where clauses).
Predicate queryRestriction = getPredicateForInClause(builder, storageUnitIdColumn, storageUnitIds);
// Add the select clause.
criteria.multiselect(storageUnitIdColumn, storageFilePathColumn);
// Add the where clause.
criteria.where(queryRestriction);
// Execute the query using pagination and populate the result map.
int startPosition = 0;
while (true) {
// Run the query to get a list of tuples back.
List<Tuple> tuples = entityManager.createQuery(criteria).setFirstResult(startPosition).setMaxResults(paginationSize).getResultList();
// Populate the result map from the returned tuples (i.e. 1 tuple for each row).
for (Tuple tuple : tuples) {
// Extract the tuple values.
Integer storageUnitId = tuple.get(storageUnitIdColumn);
String storageFilePath = tuple.get(storageFilePathColumn);
// Update the result map.
result.put(storageUnitId, storageFilePath);
}
// Break out of the while loop if we got less results than the pagination size.
if (tuples.size() < paginationSize) {
break;
}
// Increment the start position.
startPosition += paginationSize;
}
return result;
}
Use of org.apache.commons.collections4.multimap.ArrayListValuedHashMap in the project dhis2-core by dhis2:
class DataHandler, method getAggregatedDataValueMap.
/**
* Returns a mapping between dimension items and values for the given data
* query and list of indicators. The dimensional items part of the indicator
* numerators and denominators are used as dimensional item for the
* aggregated values being retrieved. In case of circular references between
* Indicators, an exception is thrown.
*
* @param params the {@link DataQueryParams}.
* @param items the list of {@link DimensionalItemObject}.
* @return a dimensional items to aggregate values map.
*/
private MultiValuedMap<String, DimensionItemObjectValue> getAggregatedDataValueMap(DataQueryParams params, List<DimensionalItemObject> items) {
if (items.isEmpty()) {
return new ArrayListValuedHashMap<>();
}
DimensionalObject dimension = new BaseDimensionalObject(DATA_X_DIM_ID, DATA_X, null, DISPLAY_NAME_DATA_X, items);
DataQueryParams dataSourceParams = newBuilder(params).replaceDimension(dimension).withMeasureCriteria(new HashMap<>()).withIncludeNumDen(false).withSkipHeaders(true).withOutputFormat(ANALYTICS).withSkipMeta(true).build();
Grid grid = dataAggregator.getAggregatedDataValueGrid(dataSourceParams);
MultiValuedMap<String, DimensionItemObjectValue> result = new ArrayListValuedHashMap<>();
if (isEmpty(grid.getRows())) {
return result;
}
// Derive the Grid indexes for data, value and period based on the first
// row of the Grid
final int dataIndex = getGridIndexByDimensionItem(grid.getRow(0), items, 0);
final int periodIndex = getGridIndexByDimensionItem(grid.getRow(0), params.getPeriods(), 1);
final int valueIndex = grid.getWidth() - 1;
final List<DimensionalItemObject> basePeriods = params.getPeriods();
for (List<Object> row : grid.getRows()) {
for (DimensionalItemObject dimensionalItem : findDimensionalItems((String) row.get(dataIndex), items)) {
if (hasPeriod(row, periodIndex)) {
addItemBasedOnPeriodOffset(result, periodIndex, valueIndex, row, dimensionalItem, basePeriods);
} else {
result.put(join(remove(row.toArray(new Object[0]), valueIndex), DIMENSION_SEP), new DimensionItemObjectValue(dimensionalItem, ((Number) row.get(valueIndex)).doubleValue()));
}
}
}
return result;
}
Use of org.apache.commons.collections4.multimap.ArrayListValuedHashMap in the project dhis2-core by dhis2:
class QueryPlannerTest, method testGetPermutationDimensionalItemValueMapCocEnabled.
@Test
void testGetPermutationDimensionalItemValueMapCocEnabled() {
MultiValuedMap<String, DimensionItemObjectValue> aggregatedDataMap = new ArrayListValuedHashMap<>();
aggregatedDataMap.put(makeKey(deA, coc, ouA, "2000Q1"), new DimensionItemObjectValue(deA, 1d));
aggregatedDataMap.put(makeKey(deA, coc, ouA, "2000Q2"), new DimensionItemObjectValue(deA, 2d));
aggregatedDataMap.put(makeKey(deA, coc, ouB, "2000Q1"), new DimensionItemObjectValue(deA, 3d));
aggregatedDataMap.put(makeKey(deA, coc, ouB, "2000Q2"), new DimensionItemObjectValue(deA, 4d));
aggregatedDataMap.put(makeKey(deB, coc, ouA, "2000Q1"), new DimensionItemObjectValue(deB, 5d));
aggregatedDataMap.put(makeKey(deB, coc, ouA, "2000Q2"), new DimensionItemObjectValue(deB, 6d));
aggregatedDataMap.put(makeKey(deB, coc, ouB, "2000Q1"), new DimensionItemObjectValue(deB, 7d));
aggregatedDataMap.put(makeKey(deB, coc, ouB, "2000Q2"), new DimensionItemObjectValue(deB, 8d));
// Method under test //
Map<String, List<DimensionItemObjectValue>> permutationMap = DataQueryParams.getPermutationDimensionalItemValueMap(aggregatedDataMap);
assertNotNull(permutationMap);
String ouAQ1Key = ouA.getUid() + DIMENSION_SEP + "2000Q1";
String ouAQ2Key = ouA.getUid() + DIMENSION_SEP + "2000Q2";
String ouBQ1Key = ouB.getUid() + DIMENSION_SEP + "2000Q1";
String ouBQ2Key = ouB.getUid() + DIMENSION_SEP + "2000Q2";
List<DimensionItemObjectValue> ouAQ1 = permutationMap.get(ouAQ1Key);
List<DimensionItemObjectValue> ouAQ2 = permutationMap.get(ouAQ2Key);
List<DimensionItemObjectValue> ouBQ1 = permutationMap.get(ouBQ1Key);
List<DimensionItemObjectValue> ouBQ2 = permutationMap.get(ouBQ2Key);
assertEquals(2, ouAQ1.size());
assertEquals(2, ouAQ2.size());
assertEquals(2, ouBQ1.size());
assertEquals(2, ouBQ2.size());
List<DimensionItemObjectValue> ouAQ1Expected = new ArrayList<>();
ouAQ1Expected.add(new DimensionItemObjectValue(deA, 1d));
ouAQ1Expected.add(new DimensionItemObjectValue(deB, 5d));
List<DimensionItemObjectValue> ouAQ2Expected = new ArrayList<>();
ouAQ2Expected.add(new DimensionItemObjectValue(deA, 2d));
ouAQ2Expected.add(new DimensionItemObjectValue(deB, 6d));
List<DimensionItemObjectValue> ouBQ1Expected = new ArrayList<>();
ouBQ1Expected.add(new DimensionItemObjectValue(deA, 3d));
ouBQ1Expected.add(new DimensionItemObjectValue(deB, 7d));
List<DimensionItemObjectValue> ouBQ2Expected = new ArrayList<>();
ouBQ2Expected.add(new DimensionItemObjectValue(deA, 4d));
ouBQ2Expected.add(new DimensionItemObjectValue(deB, 8d));
assertCollectionsMatch(ouAQ1Expected, ouAQ1);
assertCollectionsMatch(ouAQ2Expected, ouAQ2);
assertCollectionsMatch(ouBQ1Expected, ouBQ1);
assertCollectionsMatch(ouBQ2Expected, ouBQ2);
}
Use of org.apache.commons.collections4.multimap.ArrayListValuedHashMap in the project midpoint by Evolveum:
class IndexedRelationDefinitions, method initializeRelationDefinitionsByRelationName.
/**
* Removes duplicate definitions as well.
*/
@NotNull
private Map<QName, RelationDefinitionType> initializeRelationDefinitionsByRelationName(List<RelationDefinitionType> definitions) {
Map<QName, RelationDefinitionType> map = new HashMap<>();
ListValuedMap<String, QName> expansions = new ArrayListValuedHashMap<>();
for (Iterator<RelationDefinitionType> iterator = definitions.iterator(); iterator.hasNext(); ) {
RelationDefinitionType definition = iterator.next();
if (map.containsKey(definition.getRef())) {
LOGGER.error("Duplicate relation definition for '{}'; ignoring: {}", definition.getRef(), definition);
iterator.remove();
} else {
map.put(definition.getRef(), definition);
expansions.put(definition.getRef().getLocalPart(), definition.getRef());
}
}
// add entries for unqualified versions of the relation names
for (String unqualified : expansions.keySet()) {
List<QName> names = expansions.get(unqualified);
if (names.contains(new QName(unqualified))) {
// cannot expand unqualified if the expanded value is also unqualified
continue;
}
assert !names.isEmpty();
assert names.stream().allMatch(QNameUtil::isQualified);
@NotNull QName chosenExpansion;
if (names.size() == 1) {
chosenExpansion = names.get(0);
} else {
QName nameInOrgNamespace = names.stream().filter(n -> SchemaConstants.NS_ORG.equals(n.getNamespaceURI())).findFirst().orElse(null);
if (nameInOrgNamespace != null) {
// org:xxx expansion will be the default one
chosenExpansion = nameInOrgNamespace;
} else {
chosenExpansion = names.get(0);
LOGGER.warn("Multiple resolutions of unqualified relation name '{}' ({}); " + "using the first one as default: '{}'. Please reconsider this as it could lead to " + "unpredictable behavior.", unqualified, names, chosenExpansion);
}
}
assert QNameUtil.isQualified(chosenExpansion);
map.put(new QName(unqualified), map.get(chosenExpansion));
}
return map;
}
Use of org.apache.commons.collections4.multimap.ArrayListValuedHashMap in the project hive by apache:
class ParallelEdgeFixer, method fixParallelEdges.
private void fixParallelEdges(OperatorGraph og) throws SemanticException {
// Identify edge operators
ListValuedMap<Pair<Cluster, Cluster>, Pair<Operator<?>, Operator<?>>> edgeOperators = new ArrayListValuedHashMap<>();
for (Cluster c : og.getClusters()) {
for (Operator<?> o : c.getMembers()) {
for (Operator<? extends OperatorDesc> p : o.getParentOperators()) {
Cluster parentCluster = og.clusterOf(p);
if (parentCluster == c) {
continue;
}
edgeOperators.put(new Pair<>(parentCluster, c), new Pair<>(p, o));
}
}
}
// process all edges and fix parallel edges if there are any
for (Pair<Cluster, Cluster> key : edgeOperators.keySet()) {
List<Pair<Operator<?>, Operator<?>>> values = edgeOperators.get(key);
if (values.size() <= 1) {
continue;
}
// operator order must in stabile order - or we end up with falky plans causing flaky tests...
values.sort(new OperatorPairComparator());
// remove one optionally unsupported edge (it will be kept as is)
removeOneEdge(values);
Iterator<Pair<Operator<?>, Operator<?>>> it = values.iterator();
while (it.hasNext()) {
Pair<Operator<?>, Operator<?>> pair = it.next();
fixParallelEdge(pair.left, pair.right);
}
}
}
Aggregations