Use of org.apache.flink.shaded.guava30.com.google.common.collect.Maps in project presto by prestodb.
The class TestSelectiveOrcReader, method testMaps.
@Test
public void testMaps() throws Exception {
    Random random = new Random(0);
    tester.testRoundTrip(mapType(INTEGER, INTEGER), createList(NUM_ROWS, i -> createMap(i)));

    // map column with no nulls
    tester.testRoundTripTypes(
        ImmutableList.of(INTEGER, mapType(INTEGER, INTEGER)),
        ImmutableList.of(createList(NUM_ROWS, i -> random.nextInt()), createList(NUM_ROWS, i -> createMap(i))),
        toSubfieldFilters(
            ImmutableMap.of(0, BigintRange.of(0, Integer.MAX_VALUE, false)),
            ImmutableMap.of(1, IS_NOT_NULL),
            ImmutableMap.of(1, IS_NULL)));

    // map column with nulls
    tester.testRoundTripTypes(
        ImmutableList.of(INTEGER, mapType(INTEGER, INTEGER)),
        ImmutableList.of(createList(NUM_ROWS, i -> random.nextInt()), createList(NUM_ROWS, i -> i % 5 == 0 ? null : createMap(i))),
        toSubfieldFilters(
            ImmutableMap.of(0, BigintRange.of(0, Integer.MAX_VALUE, false)),
            ImmutableMap.of(1, IS_NOT_NULL),
            ImmutableMap.of(1, IS_NULL),
            ImmutableMap.of(0, BigintRange.of(0, Integer.MAX_VALUE, false), 1, IS_NULL),
            ImmutableMap.of(0, BigintRange.of(0, Integer.MAX_VALUE, false), 1, IS_NOT_NULL)));

    // map column with filter, followed by another column with filter
    tester.testRoundTripTypes(
        ImmutableList.of(mapType(INTEGER, INTEGER), INTEGER),
        ImmutableList.of(createList(NUM_ROWS, i -> i % 5 == 0 ? null : createMap(i)), createList(NUM_ROWS, i -> random.nextInt())),
        toSubfieldFilters(
            ImmutableMap.of(0, IS_NULL, 1, BigintRange.of(0, Integer.MAX_VALUE, false)),
            ImmutableMap.of(0, IS_NOT_NULL, 1, BigintRange.of(0, Integer.MAX_VALUE, false))));

    // empty maps
    tester.testRoundTripTypes(
        ImmutableList.of(INTEGER, mapType(INTEGER, INTEGER)),
        ImmutableList.of(createList(NUM_ROWS, i -> random.nextInt()), Collections.nCopies(NUM_ROWS, ImmutableMap.of())),
        ImmutableList.of());

    // read selected positions from an all-nulls map column
    tester.testRoundTripTypes(
        ImmutableList.of(INTEGER, mapType(INTEGER, INTEGER)),
        ImmutableList.of(createList(NUM_ROWS, i -> random.nextInt(10)), createList(NUM_ROWS, i -> null)),
        toSubfieldFilters(ImmutableMap.of(0, BigintRange.of(0, 5, false))));
}
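The createList and createMap helpers called throughout this test are defined elsewhere in TestSelectiveOrcReader and are not shown on this page. A minimal sketch consistent with how they are called here (the bodies, map sizes, and key/value choices are assumptions, not Presto's actual code) could look like:

// Hypothetical reconstructions of the helpers referenced by testMaps.
// Requires java.util.function.IntFunction, java.util.stream.IntStream,
// and java.util.stream.Collectors.
private static <T> List<T> createList(int size, IntFunction<T> createElement) {
    // One element per row index; the lambda may return null to produce null rows.
    return IntStream.range(0, size).mapToObj(createElement).collect(Collectors.toList());
}

private static Map<Integer, Integer> createMap(int seed) {
    // A small deterministic map derived from the row index.
    int size = seed % 5 + 1;
    Map<Integer, Integer> map = new HashMap<>();
    for (int k = 0; k < size; k++) {
        map.put(seed + k, seed * k);
    }
    return map;
}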
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Maps in project cas by apereo.
The class BaseOidcScopeAttributeReleasePolicy, method getAttributesInternal.
@Override
public Map<String, List<Object>> getAttributesInternal(final RegisteredServiceAttributeReleasePolicyContext context,
                                                       final Map<String, List<Object>> attributes) {
    val applicationContext = ApplicationContextProvider.getApplicationContext();
    if (applicationContext == null) {
        LOGGER.warn("Could not locate the application context to process attributes");
        return new HashMap<>(0);
    }
    val resolvedAttributes = new TreeMap<String, List<Object>>(String.CASE_INSENSITIVE_ORDER);
    resolvedAttributes.putAll(attributes);
    val attributesToRelease = Maps.<String, List<Object>>newHashMapWithExpectedSize(attributes.size());
    LOGGER.debug("Attempting to map and filter claims based on resolved attributes [{}]", resolvedAttributes);
    val properties = applicationContext.getBean(CasConfigurationProperties.class);
    val supportedClaims = properties.getAuthn().getOidc().getDiscovery().getClaims();
    val allowedClaims = new LinkedHashSet<>(getAllowedAttributes());
    allowedClaims.retainAll(supportedClaims);
    LOGGER.debug("[{}] is designed to allow claims [{}] for scope [{}]. After cross-checking with "
            + "supported claims [{}], the final collection of allowed attributes is [{}]",
        getClass().getSimpleName(), getAllowedAttributes(), getScopeType(), supportedClaims, allowedClaims);
    allowedClaims.stream()
        .map(claim -> mapClaimToAttribute(claim, resolvedAttributes))
        .filter(p -> p.getValue() != null)
        .forEach(p -> attributesToRelease.put(p.getKey(), CollectionUtils.toCollection(p.getValue(), ArrayList.class)));
    return attributesToRelease;
}
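Maps.newHashMapWithExpectedSize(attributes.size()) is used here instead of new HashMap<>(attributes.size()): the Guava factory picks an initial capacity large enough that that many insertions never force a resize, whereas the JDK constructor treats its argument as a raw table-capacity hint that the default 0.75 load factor exhausts earlier. A minimal, standalone sketch of the difference:

import com.google.common.collect.Maps;
import java.util.HashMap;
import java.util.Map;

public class ExpectedSizeDemo {
    public static void main(String[] args) {
        int expected = 100;
        // Guava: pre-sized so 100 puts never trigger an internal rehash.
        Map<String, Object> presized = Maps.newHashMapWithExpectedSize(expected);
        // JDK: new HashMap<>(100) rounds capacity to 128; with load factor 0.75
        // the resize threshold is 96, so the 100 puts below force one rehash.
        Map<String, Object> raw = new HashMap<>(expected);
        for (int i = 0; i < expected; i++) {
            presized.put("claim" + i, i);
            raw.put("claim" + i, i);
        }
        // Both maps end up equal; only the internal resize behavior differs.
        System.out.println(presized.equals(raw)); // true
    }
}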
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Maps in project SONG by overture-stack.
The class AnalysisServiceTest, method testGetAnalysisAndIdSearch.
@Test
public void testGetAnalysisAndIdSearch() {
    val studyGenerator = createStudyGenerator(studyService, randomGenerator);
    val studyId = studyGenerator.createRandomStudy();
    val analysisGenerator = createAnalysisGenerator(studyId, service, payloadGenerator);
    val numAnalysis = 10;
    val sraMap = Maps.<String, SequencingReadAnalysis>newHashMap();
    val vcaMap = Maps.<String, VariantCallAnalysis>newHashMap();
    val expectedAnalyses = Sets.<Analysis>newHashSet();
    for (int i = 1; i <= numAnalysis; i++) {
        if (i % 2 == 0) {
            val sra = analysisGenerator.createDefaultRandomSequencingReadAnalysis();
            assertThat(sraMap.containsKey(sra.getAnalysisId())).isFalse();
            sraMap.put(sra.getAnalysisId(), sra);
            expectedAnalyses.add(sra);
        } else {
            val vca = analysisGenerator.createDefaultRandomVariantCallAnalysis();
            assertThat(sraMap.containsKey(vca.getAnalysisId())).isFalse();
            vcaMap.put(vca.getAnalysisId(), vca);
            expectedAnalyses.add(vca);
        }
    }
    assertThat(expectedAnalyses).hasSize(numAnalysis);
    assertThat(sraMap.keySet().size() + vcaMap.keySet().size()).isEqualTo(numAnalysis);
    val expectedVCAs = newHashSet(vcaMap.values());
    val expectedSRAs = newHashSet(sraMap.values());
    assertThat(expectedSRAs).hasSize(sraMap.keySet().size());
    assertThat(expectedVCAs).hasSize(vcaMap.keySet().size());
    val actualAnalyses = service.getAnalysis(studyId);
    val actualSRAs = actualAnalyses.stream()
        .filter(x -> resolveAnalysisType(x.getAnalysisType()) == SEQUENCING_READ)
        .collect(toSet());
    val actualVCAs = actualAnalyses.stream()
        .filter(x -> resolveAnalysisType(x.getAnalysisType()) == VARIANT_CALL)
        .collect(toSet());
    assertThat(actualSRAs).hasSize(sraMap.keySet().size());
    assertThat(actualVCAs).hasSize(vcaMap.keySet().size());
    assertThat(actualSRAs).containsAll(expectedSRAs);
    assertThat(actualVCAs).containsAll(expectedVCAs);
    // Do a study-wide idSearch and verify the response effectively has the same
    // number of results as the getAnalysis method
    val searchedAnalyses = service.idSearch(studyId, createIdSearchRequest(null, null, null, null));
    assertThat(searchedAnalyses).hasSameSizeAs(expectedAnalyses);
    assertThat(searchedAnalyses).containsOnlyElementsOf(expectedAnalyses);
}
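The two stream filters above rely on resolveAnalysisType to turn the analysis's string type into an enum constant. A minimal sketch of such a resolver (the enum name, string values, and error handling are assumptions; SONG's actual resolver lives in its model classes) could be:

// Hypothetical resolver matching the SEQUENCING_READ / VARIANT_CALL constants used above.
enum AnalysisTypes { SEQUENCING_READ, VARIANT_CALL }

static AnalysisTypes resolveAnalysisType(String analysisType) {
    switch (analysisType) {
        case "sequencingRead":
            return AnalysisTypes.SEQUENCING_READ;
        case "variantCall":
            return AnalysisTypes.VARIANT_CALL;
        default:
            throw new IllegalStateException("Unknown analysis type: " + analysisType);
    }
}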
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Maps in project SONG by overture-stack.
The class ExportServiceTest, method runFullLoopTest.
/**
 * Given that 0 < numStudies < numAnalysesPerStudy, this test ensures that if the export service is requested for ONE
 * analysisId from EACH study, the response will return a list of size {@code numStudies}, where each element (which is
 * of type ExportedPayload) has exactly ONE payload (since there is only one analysis per study). This is a 2-in-1 test:
 * - it verifies that the conversion of an existing analysis to a payload is correct, by submitting the
 *   converted payload and comparing it with the original analysis
 * - it verifies the aggregation functionality of the export service, when given analysisIds belonging to
 *   different studies
 */
private void runFullLoopTest(Class<? extends Analysis> analysisClass, int numStudies, int numAnalysesPerStudy) {
    val includeOtherIds = false;
    val includeAnalysisId = true;
    // Check that the right parameters for this test are set
    assertCorrectConfig(numStudies, numAnalysesPerStudy);
    // Generate studies and their associated analyses
    val data = generateData(analysisClass, numStudies, numAnalysesPerStudy, includeAnalysisId, true);
    // [REDUCTION_TAG] Reduce the data so that there is one analysis for each study
    val reducedData = Maps.<String, Analysis>newHashMap();
    data.entrySet().stream()
        .filter(e -> !reducedData.containsKey(e.getKey()))
        .forEach(e -> {
            String studyId = e.getKey();
            List<? extends Analysis> analyses = e.getValue();
            int numAnalyses = analyses.size();
            int randomAnalysisPos = randomGenerator.generateRandomIntRange(0, numAnalyses);
            Analysis randomAnalysis = analyses.get(randomAnalysisPos);
            reducedData.put(studyId, randomAnalysis);
        });
    assertThat(reducedData.keySet()).hasSize(numStudies);
    assertThat(reducedData.values()).hasSize(numStudies);
    // Create a list of analysisIds that covers all the previously generated studies
    val requestedAnalysisIds = reducedData.values().stream()
        .map(Analysis::getAnalysisId)
        .collect(toImmutableList());
    assertThat(requestedAnalysisIds).hasSize(numStudies);
    // Export the analyses for the requested analysisIds
    val exportedPayloads = exportService.exportPayload(requestedAnalysisIds, includeAnalysisId, includeOtherIds);
    // There should be an ExportedPayload object for each study
    assertThat(exportedPayloads).hasSize(numStudies);
    for (val exportedPayload : exportedPayloads) {
        // There should be only 1 analysis for each study. Refer to the comment with REDUCTION_TAG.
        val studyId = exportedPayload.getStudyId();
        assertThat(exportedPayload.getPayloads()).hasSize(1);
        val payload = exportedPayload.getPayloads().get(0);
        val expectedAnalysis = reducedData.get(studyId);
        // Delete the previously created analysis so it can be created using the uploadService (front-door creation)
        deleteAnalysis(expectedAnalysis);
        // Depending on includeAnalysisId's value, either remove or keep the analysisId. Always keep the
        // other ids, so that they can be compared to the re-created analysis after submission
        massageAnalysisInplace(expectedAnalysis, includeAnalysisId, true);
        // Submit the payload. It should create the same "otherIds" as expected
        val actualAnalysis = submitPayload(studyId, payload, analysisClass);
        // Assert that the output analysis is correct
        assertAnalysis(actualAnalysis, expectedAnalysis);
    }
}
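Since runFullLoopTest is private and parameterized by the analysis subtype, a test class would drive it from concrete @Test methods. Hypothetical invocations (the counts are assumed values chosen to satisfy 0 < numStudies < numAnalysesPerStudy) might look like:

@Test
public void testFullLoopSequencingRead() {
    // Hypothetical driver: 3 studies, each with 4 analyses.
    runFullLoopTest(SequencingReadAnalysis.class, 3, 4);
}

@Test
public void testFullLoopVariantCall() {
    runFullLoopTest(VariantCallAnalysis.class, 3, 4);
}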
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Maps in project atomix by atomix.
The class ConsistentMapProxyBuilder, method buildAsync.
@Override
@SuppressWarnings("unchecked")
public CompletableFuture<ConsistentMap<K, V>> buildAsync() {
    PrimitiveProtocol protocol = protocol();
    PartitionGroup<?> partitions = managementService.getPartitionService().getPartitionGroup(protocol);
    Map<PartitionId, CompletableFuture<AsyncConsistentMap<byte[], byte[]>>> maps = Maps.newConcurrentMap();
    for (Partition partition : partitions.getPartitions()) {
        maps.put(partition.id(), partition.getPrimitiveClient()
            .newProxy(name(), primitiveType(), protocol)
            .connect()
            .thenApply(proxy -> new TranscodingAsyncConsistentMap<>(
                new ConsistentMapProxy(proxy),
                BaseEncoding.base16()::encode,
                BaseEncoding.base16()::decode,
                Function.identity(),
                Function.identity())));
    }
    Partitioner<byte[]> partitioner = key -> {
        int bucket = Math.abs(Hashing.murmur3_32().hashBytes(key).asInt()) % NUM_BUCKETS;
        return partitions.getPartitionIds().get(Hashing.consistentHash(bucket, partitions.getPartitionIds().size()));
    };
    return Futures.allOf(Lists.newArrayList(maps.values())).thenApply(m -> {
        AsyncConsistentMap<byte[], byte[]> partitionedMap = new PartitionedAsyncConsistentMap<>(
            name(), Maps.transformValues(maps, v -> v.getNow(null)), partitioner);
        Serializer serializer = serializer();
        AsyncConsistentMap<K, V> map = new TranscodingAsyncConsistentMap<>(
            partitionedMap,
            key -> serializer.encode(key),
            bytes -> serializer.decode(bytes),
            value -> value == null ? null : serializer.encode(value),
            bytes -> serializer.decode(bytes));
        if (!nullValues()) {
            map = new NotNullAsyncConsistentMap<>(map);
        }
        if (relaxedReadConsistency()) {
            map = new CachingAsyncConsistentMap<>(map);
        }
        if (readOnly()) {
            map = new UnmodifiableAsyncConsistentMap<>(map);
        }
        return map.sync();
    });
}
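The partitioner above hashes in two stages: murmur3 collapses the key into one of NUM_BUCKETS fixed buckets, and Guava's consistentHash then maps that bucket onto a partition index, so changing the partition count only remaps roughly 1/n of the buckets. A standalone sketch of the same scheme (NUM_BUCKETS here is an assumed value; atomix defines its own constant):

import com.google.common.hash.Hashing;
import java.nio.charset.StandardCharsets;

public class PartitionerDemo {
    private static final int NUM_BUCKETS = 128; // assumed value

    static int partitionFor(byte[] key, int numPartitions) {
        // Stage 1: stable hash of the key into a fixed bucket space.
        int bucket = Math.abs(Hashing.murmur3_32().hashBytes(key).asInt()) % NUM_BUCKETS;
        // Stage 2: consistent hashing from bucket to partition index, so
        // growing numPartitions moves only ~1/numPartitions of the buckets.
        return Hashing.consistentHash(bucket, numPartitions);
    }

    public static void main(String[] args) {
        byte[] key = "some-map-key".getBytes(StandardCharsets.UTF_8);
        System.out.println(partitionFor(key, 3));
        System.out.println(partitionFor(key, 4)); // most keys keep their partition
    }
}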