Use of org.apache.flink.shaded.guava30.com.google.common.collect.Sets in project SONG by overture-stack.
The class AnalysisServiceTest, method testGetAnalysisAndIdSearch:
@Test
public void testGetAnalysisAndIdSearch() {
    val studyGenerator = createStudyGenerator(studyService, randomGenerator);
    val studyId = studyGenerator.createRandomStudy();
    val analysisGenerator = createAnalysisGenerator(studyId, service, payloadGenerator);
    val numAnalysis = 10;
    val sraMap = Maps.<String, SequencingReadAnalysis>newHashMap();
    val vcaMap = Maps.<String, VariantCallAnalysis>newHashMap();
    val expectedAnalyses = Sets.<Analysis>newHashSet();
    for (int i = 1; i <= numAnalysis; i++) {
        if (i % 2 == 0) {
            val sra = analysisGenerator.createDefaultRandomSequencingReadAnalysis();
            assertThat(sraMap.containsKey(sra.getAnalysisId())).isFalse();
            sraMap.put(sra.getAnalysisId(), sra);
            expectedAnalyses.add(sra);
        } else {
            val vca = analysisGenerator.createDefaultRandomVariantCallAnalysis();
            assertThat(vcaMap.containsKey(vca.getAnalysisId())).isFalse();
            vcaMap.put(vca.getAnalysisId(), vca);
            expectedAnalyses.add(vca);
        }
    }
    assertThat(expectedAnalyses).hasSize(numAnalysis);
    assertThat(sraMap.keySet().size() + vcaMap.keySet().size()).isEqualTo(numAnalysis);
    val expectedVCAs = newHashSet(vcaMap.values());
    val expectedSRAs = newHashSet(sraMap.values());
    assertThat(expectedSRAs).hasSize(sraMap.keySet().size());
    assertThat(expectedVCAs).hasSize(vcaMap.keySet().size());
    val actualAnalyses = service.getAnalysis(studyId);
    val actualSRAs = actualAnalyses.stream()
            .filter(x -> resolveAnalysisType(x.getAnalysisType()) == SEQUENCING_READ)
            .collect(toSet());
    val actualVCAs = actualAnalyses.stream()
            .filter(x -> resolveAnalysisType(x.getAnalysisType()) == VARIANT_CALL)
            .collect(toSet());
    assertThat(actualSRAs).hasSize(sraMap.keySet().size());
    assertThat(actualVCAs).hasSize(vcaMap.keySet().size());
    assertThat(actualSRAs).containsAll(expectedSRAs);
    assertThat(actualVCAs).containsAll(expectedVCAs);
    // Do a study-wide idSearch and verify the response effectively has the same
    // number of results as the getAnalysis method.
    val searchedAnalyses = service.idSearch(studyId, createIdSearchRequest(null, null, null, null));
    assertThat(searchedAnalyses).hasSameSizeAs(expectedAnalyses);
    assertThat(searchedAnalyses).containsOnlyElementsOf(expectedAnalyses);
}
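The test relies on two Guava Sets factory idioms: an explicit type witness for an empty set (Sets.<Analysis>newHashSet()) and the statically imported copy factory (newHashSet(collection)). A minimal, self-contained sketch of both, assuming the plain (unshaded) Guava artifact and using String elements in place of the Analysis types:

import static com.google.common.collect.Sets.newHashSet;

import com.google.common.collect.Sets;
import java.util.List;
import java.util.Set;

public class SetsFactoryDemo {
    public static void main(String[] args) {
        // Explicit type witness: needed when the element type cannot be inferred from context.
        Set<String> expected = Sets.<String>newHashSet();
        expected.add("analysis-1");
        // Statically imported copy factory, as used for expectedSRAs/expectedVCAs above.
        Set<String> copied = newHashSet(List.of("analysis-1", "analysis-2"));
        System.out.println(expected.equals(copied)); // false: copied has one extra element
    }
}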
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Sets in project atlasdb by palantir.
The class CassandraClientPoolImpl, method sanityCheckRingConsistency:
// This method exists to verify a particularly nasty bug where Cassandra doesn't have a
// consistent ring across all of its nodes. One node will think it owns more than the others
// think it does, and they will not send writes to it, but it will respond to requests
// acting like it does.
private void sanityCheckRingConsistency() {
    Multimap<Set<TokenRange>, InetSocketAddress> tokenRangesToHost = HashMultimap.create();
    for (InetSocketAddress host : cassandra.getPools().keySet()) {
        CassandraClient client = null;
        try {
            client = CassandraClientFactory.getClientInternal(host, config);
            try {
                client.describe_keyspace(config.getKeyspaceOrThrow());
            } catch (NotFoundException e) {
                // don't care to check for ring consistency when we're not even fully initialized
                return;
            }
            tokenRangesToHost.put(ImmutableSet.copyOf(client.describe_ring(config.getKeyspaceOrThrow())), host);
        } catch (Exception e) {
            log.warn("Failed to get ring info from host: {}", SafeArg.of("host", CassandraLogHelper.host(host)), e);
        } finally {
            if (client != null) {
                client.getOutputProtocol().getTransport().close();
            }
        }
    }
    if (tokenRangesToHost.isEmpty()) {
        log.warn("Failed to get ring info for entire Cassandra cluster ({}); ring could not be checked for consistency.",
                UnsafeArg.of("keyspace", config.getKeyspaceOrThrow()));
        return;
    }
    if (tokenRangesToHost.keySet().size() == 1) {
        // All nodes agree on a consistent view of the cluster. Good.
        return;
    }
    RuntimeException ex = new IllegalStateException(
            "Hosts have differing ring descriptions. This can lead to inconsistent reads and lost data.");
    log.error("Cassandra does not appear to have a consistent ring across all of its nodes. This could cause us to"
                    + " lose writes. The mapping of token ranges to hosts is:\n{}",
            UnsafeArg.of("tokenRangesToHost", CassandraLogHelper.tokenRangesToHost(tokenRangesToHost)), ex);
    // Provide some easier-to-grok logging for the two most common cases.
    if (tokenRangesToHost.size() > 2) {
        tokenRangesToHost.asMap().entrySet().stream()
                .filter(entry -> entry.getValue().size() == 1)
                .forEach(entry -> {
                    // We've checked above that entry.getValue() has one element, so we never NPE here.
                    String hostString = CassandraLogHelper.host(Iterables.getFirst(entry.getValue(), null));
                    log.error("Host: {} disagrees with the other nodes about the ring state.",
                            SafeArg.of("host", hostString));
                });
    }
    if (tokenRangesToHost.keySet().size() == 2) {
        ImmutableList<Set<TokenRange>> sets = ImmutableList.copyOf(tokenRangesToHost.keySet());
        Set<TokenRange> set1 = sets.get(0);
        Set<TokenRange> set2 = sets.get(1);
        log.error("Hosts are split. group1: {} group2: {}",
                SafeArg.of("hosts1", CassandraLogHelper.collectionOfHosts(tokenRangesToHost.get(set1))),
                SafeArg.of("hosts2", CassandraLogHelper.collectionOfHosts(tokenRangesToHost.get(set2))));
    }
    CassandraVerifier.logErrorOrThrow(ex.getMessage(), config.ignoreInconsistentRingChecks());
}
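The check hinges on the Multimap being keyed by an ImmutableSet of token ranges: hosts whose describe_ring results are equal as sets collapse onto the same key, so a single key means the whole cluster agrees. A minimal sketch of that grouping idea, assuming the plain (unshaded) Guava artifact and using Strings in place of TokenRange and host addresses:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Multimap;
import java.util.Set;

public class RingViewGrouping {
    public static void main(String[] args) {
        Multimap<Set<String>, String> viewToHost = HashMultimap.create();
        // Equal range sets hash to the same key, grouping the hosts that agree.
        viewToHost.put(ImmutableSet.of("range-1", "range-2"), "host-a");
        viewToHost.put(ImmutableSet.of("range-1", "range-2"), "host-b");
        viewToHost.put(ImmutableSet.of("range-1"), "host-c");
        // Two distinct keys: host-c disagrees with host-a and host-b about the ring.
        System.out.println(viewToHost.keySet().size() + " distinct ring views");
    }
}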
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Sets in project n4js by eclipse.
The class NpmExportWizard, method init:
@Override
public void init(IWorkbench targetWorkbench, IStructuredSelection currentSelection) {
    // this.selection = currentSelection;
    List<?> selectedResources = IDE.computeSelectedResources(currentSelection);
    List<IProject> workspaceProjects = Arrays.asList(ResourcesPlugin.getWorkspace().getRoot().getProjects());
    // Find all selected projects
    Set<IProject> projects = selectedResources.stream()
            .filter(m -> m instanceof IResource)
            .map(m -> ((IResource) m).getProject())
            .filter(p -> p.isOpen()) // only open projects
            .collect(Collectors.toSet());
    // make the behavior predictable by ordering:
    TreeSet<IProject> sortedProjects = Sets.<IProject>newTreeSet((a, b) -> a.getName().compareToIgnoreCase(b.getName()));
    sortedProjects.addAll(projects);
    // 0) Turn the projects into IN4JSProjects and process them further:
    // a) find out which projects fulfill the npm-"exportable" contract,
    // b) give the user back a list of what to export,
    // c) disable things not fulfilling the contract,
    // d) take the user's choice from the list and pass it to the exporter in the non-UI package.
    // 0)
    List<IN4JSEclipseProject> rawN4jsProjects = Lists.newArrayList();
    iP2in4jsP = HashBiMap.create();
    for (IProject iProject : workspaceProjects) {
        IN4JSEclipseProject mappedIn4jsProject = map2In4js(iProject);
        if (mappedIn4jsProject != null) {
            rawN4jsProjects.add(mappedIn4jsProject);
            iP2in4jsP.put(iProject, mappedIn4jsProject);
        }
    }
    // Filter out non-N4JS projects from the initial selection.
    sortedProjects.retainAll(iP2in4jsP.keySet());
    // Filter out all non-N4JS projects from the workspace projects.
    ArrayList<IProject> filteredWorkspaceProjects = new ArrayList<>(workspaceProjects);
    filteredWorkspaceProjects.retainAll(iP2in4jsP.keySet());
    setWindowTitle("N4JS to npm Export");
    setNeedsProgressMonitor(true);
    Map<IProject, Boolean> selectedProjects = new HashMap<>();
    // Add all workspace projects to the list; the default selection value is false.
    filteredWorkspaceProjects.forEach(project -> selectedProjects.put(project, false));
    // Override the selection value for all initially selected projects.
    sortedProjects.forEach(project -> selectedProjects.put(project, true));
    // exportPage = new ExportSelectionPage("Export Page", rawN4jsProjects, labelProvider);
    exportPage = new ExportSelectionPage("Export Page", selectedProjects);
    if (runTools())
        toolRunnerPage = new NpmToolRunnerPage("npm Execution Page");
    comparePage = new PackageJsonComparePage("Compare package.json Page");
    pageListener = new IPageChangedListener() {
        @Override
        public void pageChanged(PageChangedEvent event) {
            if (event.getSelectedPage() == comparePage) {
                udpatePackagJasonComparison();
            }
        }
    };
}
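The Sets usage here is Sets.<IProject>newTreeSet(comparator), which builds a comparator-backed TreeSet so the export page lists projects in a predictable, case-insensitive order. A minimal sketch with String elements standing in for IProject:

import com.google.common.collect.Sets;
import java.util.List;
import java.util.TreeSet;

public class SortedProjectNames {
    public static void main(String[] args) {
        // Comparator-backed TreeSet; note that names equal ignoring case would
        // be treated as duplicates and collapse to a single element.
        TreeSet<String> sorted = Sets.<String>newTreeSet((a, b) -> a.compareToIgnoreCase(b));
        sorted.addAll(List.of("beta", "Alpha", "gamma"));
        System.out.println(sorted); // [Alpha, beta, gamma]
    }
}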
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Sets in project CzechIdMng by bcvsolutions.
The class DefaultIdmRoleRequestService, method executeRequestInternal:
private IdmRoleRequestDto executeRequestInternal(EntityEvent<IdmRoleRequestDto> requestEvent) {
    UUID requestId = requestEvent.getContent().getId();
    Assert.notNull(requestId, "Role request ID is required!");
    IdmRoleRequestDto request = this.get(requestId, new IdmRoleRequestFilter(true));
    Assert.notNull(request, "Role request is required!");
    List<IdmConceptRoleRequestDto> concepts = request.getConceptRoles();
    IdmIdentityDto identity = identityService.get(request.getApplicant());
    boolean identityNotSame = concepts.stream().anyMatch(concept -> {
        // get the contract DTO from the embedded map
        IdmIdentityContractDto contract = (IdmIdentityContractDto) concept.getEmbedded()
                .get(IdmConceptRoleRequestService.IDENTITY_CONTRACT_FIELD);
        if (contract == null) {
            contract = identityContractService.get(concept.getIdentityContract());
        }
        Assert.notNull(contract, "Contract cannot be empty!");
        return !identity.getId().equals(contract.getIdentity());
    });
    if (identityNotSame) {
        throw new RoleRequestException(CoreResultCode.ROLE_REQUEST_APPLICANTS_NOT_SAME,
                ImmutableMap.of("request", request, "applicant", identity.getUsername()));
    }
    // Add changed identity-roles to the event (prevents a redundant search). We will use them for recalculations (ACM / provisioning).
    // Beware: the sets have to be created here, because otherwise they would not be propagated to the sub-event (role-request -> identity-role event)!
    requestEvent.getProperties().put(IdentityRoleEvent.PROPERTY_ASSIGNED_NEW_ROLES, Sets.newHashSet());
    requestEvent.getProperties().put(IdentityRoleEvent.PROPERTY_ASSIGNED_UPDATED_ROLES, Sets.newHashSet());
    requestEvent.getProperties().put(IdentityRoleEvent.PROPERTY_ASSIGNED_REMOVED_ROLES, Sets.newHashSet());
    requestEvent.getProperties().put(IdmAccountDto.IDENTITY_ACCOUNT_FOR_DELAYED_ACM, Sets.newHashSet());
    requestEvent.getProperties().put(IdmAccountDto.ACCOUNT_FOR_ADDITIONAL_PROVISIONING, Sets.newHashSet());
    // Keep only concepts that were approved (or that are still in the CONCEPT state, i.e. the approval event is disabled).
    List<IdmConceptRoleRequestDto> approvedConcepts = concepts.stream()
            .filter(concept -> RoleRequestState.APPROVED == concept.getState()
                    || RoleRequestState.CONCEPT == concept.getState())
            .collect(Collectors.toList());
    // Add concepts for business roles.
    List<IdmIdentityRoleDto> allAssignedRoles = identityRoleService.findAllByIdentity(identity.getId());
    List<IdmConceptRoleRequestDto> allApprovedConcepts = appendBusinessRoleConcepts(approvedConcepts, allAssignedRoles);
    // Create new identity roles.
    allApprovedConcepts.stream()
            .filter(concept -> ConceptRoleRequestOperation.ADD == concept.getOperation())
            .forEach(concept -> {
                if (!cancelInvalidConcept(allAssignedRoles, concept, request)) {
                    // assign new role
                    createAssignedRole(allApprovedConcepts, concept, request, requestEvent);
                }
                flushHibernateSession();
            });
    // Update identity roles.
    allApprovedConcepts.stream()
            .filter(concept -> ConceptRoleRequestOperation.UPDATE == concept.getOperation())
            .forEach(concept -> {
                if (!cancelInvalidConcept(allAssignedRoles, concept, request)) {
                    updateAssignedRole(allApprovedConcepts, concept, request, requestEvent);
                }
                flushHibernateSession();
            });
    // Delete identity sub-roles first (prevents referential-integrity problems when deleting sub-roles).
    allApprovedConcepts.stream()
            .filter(concept -> ConceptRoleRequestOperation.REMOVE == concept.getOperation())
            .filter(concept -> concept.getDirectConcept() != null)
            .forEach(concept -> {
                if (!cancelInvalidConcept(allAssignedRoles, concept, request)) {
                    removeAssignedRole(concept, request, requestEvent);
                }
                flushHibernateSession();
            });
    // Delete direct identity roles.
    allApprovedConcepts.stream()
            .filter(concept -> ConceptRoleRequestOperation.REMOVE == concept.getOperation())
            .filter(concept -> concept.getDirectConcept() == null)
            .forEach(concept -> {
                if (!cancelInvalidConcept(allAssignedRoles, concept, request)) {
                    removeAssignedRole(concept, request, requestEvent);
                }
                flushHibernateSession();
            });
    return this.save(request);
}
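The reason the event properties are seeded with Sets.newHashSet() up front, rather than lazily, is that a mutable instance stored before processing is shared by reference with the sub-events, which can then accumulate results the parent event will see. A minimal sketch of that sharing pattern, with plain string keys standing in for the IdentityRoleEvent/IdmAccountDto constants:

import com.google.common.collect.Sets;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class SharedEventProperties {
    public static void main(String[] args) {
        Map<String, Set<String>> properties = new HashMap<>();
        // Seed the mutable set before any sub-event runs.
        properties.put("ASSIGNED_NEW_ROLES", Sets.newHashSet());
        // A sub-event handler adds to the very same instance the parent holds.
        properties.get("ASSIGNED_NEW_ROLES").add("role-1");
        System.out.println(properties.get("ASSIGNED_NEW_ROLES")); // [role-1]
    }
}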
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Sets in project kylo by Teradata.
The class DefaultFeedManagerFeedService, method assignFeedDatasources:
/**
* Assign the feed sources/destinations
*
* @param feed the feed rest model
* @param domainFeed the domain feed
*/
private void assignFeedDatasources(FeedMetadata feed, Feed domainFeed) {
    final Feed.ID domainFeedId = domainFeed.getId();
    Set<com.thinkbiganalytics.metadata.api.catalog.DataSet.ID> sourceDataSets = new HashSet<>();
    Set<com.thinkbiganalytics.metadata.api.datasource.Datasource.ID> sourceDatasources = new HashSet<>();
    Set<com.thinkbiganalytics.metadata.api.datasource.Datasource.ID> destinationDatasources = new HashSet<>();
    String uniqueName = FeedNameUtil.fullName(feed.getCategory().getSystemName(), feed.getSystemFeedName());
    RegisteredTemplate template = feed.getRegisteredTemplate();
    if (template == null) {
        // fetch it for checks
        template = templateRestProvider.getRegisteredTemplate(feed.getTemplateId());
    }
    // Collect the IDs of the legacy datasources the feed had referenced.
    Set<Datasource.ID> previousSourceIds = domainFeed.getSources().stream()
            .filter(fs -> fs.getDatasource().isPresent())
            .map(fs -> fs.getDatasource().get().getId())
            .collect(Collectors.toSet());
    Set<Datasource.ID> previousDestIds = domainFeed.getDestinations().stream()
            // Currently always true, as there are no destination data sets yet.
            .filter(fs -> fs.getDatasource().isPresent())
            .map(fs -> fs.getDatasource().get().getId())
            .collect(Collectors.toSet());
    boolean isSampleDataSet = isTreatSourceDataSetsAsSample(feed, template);
    // find Definition registration
    derivedDatasourceFactory.populateDatasources(feed, template, sourceDatasources, destinationDatasources);
    // Replace the older legacy datasource references with the new ones.
    previousSourceIds.stream()
            .filter(id -> !sourceDatasources.contains(id))
            .forEach(id -> feedProvider.removeFeedSource(domainFeedId, id));
    sourceDatasources.forEach(sourceId -> feedProvider.ensureFeedSource(domainFeedId, sourceId));
    previousDestIds.stream()
            .filter(id -> !destinationDatasources.contains(id))
            .forEach(id -> feedProvider.removeFeedDestination(domainFeedId, id));
    destinationDatasources.forEach(destinationId -> feedProvider.ensureFeedDestination(domainFeedId, destinationId));
    // Update data sets.
    if (feed.getSourceDataSets() != null) {
        // Collect the IDs of source data sets the feed had referenced.
        Set<com.thinkbiganalytics.metadata.api.catalog.DataSet.ID> currentDataSetIds = domainFeed.getSources().stream()
                .map(FeedSource::getDataSet)
                .filter(Optional::isPresent)
                .map(Optional::get)
                .map(com.thinkbiganalytics.metadata.api.catalog.DataSet::getId)
                .collect(Collectors.toSet());
        Set<com.thinkbiganalytics.metadata.api.catalog.DataSet.ID> newDataSetIds = new HashSet<>();
        feed.getSourceDataSets().forEach(dataSet -> {
            com.thinkbiganalytics.metadata.api.catalog.DataSet addedDataSet;
            if (dataSet.getId() == null) {
                DataSource.ID dataSourceId = dataSourceProvider.resolveId(dataSet.getDataSource().getId());
                dataSourceProvider.find(dataSourceId).orElseThrow(() -> new DataSourceNotFoundException(dataSourceId));
                addedDataSet = catalogModelTransform.buildDataSet(dataSet, dataSetProvider.build(dataSourceId));
            } else {
                com.thinkbiganalytics.metadata.api.catalog.DataSet.ID dataSetId = dataSetProvider.resolveId(dataSet.getId());
                addedDataSet = dataSetProvider.find(dataSetId).orElseThrow(() -> new DataSetNotFoundException(dataSetId));
            }
            newDataSetIds.add(addedDataSet.getId());
            catalogModelTransform.updateDataSet(dataSet, addedDataSet);
            feedProvider.ensureFeedSource(domainFeedId, addedDataSet.getId(), isSampleDataSet);
        });
        // Remove any data set sources no longer referenced in the updated feed.
        currentDataSetIds.stream()
                .filter(id -> !newDataSetIds.contains(id))
                .forEach(id -> feedProvider.removeFeedSource(domainFeedId, id));
    }
}
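Although this method builds its sets with plain HashSet, the reconciliation steps (remove IDs that were referenced before but not anymore, ensure the rest) are exactly what Guava's Sets.difference expresses declaratively. A sketch of the equivalent set algebra, with string IDs standing in for Datasource.ID:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

public class DatasourceReconcile {
    public static void main(String[] args) {
        Set<String> previousIds = ImmutableSet.of("ds-1", "ds-2");
        Set<String> currentIds = ImmutableSet.of("ds-2", "ds-3");
        // Same result as previousIds.stream().filter(id -> !currentIds.contains(id)):
        Set<String> toRemove = Sets.difference(previousIds, currentIds);
        Set<String> toAdd = Sets.difference(currentIds, previousIds);
        System.out.println("remove=" + toRemove + " add=" + toAdd); // remove=[ds-1] add=[ds-3]
    }
}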