Use of eu.etaxonomy.cdm.model.term.TermVocabulary in project cdmlib by cybertaxonomy.
The class SDDDocumentBuilder, method buildCharacters:
/**
 * Builds Characters associated with the Dataset
 */
public void buildCharacters(ElementImpl dataset) throws ParseException {
    if (cdmSource.getTerms() != null) {
        ElementImpl elCharacters = new ElementImpl(document, CHARACTERS);
        int f = cdmSource.getTerms().size();
        for (int i = 0; i < f; i++) {
            if (cdmSource.getTerms().get(i) instanceof Feature) {
                Feature character = (Feature) cdmSource.getTerms().get(i);
                if (character.isSupportsQuantitativeData()) {
                    ElementImpl elQuantitativeCharacter = new ElementImpl(document, QUANTITATIVE_CHARACTER);
                    charactersCount = buildReference(character, characters, ID, elQuantitativeCharacter, "c", charactersCount);
                    buildRepresentation(elQuantitativeCharacter, character);
                    elCharacters.appendChild(elQuantitativeCharacter);
                }
                if (character.isSupportsCategoricalData()) {
                    ElementImpl elCategoricalCharacter = new ElementImpl(document, CATEGORICAL_CHARACTER);
                    charactersCount = buildReference(character, characters, ID, elCategoricalCharacter, "c", charactersCount);
                    buildRepresentation(elCategoricalCharacter, character);
                    Set<TermVocabulary<State>> enumerations = character.getSupportedCategoricalEnumerations();
                    if (enumerations != null) {
                        if (enumerations.size() > 0) {
                            ElementImpl elStates = new ElementImpl(document, STATES);
                            TermVocabulary tv = (TermVocabulary) enumerations.toArray()[0];
                            Set<State> stateList = tv.getTerms();
                            for (int j = 0; j < stateList.size(); j++) {
                                ElementImpl elStateDefinition = new ElementImpl(document, STATE_DEFINITION);
                                State state = (State) stateList.toArray()[j];
                                statesCount = buildReference(state, states, ID, elStateDefinition, "s", statesCount);
                                buildRepresentation(elStateDefinition, state);
                                elStates.appendChild(elStateDefinition);
                            }
                            elCategoricalCharacter.appendChild(elStates);
                            elCharacters.appendChild(elCategoricalCharacter);
                        }
                    }
                }
                if (character.isSupportsTextData()) {
                    ElementImpl elTextCharacter = new ElementImpl(document, TEXT_CHARACTER);
                    textcharactersCount = buildReference(character, characters, ID, elTextCharacter, TEXT, textcharactersCount);
                    buildRepresentation(elTextCharacter, character);
                    elCharacters.appendChild(elTextCharacter);
                }
            }
        }
        dataset.appendChild(elCharacters);
    }
}
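The method nests QUANTITATIVE_CHARACTER, CATEGORICAL_CHARACTER and TEXT_CHARACTER elements under one CHARACTERS element, and for each categorical character it adds a STATES block with one STATE_DEFINITION per state taken from the first supported categorical enumeration (a TermVocabulary<State>). The following is a minimal sketch of that element nesting using only the standard JDK DOM API rather than the Xerces ElementImpl wrapper used above; the literal element names are assumptions based on the SDD schema names that the constants in the method appear to refer to:

    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;

    public class SddCharactersSketch {
        public static void main(String[] args) throws Exception {
            Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();

            // <Characters> container, built once per dataset in buildCharacters()
            Element characters = doc.createElement("Characters");

            // one <CategoricalCharacter> per feature that supports categorical data ...
            Element categorical = doc.createElement("CategoricalCharacter");
            categorical.setAttribute("id", "c1");

            // ... holding <States>/<StateDefinition> children, one per State of the
            // first supported categorical enumeration of the feature
            Element states = doc.createElement("States");
            Element stateDefinition = doc.createElement("StateDefinition");
            stateDefinition.setAttribute("id", "s1");
            states.appendChild(stateDefinition);
            categorical.appendChild(states);

            characters.appendChild(categorical);
            doc.appendChild(characters);
        }
    }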
Use of eu.etaxonomy.cdm.model.term.TermVocabulary in project cdmlib by cybertaxonomy.
The class TestingTermVocabularyDao, method findByUuid:
protected TermVocabulary<?> findByUuid(UUID uuid) throws DataAccessException {
    Session session = getSession();
    Criteria crit = session.createCriteria(TermVocabulary.class);
    crit.add(Restrictions.eq("uuid", uuid));
    crit.addOrder(Order.desc("created"));
    @SuppressWarnings("unchecked")
    List<TermVocabulary<?>> results = crit.list();
    if (results.isEmpty()) {
        return null;
    } else {
        if (results.size() > 1) {
            logger.error("findByUuid() delivers more than one result for UUID: " + uuid);
        }
        return results.get(0);
    }
}
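The org.hibernate.Criteria API used above has been deprecated since Hibernate 5.2. As a sketch only, assuming Hibernate 5.x and reusing the getSession() and logger members of the surrounding DAO, the same lookup can be written as an HQL query with identical semantics (filter by uuid, newest entry first):

    @SuppressWarnings("unchecked")
    protected TermVocabulary<?> findByUuidHql(UUID uuid) {
        Session session = getSession();
        // same semantics as the Criteria version: restrict on uuid, order by created descending
        List<TermVocabulary<?>> results = session
                .createQuery("from TermVocabulary v where v.uuid = :uuid order by v.created desc")
                .setParameter("uuid", uuid)
                .list();
        if (results.isEmpty()) {
            return null;
        }
        if (results.size() > 1) {
            logger.error("findByUuid() delivers more than one result for UUID: " + uuid);
        }
        return results.get(0);
    }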
Use of eu.etaxonomy.cdm.model.term.TermVocabulary in project cdmlib by cybertaxonomy.
The class FeatureDto, method termDtoListFrom:
public static List<TermDto> termDtoListFrom(List<Object[]> results) {
    // list to ensure order
    List<TermDto> dtos = new ArrayList<>();
    // map to handle multiple representations/media/vocRepresentation because of LEFT JOIN
    Map<UUID, TermDto> dtoMap = new HashMap<>(results.size());
    for (Object[] elements : results) {
        UUID uuid = (UUID) elements[0];
        if (dtoMap.containsKey(uuid)) {
            // multiple results for one term -> multiple (voc) representation/media
            if (elements[1] != null) {
                dtoMap.get(uuid).addRepresentation((Representation) elements[1]);
            }
            // }
            if (elements[9] != null) {
                dtoMap.get(uuid).addMedia(((Media) elements[9]).getUuid());
            }
        } else {
            // term representation
            Set<Representation> representations = new HashSet<>();
            if (elements[1] instanceof Representation) {
                representations = new HashSet<Representation>(1);
                representations.add((Representation) elements[1]);
            }
            // term media
            Set<UUID> mediaUuids = new HashSet<>();
            if (elements[9] instanceof Media) {
                mediaUuids.add(((Media) elements[9]).getUuid());
            }
            // voc representation
            // Set<Representation> vocRepresentations = new HashSet<>();
            // if (elements[7] instanceof Representation) {
            //     vocRepresentations = new HashSet<Representation>(7);
            //     vocRepresentations.add((Representation) elements[7]);
            // }
            boolean isAvailableForTaxon = false;
            boolean isAvailableForTaxonName = false;
            boolean isAvailableForOccurrence = false;
            EnumSet<CdmClass> availableForString = (EnumSet<CdmClass>) elements[10];
            if (availableForString.contains(CdmClass.TAXON)) {
                isAvailableForTaxon = true;
            }
            if (availableForString.contains(CdmClass.TAXON_NAME)) {
                isAvailableForTaxonName = true;
            }
            if (availableForString.contains(CdmClass.OCCURRENCE)) {
                isAvailableForOccurrence = true;
            }
            boolean isSupportsCategoricalData = false;
            boolean isSupportsQuantitativeData = false;
            EnumSet<CdmClass> supportsString = (EnumSet<CdmClass>) elements[12];
            if (supportsString.contains(CdmClass.CATEGORICAL_DATA)) {
                isSupportsCategoricalData = true;
            }
            if (supportsString.contains(CdmClass.QUANTITATIVE_DATA)) {
                isSupportsQuantitativeData = true;
            }
            Object o = elements[13];
            Set<TermVocabularyDto> recommendedModifierDtos = new HashSet<>();
            if (o instanceof TermVocabulary) {
                recommendedModifierDtos.add(TermVocabularyDto.fromVocabulary((TermVocabulary) o));
            } else if (o instanceof Set) {
                Set<TermVocabulary<DefinedTerm>> recommendedModifierEnumeration = (Set<TermVocabulary<DefinedTerm>>) o;
                if (recommendedModifierEnumeration != null) {
                    for (TermVocabulary<DefinedTerm> voc : recommendedModifierEnumeration) {
                        recommendedModifierDtos.add(TermVocabularyDto.fromVocabulary(voc));
                    }
                }
            }
            o = elements[14];
            Set<TermDto> recommendedStatisticalMeasuresDtos = new HashSet<>();
            if (o instanceof StatisticalMeasure) {
                recommendedStatisticalMeasuresDtos.add(TermDto.fromTerm((StatisticalMeasure) o));
            } else if (o instanceof Set) {
                Set<StatisticalMeasure> recommendedStatisticalMeasures = new HashSet((Set<StatisticalMeasure>) o);
                if (recommendedStatisticalMeasures != null) {
                    for (StatisticalMeasure term : recommendedStatisticalMeasures) {
                        recommendedStatisticalMeasuresDtos.add(TermDto.fromTerm(term));
                    }
                }
            }
            o = elements[15];
            Set<TermVocabularyDto> supportedCategoricalDtos = new HashSet<>();
            if (o instanceof TermVocabulary) {
                supportedCategoricalDtos.add(TermVocabularyDto.fromVocabulary((TermVocabulary) o));
            } else if (o instanceof Set) {
                Set<TermVocabulary> supportedCategoricalEnumerations = (Set<TermVocabulary>) o;
                for (TermVocabulary<State> voc : supportedCategoricalEnumerations) {
                    supportedCategoricalDtos.add(TermVocabularyDto.fromVocabulary(voc));
                }
            }
            o = elements[16];
            Set<TermDto> recommendedMeasurementUnitsDtos = new HashSet<>();
            if (o instanceof MeasurementUnit) {
                recommendedMeasurementUnitsDtos.add(TermDto.fromTerm((MeasurementUnit) o));
            } else if (o instanceof Set) {
                Set<MeasurementUnit> recommendedMeasurementUnits = (Set<MeasurementUnit>) elements[16];
                for (MeasurementUnit term : recommendedMeasurementUnits) {
                    recommendedMeasurementUnitsDtos.add(TermDto.fromTerm(term));
                }
            }
            TermDto termDto = new FeatureDto(uuid, representations, (UUID) elements[2], (UUID) elements[3],
                    (UUID) elements[4], (Integer) elements[5], (String) elements[6], // vocRepresentations,
                    isAvailableForTaxon, isAvailableForTaxonName, isAvailableForOccurrence, (String) elements[11],
                    isSupportsCategoricalData, isSupportsQuantitativeData, supportedCategoricalDtos,
                    recommendedModifierDtos, recommendedMeasurementUnitsDtos, recommendedStatisticalMeasuresDtos);
            termDto.setUri((URI) elements[8]);
            termDto.setMedia(mediaUuids);
            dtoMap.put(uuid, termDto);
            dtos.add(termDto);
        }
    }
    return dtos;
}
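termDtoListFrom() folds the Object[] rows of a LEFT JOIN projection into one DTO per term: the first row for a term UUID creates the DTO, and every later row for the same UUID only contributes its additional representation or media entry. Below is a self-contained sketch of that accumulation pattern; Row and Dto are hypothetical stand-ins for the projection row and for FeatureDto:

    import java.util.*;

    public class JoinRowFolding {

        // stand-ins for the projection row and the DTO built from it
        record Row(UUID termUuid, String representation) {}

        static class Dto {
            final UUID uuid;
            final Set<String> representations = new LinkedHashSet<>();
            Dto(UUID uuid) { this.uuid = uuid; }
        }

        static List<Dto> fold(List<Row> rows) {
            List<Dto> dtos = new ArrayList<>();          // list to keep the query order
            Map<UUID, Dto> byUuid = new HashMap<>();     // map to merge duplicate join rows
            for (Row row : rows) {
                Dto dto = byUuid.get(row.termUuid());
                if (dto == null) {                       // first row for this term: create the DTO
                    dto = new Dto(row.termUuid());
                    byUuid.put(row.termUuid(), dto);
                    dtos.add(dto);
                }
                if (row.representation() != null) {      // further rows only add multi-valued data
                    dto.representations.add(row.representation());
                }
            }
            return dtos;
        }

        public static void main(String[] args) {
            UUID u = UUID.randomUUID();
            List<Dto> result = fold(List.of(new Row(u, "label@en"), new Row(u, "label@de")));
            System.out.println(result.size() + " term(s), " + result.get(0).representations);
        }
    }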
Use of eu.etaxonomy.cdm.model.term.TermVocabulary in project cdmlib by cybertaxonomy.
The class Cdm2CdmVocabularyImportTest, method testInvokeVocabulary:
@Test
public void testInvokeVocabulary() {
    @SuppressWarnings("unchecked")
    TermVocabulary<DefinedTerm> voc = vocService.find(uuidStructVoc);
    Assert.assertNull("Vocabulary must not exist before invoke", voc);
    ImportResult result = defaultImport.invoke(this.configurator);
    Assert.assertTrue(result.isSuccess());
    commitAndStartNewTransaction();

    voc = vocService.find(uuidStructVoc);
    Assert.assertNotNull("Vocabulary must exist after invoke", voc);
    @SuppressWarnings("unchecked")
    TermVocabulary<DefinedTerm> otherVoc = otherRepository.getVocabularyService().find(uuidStructVoc);
    Assert.assertNotSame(otherVoc, voc);
    Assert.assertEquals(1, voc.getTerms().size());

    // add term in other
    UUID uuidSecond = UUID.fromString("56546e58-e4ea-47f9-ae49-de772a416003");
    DefinedTerm secondTerm = getStructure("2.", uuidSecond);
    TransactionStatus tx = otherRepository.startTransaction();
    otherVoc = otherRepository.getVocabularyService().find(uuidStructVoc);
    otherVoc.addTerm(secondTerm);
    otherRepository.getTermService().saveOrUpdate(secondTerm);
    otherRepository.commitTransaction(tx);

    // test if added term gets imported
    commitAndStartNewTransaction();
    voc = vocService.find(uuidStructVoc);
    Assert.assertEquals(1, voc.getTerms().size());
    commitAndStartNewTransaction();
    result = defaultImport.invoke(this.configurator);
    commitAndStartNewTransaction();
    voc = vocService.find(uuidStructVoc);
    Assert.assertEquals(2, voc.getTerms().size());
    Assert.assertTrue("As contains works on equal() the term should be contained",
            voc.getTerms().contains(secondTerm));
    voc.getTerms().stream()
            .filter(a -> a.getUuid().equals(uuidSecond))
            .forEach(t -> {
                Assert.assertEquals(secondTerm, t);
                Assert.assertNotSame(secondTerm, t);
            });

    // test invoke for graph
    configurator.setGraphFilter(new HashSet<>(Arrays.asList(uuidStructGraph)));
    TermTree<DefinedTerm> graph = treeService.find(uuidStructGraph);
    Assert.assertNull("Graph must not exist before invoke", graph);
    result = defaultImport.invoke(this.configurator);
    Assert.assertTrue(result.isSuccess());
    commitAndStartNewTransaction();
    graph = treeService.find(uuidStructGraph);
    Assert.assertNotNull("Graph must exist after invoke", graph);
    TransactionStatus txOther = otherRepository.startTransaction();
    @SuppressWarnings("unchecked")
    TermTree<DefinedTerm> otherGraph = otherRepository.getTermTreeService().find(uuidStructGraph);
    Assert.assertNotSame(otherGraph, graph);
    Assert.assertEquals(otherGraph.getRoot(), graph.getRoot());
    Assert.assertNotSame(otherGraph.getRoot(), graph.getRoot());
    Assert.assertEquals(1, graph.getRootChildren().size());
    TermNode<DefinedTerm> otherSingleChild = otherGraph.getRootChildren().iterator().next();
    TermNode<DefinedTerm> thisSingleChild = graph.getRootChildren().iterator().next();
    Assert.assertEquals(otherSingleChild, thisSingleChild);
    Assert.assertNotSame(otherSingleChild, thisSingleChild);
    otherRepository.commitTransaction(txOther);
}
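The combination of assertEquals and assertNotSame in this test only works because CDM entities define equality by value (the test's own comment points out that contains() works on equals(), presumably keyed on the entity UUID), so the imported copy of a term is equal to the source term while being a different object. A self-contained sketch of that distinction, using a hypothetical UuidEntity class rather than any real cdmlib type:

    import java.util.Objects;
    import java.util.UUID;

    public class EqualVsSame {

        // hypothetical stand-in for a CDM term: equality is defined by the UUID only
        static class UuidEntity {
            final UUID uuid;
            UuidEntity(UUID uuid) { this.uuid = uuid; }
            @Override public boolean equals(Object o) {
                return o instanceof UuidEntity other && uuid.equals(other.uuid);
            }
            @Override public int hashCode() { return Objects.hash(uuid); }
        }

        public static void main(String[] args) {
            UUID sharedUuid = UUID.randomUUID();
            UuidEntity source = new UuidEntity(sharedUuid);   // term in the source repository
            UuidEntity imported = new UuidEntity(sharedUuid); // copy created by the import

            System.out.println(source.equals(imported)); // true  -> assertEquals / contains() succeed
            System.out.println(source == imported);      // false -> assertNotSame succeeds
        }
    }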
Use of eu.etaxonomy.cdm.model.term.TermVocabulary in project cdmlib by cybertaxonomy.
The class StructureTreeOwlImportTest, method testImportStructureTree:
@Test
@DataSet(value = "/eu/etaxonomy/cdm/database/BlankDataSet.xml")
public void testImportStructureTree() throws URISyntaxException {
    URL url = this.getClass().getResource("/eu/etaxonomy/cdm/io/owl/in/test_structures.owl");
    URI uri = URI.fromUrl(url);
    assertNotNull(url);
    StructureTreeOwlImportConfigurator configurator = StructureTreeOwlImportConfigurator.NewInstance(uri);
    boolean result = defaultImport.invoke(configurator).isSuccess();
    assertTrue("Return value for import.invoke should be true", result);
    this.setComplete();
    this.endTransaction();

    String treeLabel = "test_structures";
    List<TermTree> trees = termTreeServcie.listByTitle(TermTree.class, treeLabel, MatchMode.EXACT, null, null, null, null, null);
    List<String> nodeProperties = new ArrayList<>();
    nodeProperties.add("term");
    nodeProperties.add("term.media");
    TermTree<Feature> tree = termTreeServcie.loadWithNodes(trees.iterator().next().getUuid(), null, nodeProperties);
    assertNotNull("featureTree should not be null", tree);
    assertEquals("Tree has wrong term type", TermType.Structure, tree.getTermType());
    assertEquals("Wrong number of distinct features", 4, tree.getDistinctTerms().size());
    List rootChildren = tree.getRootChildren();
    assertEquals("Wrong number of root children", 1, rootChildren.size());
    Object entirePlant = rootChildren.iterator().next();
    assertTrue("Root is no feature node", entirePlant instanceof TermNode);
    assertEquals("Root node has wrong term type", TermType.Structure, ((TermNode) entirePlant).getTermType());
    TermNode<DefinedTerm> entirePlantNode = (TermNode<DefinedTerm>) entirePlant;
    List<TermNode<DefinedTerm>> childNodes = entirePlantNode.getChildNodes();
    assertEquals("Wrong number of children", 2, childNodes.size());

    String inflorescenceLabel = "inflorescence";
    String inflorescenceDescription = " the part of the plant that bears the flowers, including all its bracts branches and flowers but excluding unmodified leaves ";
    List<DefinedTerm> records = termService.findByRepresentationText(inflorescenceDescription, DefinedTerm.class, null, null).getRecords();
    assertEquals("wrong number of terms found for \"inflorescence\"", 1, records.size());
    DefinedTerm inflorescence = records.iterator().next();
    assertEquals(inflorescenceLabel, inflorescence.getLabel(Language.ENGLISH()));
    for (TermNode<DefinedTerm> termNode : childNodes) {
        assertTrue("Child node not found. Found node with term: " + termNode.getTerm().getLabel(),
                termNode.getTerm().getUuid().equals(inflorescence.getUuid())
                        || termNode.getTerm().getLabel(Language.ENGLISH()).equals("Flower"));
        if (termNode.getTerm().getUuid().equals(inflorescence.getUuid())) {
            assertEquals("Term mismatch", inflorescence, termNode.getTerm());
            inflorescence = termNode.getTerm();
            assertEquals("wrong id in vocabulary", "inflorescence", inflorescence.getIdInVocabulary());
            assertEquals("wrong symbol", "infloSymbol", inflorescence.getSymbol());
            assertEquals("wrong symbol2", "infloSymbol2", inflorescence.getSymbol2());
            Set<Media> mediaSet = inflorescence.getMedia();
            assertEquals("wrong number of media", 1, mediaSet.size());
            Media media = mediaSet.iterator().next();
            MediaRepresentationPart part = MediaUtils.getFirstMediaRepresentationPart(media);
            assertNotNull("media part not found", part);
            assertEquals("incorrect URI",
                    URI.create("https://upload.wikimedia.org/wikipedia/commons/8/82/Aloe_hereroensis_Auob_C15.JPG"),
                    part.getUri());
            assertEquals("incorrect title", "Aloe hereroensis", media.getTitle(Language.DEFAULT()).getText());
            Representation englishRepresentation = inflorescence.getRepresentation(Language.ENGLISH());
            assertTrue("Description not found", CdmUtils.isNotBlank(englishRepresentation.getDescription()));
            assertEquals("Description wrong", inflorescenceDescription, englishRepresentation.getDescription());
            assertEquals("wrong plural", "inflorescences", englishRepresentation.getPlural());
            assertEquals("wrong label abbrev", "inflo", englishRepresentation.getAbbreviatedLabel());
            // german representation
            assertEquals("wrong number of representations", 2, inflorescence.getRepresentations().size());
            Representation germanRepresentation = inflorescence.getRepresentation(Language.GERMAN());
            assertNotNull("Representation is null for " + Language.GERMAN(), germanRepresentation);
            assertEquals("wrong description", "Der Teil der Pflanze, der die Bluete traegt", germanRepresentation.getDescription());
            assertEquals("wrong label", "Infloreszenz", germanRepresentation.getLabel());
        }
    }
    assertNotNull("term is null", inflorescence);
    assertEquals("Wrong term type", TermType.Structure, inflorescence.getTermType());

    String vocLabel = "03 Generative Structures";
    List<TermVocabulary> vocs = vocabularyService.findByTitle(TermVocabulary.class, vocLabel, MatchMode.EXACT, null, null, null, null, Arrays.asList("terms")).getRecords();
    assertEquals("wrong number of vocabularies", 1, vocs.size());
    TermVocabulary termVoc = vocs.iterator().next();
    assertEquals("Wrong vocabulary label", vocLabel, termVoc.getTitleCache());
    Collection<TermDto> topLevelTerms = vocabularyService.getTopLevelTerms(termVoc.getUuid());
    assertEquals("wrong number of top level terms", 4, topLevelTerms.size());
}
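The test resolves its OWL test file from the classpath and wraps it into the cdmlib URI type before passing it to the import configurator. Below is a minimal sketch of that resolution step using only the JDK (java.net.URI instead of eu.etaxonomy.cdm.common.URI, and a hypothetical resource path); URL#toURI() is what makes the method declare URISyntaxException:

    import java.net.URI;
    import java.net.URISyntaxException;
    import java.net.URL;
    import java.util.Objects;

    public class ResourceUriSketch {
        public static void main(String[] args) throws URISyntaxException {
            // hypothetical resource path; the test above uses /eu/etaxonomy/cdm/io/owl/in/test_structures.owl
            URL url = ResourceUriSketch.class.getResource("/owl/in/example_structures.owl");
            Objects.requireNonNull(url, "test resource not found on the classpath");
            // URL#toURI() throws URISyntaxException for URLs that are not valid URIs,
            // which is the exception the test method declares as well
            URI uri = url.toURI();
            System.out.println(uri);
        }
    }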