Use of org.apache.clerezza.commons.rdf.Triple in project stanbol by apache.
The class TestMetaxaCore, method printTriples.
/**
* This prints out the Stanbol Enhancer triples that would be created for the metadata
* contained in the given model.
*
* @param m a {@link Model}
*
* @return an {@code int} with the number of added triples
*/
private int printTriples(Model m) {
    int tripleCounter = 0;
    HashMap<BlankNode, BlankNode> blankNodeMap = new HashMap<BlankNode, BlankNode>();
    ClosableIterator<Statement> it = m.iterator();
    while (it.hasNext()) {
        Statement oneStmt = it.next();
        BlankNodeOrIRI subject = (BlankNodeOrIRI) MetaxaEngine.asClerezzaResource(oneStmt.getSubject(), blankNodeMap);
        IRI predicate = (IRI) MetaxaEngine.asClerezzaResource(oneStmt.getPredicate(), blankNodeMap);
        RDFTerm object = MetaxaEngine.asClerezzaResource(oneStmt.getObject(), blankNodeMap);
        if (null != subject && null != predicate && null != object) {
            Triple t = new TripleImpl(subject, predicate, object);
            LOG.debug("adding " + t);
            tripleCounter++;
        } else {
            LOG.debug("skipped " + oneStmt.toString());
        }
    }
    it.close();
    return tripleCounter;
}
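The same conversion loop can be reused to materialize the triples instead of merely counting them. The following is a minimal sketch, not part of the Stanbol test: a hypothetical collectTriples helper that gathers the converted triples into a list, assuming the same imports as above plus java.util.List and java.util.ArrayList. The shared blankNodeMap keeps blank node identity consistent across statements, exactly as in printTriples.

private List<Triple> collectTriples(Model m) {
    List<Triple> result = new ArrayList<Triple>();
    HashMap<BlankNode, BlankNode> blankNodeMap = new HashMap<BlankNode, BlankNode>();
    ClosableIterator<Statement> it = m.iterator();
    while (it.hasNext()) {
        Statement stmt = it.next();
        // Convert each RDF2Go node to its Clerezza counterpart, reusing mapped blank nodes.
        BlankNodeOrIRI subject = (BlankNodeOrIRI) MetaxaEngine.asClerezzaResource(stmt.getSubject(), blankNodeMap);
        IRI predicate = (IRI) MetaxaEngine.asClerezzaResource(stmt.getPredicate(), blankNodeMap);
        RDFTerm object = MetaxaEngine.asClerezzaResource(stmt.getObject(), blankNodeMap);
        // Skip statements whose nodes could not be converted.
        if (subject != null && predicate != null && object != null) {
            result.add(new TripleImpl(subject, predicate, object));
        }
    }
    it.close();
    return result;
}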
Use of org.apache.clerezza.commons.rdf.Triple in project stanbol by apache.
The class ClerezzaOntologyProvider, method fillImportsReverse.
/**
* Fills a reverse stack of import targets for the graph identified by key <tt>importing</tt>. The import
* tree is visited in <i>pre-order</i> and the stack is filled accordingly. Optionally, a second stack can
* be supplied to store only the level 1 imports. This can be used for preserving the original import tree
* structure.<br>
* <br>
* TODO there should be a more space-efficient implementation.
*
* @param importing
* the key of the root graph, which will be at the bottom of every list.
* @param reverseImports
* the list that will store all import target keys in pre-order.
* @param level1Imports
* a second list that will store the level 1 import target keys, and is not passed to recursive
* calls. Will be ignored if null.
*/
private void fillImportsReverse(OWLOntologyID importing,
                                List<OWLOntologyID> reverseImports,
                                List<OWLOntologyID> level1Imports) {
    log.debug("Filling reverse imports for {}", importing);
    // Add the importing ontology first
    reverseImports.add(importing);
    if (level1Imports != null)
        level1Imports.add(importing);
    // Get the graph and explore its imports
    // store.getTriples(importing);
    Graph graph = getStoredOntology(/* getPublicKey */ (importing), Graph.class, false);
    Iterator<Triple> it = graph.filter(null, RDF.type, OWL.Ontology);
    if (!it.hasNext())
        return;
    Iterator<Triple> it2 = graph.filter(it.next().getSubject(), OWL.imports, null);
    while (it2.hasNext()) {
        // obj is the *original* import target
        RDFTerm obj = it2.next().getObject();
        if (obj instanceof IRI) {
            // Right now getKey() is returning the "private" storage ID
            String key = getKey(org.semanticweb.owlapi.model.IRI.create(((IRI) obj).getUnicodeString()));
            // TODO this will not be needed when getKey() and getPublicKey() return the proper public key.
            OWLOntologyID oid = keymap.getReverseMapping(new IRI(key));
            // (Unoptimized, should not use contains() for stacks.)
            if (!reverseImports.contains(oid)) {
                if (level1Imports != null)
                    level1Imports.add(oid);
                fillImportsReverse(oid, reverseImports, null);
            }
        }
    }
}
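A minimal calling sketch, mirroring how toOWLOntology (shown further below) invokes this method. The importingKey variable is illustrative; it stands for the public key of an already stored root graph:

List<OWLOntologyID> reverseImports = new Stack<OWLOntologyID>();
List<OWLOntologyID> level1Imports = new Stack<OWLOntologyID>();
fillImportsReverse(importingKey, reverseImports, level1Imports);
// reverseImports: the root key followed by every transitive import target, in pre-order.
// level1Imports: the root key followed by its direct imports only.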
Use of org.apache.clerezza.commons.rdf.Triple in project stanbol by apache.
The class ClerezzaOntologyProvider, method getOntologyNetworkConfiguration.
public OntologyNetworkConfiguration getOntologyNetworkConfiguration() {
    Map<String, Collection<OWLOntologyID>> coreOntologies = new HashMap<String, Collection<OWLOntologyID>>();
    Map<String, Collection<OWLOntologyID>> customOntologies = new HashMap<String, Collection<OWLOntologyID>>();
    Map<String, Collection<String>> attachedScopes = new HashMap<String, Collection<String>>();
    final Graph meta = store.getGraph(new IRI(metaGraphId));
    // Scopes first
    for (Iterator<Triple> it = meta.filter(null, RDF.type, SCOPE_URIREF); it.hasNext(); ) {
        // for each scope
        Triple ta = it.next();
        BlankNodeOrIRI sub = ta.getSubject();
        if (sub instanceof IRI) {
            String s = ((IRI) sub).getUnicodeString();
            String prefix = _NS_STANBOL_INTERNAL + Scope.shortName + "/";
            if (s.startsWith(prefix)) {
                String scopeId = s.substring(prefix.length());
                log.info("Rebuilding scope \"{}\".", scopeId);
                coreOntologies.put(scopeId, new TreeSet<OWLOntologyID>());
                customOntologies.put(scopeId, new TreeSet<OWLOntologyID>());
                IRI core_ur = null, custom_ur = null;
                RDFTerm r;
                // Check core space
                Iterator<Triple> it2 = meta.filter(sub, HAS_SPACE_CORE_URIREF, null);
                if (it2.hasNext()) {
                    r = it2.next().getObject();
                    if (r instanceof IRI)
                        core_ur = (IRI) r;
                } else {
                    it2 = meta.filter(null, IS_SPACE_CORE_OF_URIREF, sub);
                    if (it2.hasNext()) {
                        r = it2.next().getSubject();
                        if (r instanceof IRI)
                            core_ur = (IRI) r;
                    }
                }
                // Check custom space
                it2 = meta.filter(sub, HAS_SPACE_CUSTOM_URIREF, null);
                if (it2.hasNext()) {
                    r = it2.next().getObject();
                    if (r instanceof IRI)
                        custom_ur = (IRI) r;
                } else {
                    it2 = meta.filter(null, IS_SPACE_CUSTOM_OF_URIREF, sub);
                    if (it2.hasNext()) {
                        r = it2.next().getSubject();
                        if (r instanceof IRI)
                            custom_ur = (IRI) r;
                    }
                }
                // Retrieve the ontologies
                if (core_ur != null) {
                    for (it2 = meta.filter(core_ur, null, null); it2.hasNext(); ) {
                        Triple t = it2.next();
                        IRI predicate = t.getPredicate();
                        if (predicate.equals(MANAGES_URIREF)) {
                            if (t.getObject() instanceof IRI)
                                coreOntologies.get(scopeId).add(// FIXME must be very temporary!
                                    keymap.buildPublicKey((IRI) t.getObject()));
                        }
                    }
                    for (it2 = meta.filter(null, null, core_ur); it2.hasNext(); ) {
                        Triple t = it2.next();
                        IRI predicate = t.getPredicate();
                        if (predicate.equals(IS_MANAGED_BY_URIREF)) {
                            if (t.getSubject() instanceof IRI)
                                coreOntologies.get(scopeId).add(// FIXME must be very temporary!
                                    keymap.buildPublicKey((IRI) t.getSubject()));
                        }
                    }
                }
                if (custom_ur != null) {
                    for (it2 = meta.filter(custom_ur, null, null); it2.hasNext(); ) {
                        Triple t = it2.next();
                        IRI predicate = t.getPredicate();
                        if (predicate.equals(MANAGES_URIREF)) {
                            if (t.getObject() instanceof IRI)
                                customOntologies.get(scopeId).add(// FIXME must be very temporary!
                                    keymap.buildPublicKey((IRI) t.getObject()));
                        }
                    }
                    for (it2 = meta.filter(null, null, custom_ur); it2.hasNext(); ) {
                        Triple t = it2.next();
                        IRI predicate = t.getPredicate();
                        if (predicate.equals(IS_MANAGED_BY_URIREF)) {
                            if (t.getSubject() instanceof IRI)
                                customOntologies.get(scopeId).add(// FIXME must be very temporary!
                                    keymap.buildPublicKey((IRI) t.getSubject()));
                        }
                    }
                }
            }
        }
    }
    // Sessions next
    Map<String, Collection<OWLOntologyID>> sessionOntologies = new HashMap<String, Collection<OWLOntologyID>>();
    for (Iterator<Triple> it = meta.filter(null, RDF.type, SESSION_URIREF); it.hasNext(); ) {
        // for each session
        Triple ta = it.next();
        BlankNodeOrIRI sub = ta.getSubject();
        if (sub instanceof IRI) {
            IRI ses_ur = (IRI) sub;
            String s = ((IRI) sub).getUnicodeString();
            String prefix = _NS_STANBOL_INTERNAL + Session.shortName + "/";
            if (s.startsWith(prefix)) {
                String sessionId = s.substring(prefix.length());
                log.info("Rebuilding session \"{}\".", sessionId);
                sessionOntologies.put(sessionId, new TreeSet<OWLOntologyID>());
                attachedScopes.put(sessionId, new TreeSet<String>());
                // Retrieve the ontologies
                if (ses_ur != null) {
                    for (Iterator<Triple> it2 = meta.filter(ses_ur, MANAGES_URIREF, null); it2.hasNext(); ) {
                        RDFTerm obj = it2.next().getObject();
                        if (obj instanceof IRI)
                            sessionOntologies.get(sessionId).add(// FIXME must be very temporary!
                                keymap.buildPublicKey((IRI) obj));
                    }
                    for (Iterator<Triple> it2 = meta.filter(null, IS_MANAGED_BY_URIREF, ses_ur); it2.hasNext(); ) {
                        RDFTerm subj = it2.next().getSubject();
                        if (subj instanceof IRI)
                            sessionOntologies.get(sessionId).add(// FIXME must be very temporary!
                                keymap.buildPublicKey((IRI) subj));
                    }
                    for (Iterator<Triple> it2 = meta.filter(null, APPENDED_TO_URIREF, ses_ur); it2.hasNext(); ) {
                        RDFTerm subj = it2.next().getSubject();
                        if (subj instanceof IRI) {
                            String s1 = ((IRI) subj).getUnicodeString();
                            String prefix1 = _NS_STANBOL_INTERNAL + Scope.shortName + "/";
                            if (s1.startsWith(prefix1)) {
                                String scopeId = s1.substring(prefix1.length());
                                attachedScopes.get(sessionId).add(scopeId);
                            }
                        }
                    }
                    for (Iterator<Triple> it2 = meta.filter(ses_ur, HAS_APPENDED_URIREF, null); it2.hasNext(); ) {
                        RDFTerm obj = it2.next().getObject();
                        if (obj instanceof IRI) {
                            String s1 = ((IRI) obj).getUnicodeString();
                            String prefix1 = _NS_STANBOL_INTERNAL + Scope.shortName + "/";
                            if (s1.startsWith(prefix1)) {
                                String scopeId = s1.substring(prefix1.length());
                                attachedScopes.get(sessionId).add(scopeId);
                            }
                        }
                    }
                }
            }
        }
    }
    return new OntologyNetworkConfiguration(coreOntologies, customOntologies, sessionOntologies, attachedScopes);
}
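The core and custom space lookups above repeat one pattern four times: try the forward property from the scope, then fall back to the inverse property pointing back at it. A hypothetical helper distilling that pattern is sketched below; findSpace and its signature are illustrative, not part of the Stanbol codebase, and it uses only the Graph.filter() calls already shown:

private static IRI findSpace(Graph meta, BlankNodeOrIRI scope, IRI forward, IRI inverse) {
    // Forward lookup: scope --forward--> space
    Iterator<Triple> it = meta.filter(scope, forward, null);
    if (it.hasNext()) {
        RDFTerm r = it.next().getObject();
        if (r instanceof IRI)
            return (IRI) r;
    }
    // Inverse lookup: space --inverse--> scope
    it = meta.filter(null, inverse, scope);
    if (it.hasNext()) {
        BlankNodeOrIRI r = it.next().getSubject();
        if (r instanceof IRI)
            return (IRI) r;
    }
    return null;
}

With such a helper, the core space check would collapse to core_ur = findSpace(meta, sub, HAS_SPACE_CORE_URIREF, IS_SPACE_CORE_OF_URIREF), and likewise for the custom space.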
Use of org.apache.clerezza.commons.rdf.Triple in project stanbol by apache.
The class ClerezzaOntologyProvider, method toOWLOntology.
/**
 * Exports the stored graph identified by <tt>graphName</tt> as an OWL API ontology.
 *
 * @param graphName
 *            the name of the stored graph to export.
 * @param forceMerge
 *            if set to false, the selected import management policy will be applied.
 * @return the exported {@link OWLOntology}.
 * @throws OWLOntologyCreationException
 */
protected OWLOntology toOWLOntology(IRI graphName, boolean forceMerge) throws OWLOntologyCreationException {
    log.debug("Exporting graph to OWLOntology");
    log.debug(" -- ImmutableGraph name : {}", graphName);
    OWLOntologyManager mgr = OWLManager.createOWLOntologyManager();
    // Never try to import
    mgr.addIRIMapper(new PhonyIRIMapper(Collections.<org.semanticweb.owlapi.model.IRI>emptySet()));
    Set<OWLOntologyID> loaded = new HashSet<OWLOntologyID>();
    Graph graph = store.getGraph(graphName);
    IRI ontologyId = null;
    // Get the id of this ontology.
    Iterator<Triple> itt = graph.filter(null, RDF.type, OWL.Ontology);
    if (itt.hasNext()) {
        BlankNodeOrIRI nl = itt.next().getSubject();
        if (nl instanceof IRI)
            ontologyId = (IRI) nl;
    }
    List<OWLOntologyID> revImps = new Stack<OWLOntologyID>();
    List<OWLOntologyID> lvl1 = new Stack<OWLOntologyID>();
    fillImportsReverse(keymap.getReverseMapping(graphName), revImps, lvl1);
    // If not set to merge (either by policy or by force), adopt the set import policy.
    if (!forceMerge && !ImportManagementPolicy.MERGE.equals(getImportManagementPolicy())) {
        OWLOntology o = OWLAPIToClerezzaConverter.clerezzaGraphToOWLOntology(graph, mgr);
        // TODO make it not flat.
        // Examining the reverse imports stack will flatten all imports.
        List<OWLOntologyChange> changes = new ArrayList<OWLOntologyChange>();
        OWLDataFactory df = OWLManager.getOWLDataFactory();
        List<OWLOntologyID> listToUse;
        switch (getImportManagementPolicy()) {
            case FLATTEN:
                listToUse = revImps;
                break;
            case PRESERVE:
                listToUse = lvl1;
                break;
            default:
                listToUse = lvl1;
                break;
        }
        for (OWLOntologyID ref : listToUse) {
            if (!loaded.contains(ref) && !ref.equals(keymap.getReverseMapping(graphName))) {
                changes.add(new AddImport(o, df.getOWLImportsDeclaration(ref.getOntologyIRI())));
                loaded.add(ref);
            }
        }
        o.getOWLOntologyManager().applyChanges(changes);
        return o;
    } else {
        // If there is just the root ontology, convert it straight away.
        if (revImps.size() == 1 && revImps.contains(graphName)) {
            OWLOntology o = OWLAPIToClerezzaConverter.clerezzaGraphToOWLOntology(graph, mgr);
            return o;
        }
        // FIXME when there's more than one ontology, this way of merging them seems inefficient...
        Graph tempGraph = new IndexedGraph();
        // The set of triples that will be excluded from the merge
        Set<Triple> exclusions = new HashSet<Triple>();
        // Examine all reverse imports
        for (OWLOntologyID ref : revImps) {
            if (!loaded.contains(ref)) {
                // Get the triples
                // store.getTriples(ref);
                Graph imported = getStoredOntology(getKey(ref), Graph.class, false);
                // For each owl:Ontology
                Iterator<Triple> remove = imported.filter(null, RDF.type, OWL.Ontology);
                while (remove.hasNext()) {
                    BlankNodeOrIRI subj = remove.next().getSubject();
                    /*
                     * If it's not the root ontology, trash all its triples. If the root ontology is
                     * anonymous, all ontology annotations are to be trashed without distinction.
                     */
                    if (ontologyId == null || !subj.equals(ontologyId)) {
                        Iterator<Triple> it = imported.filter(subj, null, null);
                        while (it.hasNext()) {
                            Triple t = it.next();
                            exclusions.add(t);
                        }
                    }
                }
                Iterator<Triple> it = imported.iterator();
                while (it.hasNext()) {
                    Triple t = it.next();
                    if (!exclusions.contains(t))
                        tempGraph.add(t);
                }
                loaded.add(ref);
            }
        }
        return OWLAPIToClerezzaConverter.clerezzaGraphToOWLOntology(tempGraph, mgr);
    }
}
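A hedged usage sketch, assuming a graph has already been stored under the given name (the IRI string is illustrative):

// Hypothetical graph name; it must correspond to a graph present in the store.
IRI graphName = new IRI("urn:x-example:stored-graph");
// true: merge the whole import closure into a single ontology, regardless of policy.
OWLOntology merged = toOWLOntology(graphName, true);
// false: honor the configured ImportManagementPolicy (FLATTEN, PRESERVE or MERGE).
OWLOntology managed = toOWLOntology(graphName, false);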
Use of org.apache.clerezza.commons.rdf.Triple in project stanbol by apache.
The class ClerezzaOntologyProvider, method computeAliasClosure.
protected void computeAliasClosure(OWLOntologyID publicKey, Set<OWLOntologyID> target) {
    target.add(publicKey);
    Graph meta = getMetaGraph(Graph.class);
    IRI ont = keymap.buildResource(publicKey);
    Set<RDFTerm> resources = new HashSet<RDFTerm>();
    // Forwards
    for (Iterator<Triple> it = meta.filter(ont, OWL.sameAs, null); it.hasNext(); )
        resources.add(it.next().getObject());
    // Backwards
    for (Iterator<Triple> it = meta.filter(null, OWL.sameAs, ont); it.hasNext(); )
        resources.add(it.next().getSubject());
    for (RDFTerm r : resources) {
        if (r instanceof IRI) {
            OWLOntologyID newKey = keymap.buildPublicKey((IRI) r);
            if (!target.contains(newKey))
                computeAliasClosure(newKey, target);
        }
    }
}
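A short usage sketch (publicKey stands for any known public key and is illustrative). Because the method adds the seed key first and recurses only on keys not yet in the set, the recursion terminates and the set accumulates the full owl:sameAs closure in both directions:

Set<OWLOntologyID> aliases = new HashSet<OWLOntologyID>();
computeAliasClosure(publicKey, aliases);
// aliases now contains publicKey plus every key linked to it via owl:sameAs,
// traversed transitively in both directions.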