
Example 16 with RDFTerm

use of org.apache.clerezza.commons.rdf.RDFTerm in project stanbol by apache.

the class OpenCalaisEngine method queryModel.

/**
     * Extracts the relevant entity information from the Calais RDF data.
     * The entities and the related information are extracted by a SPARQL query.
     *
     * @param model the Graph representing the Calais data
     *
     * @return a Collection of entity information
     * @throws EngineException on a {@link ParseException} while processing the
     * SPARQL query.
     */
public Collection<CalaisEntityOccurrence> queryModel(Graph model) throws EngineException {
    //TODO extract also Geo info (latitude/longitude)?
    String query = "PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> " + "PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> " + "PREFIX p: <http://s.opencalais.com/1/pred/> " + "PREFIX t: <http://s.opencalais.com/1/type/em/e/> " + "SELECT DISTINCT ?id ?did ?name ?type ?dtype ?offset ?length ?exact ?context ?score WHERE { " + "?id p:name ?name ." + "?id rdf:type ?type ." + "?y p:subject ?id ." + "?y p:offset ?offset ." + "?y p:length ?length ." + "?y p:exact ?exact ." + "?y p:detection ?context ." + " OPTIONAL { ?z p:subject ?id . ?z p:relevance ?score . } " + // get disambiguated entity references if available
    " OPTIONAL { ?did p:subject ?id . ?did p:name ?name . ?did rdf:type ?dtype . } " + "FILTER (" + "?type = t:Person || " + "?type = t:City || " + "?type = t:Continent || " + "?type = t:Country || " + "?type = t:ProvinceOrState || " + "?type = t:Region || " + "?type = t:Company || " + "?type = t:Facility || " + "?type = t:Organization " + ")" + "} ";
    Collection<CalaisEntityOccurrence> result = new ArrayList<CalaisEntityOccurrence>();
    try {
        SelectQuery sQuery = (SelectQuery) QueryParser.getInstance().parse(query);
        ResultSet rs = tcManager.executeSparqlQuery(sQuery, model);
        while (rs.hasNext()) {
            SolutionMapping row = rs.next();
            CalaisEntityOccurrence occ = new CalaisEntityOccurrence();
            RDFTerm disambiguated = row.get("did");
            occ.id = (disambiguated == null ? row.get("id") : disambiguated);
            if (onlyNERMode) {
                occ.type = row.get("type");
            } else {
                occ.type = (disambiguated == null ? row.get("type") : row.get("dtype"));
            }
            if (calaisTypeMap != null) {
                IRI mappedType = calaisTypeMap.get(occ.type);
                if (mappedType != null) {
                    occ.type = mappedType;
                }
            }
            occ.name = ((Literal) row.get("name")).getLexicalForm();
            occ.exact = ((Literal) row.get("exact")).getLexicalForm();
            //TODO for html the offsets might not be those of the original document but refer to a cleaned up version?
            occ.offset = Integer.valueOf(((Literal) row.get("offset")).getLexicalForm());
            // remove brackets
            occ.context = ((Literal) row.get("context")).getLexicalForm().replaceAll("[\\[\\]]", "");
            occ.length = Integer.valueOf(((Literal) row.get("length")).getLexicalForm());
            if (row.get("score") != null) {
                occ.relevance = Double.valueOf(((Literal) row.get("score")).getLexicalForm());
            }
            result.add(occ);
        }
    } catch (ParseException e) {
        throw new EngineException("Unable to parse SPARQL query for processing OpenCalais results", e);
    }
    log.info("Found {} occurences", result.size());
    return result;
}
Also used : SelectQuery(org.apache.clerezza.rdf.core.sparql.query.SelectQuery) IRI(org.apache.clerezza.commons.rdf.IRI) BlankNodeOrIRI(org.apache.clerezza.commons.rdf.BlankNodeOrIRI) SolutionMapping(org.apache.clerezza.rdf.core.sparql.SolutionMapping) Literal(org.apache.clerezza.commons.rdf.Literal) ArrayList(java.util.ArrayList) ResultSet(org.apache.clerezza.rdf.core.sparql.ResultSet) EngineException(org.apache.stanbol.enhancer.servicesapi.EngineException) RDFTerm(org.apache.clerezza.commons.rdf.RDFTerm) ParseException(org.apache.clerezza.rdf.core.sparql.ParseException)
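
Each variable bound by the SELECT query above comes back from SolutionMapping.get(...) as an RDFTerm, which the engine narrows with casts to Literal before reading lexical forms. A minimal sketch of that narrowing as a standalone helper; the class and method names are hypothetical and not part of Stanbol:

// Hypothetical helper (not Stanbol code): the usual narrowing of an RDFTerm
// obtained from SolutionMapping.get(...) before reading its value.
import org.apache.clerezza.commons.rdf.IRI;
import org.apache.clerezza.commons.rdf.Literal;
import org.apache.clerezza.commons.rdf.RDFTerm;

final class RdfTermValues {

    private RdfTermValues() {
    }

    static String asString(RDFTerm term) {
        if (term instanceof Literal) {
            // literal bindings (?name, ?exact, ?offset, ...) expose a lexical form
            return ((Literal) term).getLexicalForm();
        }
        if (term instanceof IRI) {
            // resource bindings such as ?id or ?did expose the IRI string
            return ((IRI) term).getUnicodeString();
        }
        // blank nodes (and unbound variables) have no stable string form
        return term == null ? null : term.toString();
    }
}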

Example 17 with RDFTerm

use of org.apache.clerezza.commons.rdf.RDFTerm in project stanbol by apache.

the class TestOpenCalaisEngine method testEntityExtraction.

@Test
public void testEntityExtraction() throws IOException, EngineException {
    String testFile = "calaisresult.owl";
    String format = "application/rdf+xml";
    InputStream in = this.getClass().getClassLoader().getResourceAsStream(testFile);
    Assert.assertNotNull("failed to load resource " + testFile, in);
    Graph model = calaisExtractor.readModel(in, format);
    Assert.assertNotNull("model reader failed with format: " + format, model);
    Collection<CalaisEntityOccurrence> entities;
    try {
        entities = calaisExtractor.queryModel(model);
    } catch (EngineException e) {
        RemoteServiceHelper.checkServiceUnavailable(e);
        return;
    }
    LOG.info("Found entities: {}", entities.size());
    LOG.debug("Entities:\n{}", entities);
    Assert.assertFalse("No entities found!", entities.isEmpty());
    //test the generation of the Enhancements
    ContentItem ci = wrapAsContentItem(TEST_TEXT);
    calaisExtractor.createEnhancements(entities, ci);
    Map<IRI, RDFTerm> expectedValues = new HashMap<IRI, RDFTerm>();
    expectedValues.put(Properties.ENHANCER_EXTRACTED_FROM, ci.getUri());
    expectedValues.put(Properties.DC_CREATOR, LiteralFactory.getInstance().createTypedLiteral(calaisExtractor.getClass().getName()));
    //adding null as expected for confidence makes it a required property
    expectedValues.put(Properties.ENHANCER_CONFIDENCE, null);
    validateAllTextAnnotations(ci.getMetadata(), TEST_TEXT, expectedValues);
    validateAllEntityAnnotations(ci.getMetadata(), expectedValues);
}
Also used : IRI(org.apache.clerezza.commons.rdf.IRI) Graph(org.apache.clerezza.commons.rdf.Graph) HashMap(java.util.HashMap) InputStream(java.io.InputStream) EngineException(org.apache.stanbol.enhancer.servicesapi.EngineException) RDFTerm(org.apache.clerezza.commons.rdf.RDFTerm) ContentItem(org.apache.stanbol.enhancer.servicesapi.ContentItem) Test(org.junit.Test)
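
The comment in the test documents the convention of the expectedValues map: a null value marks a property as required without constraining its value. A hedged illustration of that convention as a tiny check helper; the real validation is performed by the validateAllTextAnnotations/validateAllEntityAnnotations helpers called above, and checkExpected is a hypothetical name:

// Illustration only; not the actual validation code used by the test helpers.
static void checkExpected(IRI property, RDFTerm expected, RDFTerm actual) {
    if (expected == null) {
        // null expected value: the property must be present, any value is accepted
        Assert.assertNotNull("missing required property " + property, actual);
    } else {
        Assert.assertEquals("unexpected value for " + property, expected, actual);
    }
}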

Example 18 with RDFTerm

use of org.apache.clerezza.commons.rdf.RDFTerm in project stanbol by apache.

the class AbstractOntologyCollectorImpl method exportToGraph.

/**
     * This method has no conversion calls, so it can be invoked by subclasses that wish to modify it
     * afterwards.
     * 
     * @param merge if true, the managed ontologies are merged into the exported graph;
     *              otherwise owl:imports statements pointing to them are added.
     * @return the RDF representation of this collector as a modifiable graph.
     */
protected Graph exportToGraph(boolean merge, org.semanticweb.owlapi.model.IRI prefix) {
    // if (merge) throw new UnsupportedOperationException(
    // "Merge not implemented yet for Clerezza triple collections.");
    long before = System.currentTimeMillis();
    // No need to store, give it a name, or anything.
    Graph root = new SimpleGraph();
    IRI iri = new IRI(prefix + _id);
    // Add the import declarations for directly managed ontologies.
    if (root != null) {
        // Set the ontology ID
        root.add(new TripleImpl(iri, RDF.type, OWL.Ontology));
        if (merge) {
            log.warn("Merging of Clerezza triple collections is only implemented one level down. Import statements will be preserved for further levels.");
            Iterator<Triple> it;
            Set<RDFTerm> importTargets = new HashSet<RDFTerm>();
            for (OWLOntologyID ontologyId : managedOntologies) {
                ImmutableGraph g = getOntology(ontologyId, ImmutableGraph.class, false);
                root.addAll(g);
                it = g.filter(null, OWL.imports, null);
                while (it.hasNext()) {
                    org.semanticweb.owlapi.model.IRI tgt;
                    RDFTerm r = it.next().getObject();
                    try {
                        if (r instanceof IRI)
                            tgt = org.semanticweb.owlapi.model.IRI.create(((IRI) r).getUnicodeString());
                        else if (r instanceof Literal)
                            tgt = org.semanticweb.owlapi.model.IRI.create(((Literal) r).getLexicalForm());
                        else
                            tgt = org.semanticweb.owlapi.model.IRI.create(r.toString());
                        tgt = URIUtils.sanitize(tgt);
                        importTargets.add(new IRI(tgt.toString()));
                    } catch (Exception ex) {
                        log.error("FAILED to obtain import target from resource {}", r);
                        continue;
                    }
                }
                it = g.filter(null, RDF.type, OWL.Ontology);
                while (it.hasNext()) {
                    BlankNodeOrIRI ontology = it.next().getSubject();
                    log.debug("Removing all triples related to {} from {}", ontology, iri);
                    Iterator<Triple> it2 = g.filter(ontology, null, null);
                    while (it2.hasNext()) root.remove(it2.next());
                }
                /*
                     * Reinstate import statements, though. If imported ontologies were not merged earlier, we
                     * are not doing it now anyway.
                     */
                for (RDFTerm target : importTargets) root.add(new TripleImpl(iri, OWL.imports, target));
            }
        } else {
            String base = prefix + getID();
            for (int i = 0; i < backwardPathLength; i++) base = URIUtils.upOne(URI.create(base)).toString();
            base += "/";
            // The key set of managedOntologies contains the ontology IRIs, not their storage keys.
            for (OWLOntologyID ontologyId : managedOntologies) {
                // alternative naming: org.semanticweb.owlapi.model.IRI.create(base + ontologyId.getVersionIRI())
                org.semanticweb.owlapi.model.IRI physIRI =
                        org.semanticweb.owlapi.model.IRI.create(base + OntologyUtils.encode(ontologyId));
                root.add(new TripleImpl(iri, OWL.imports, new IRI(physIRI.toString())));
            }
        }
        log.debug("Clerezza export of {} completed in {} ms.", getID(), System.currentTimeMillis() - before);
    }
    return root;
}
Also used : IRI(org.apache.clerezza.commons.rdf.IRI) BlankNodeOrIRI(org.apache.clerezza.commons.rdf.BlankNodeOrIRI) BlankNodeOrIRI(org.apache.clerezza.commons.rdf.BlankNodeOrIRI) RDFTerm(org.apache.clerezza.commons.rdf.RDFTerm) OWLOntologyCreationException(org.semanticweb.owlapi.model.OWLOntologyCreationException) MissingOntologyException(org.apache.stanbol.ontologymanager.servicesapi.collector.MissingOntologyException) OWLOntologyAlreadyExistsException(org.semanticweb.owlapi.model.OWLOntologyAlreadyExistsException) UnmodifiableOntologyCollectorException(org.apache.stanbol.ontologymanager.servicesapi.collector.UnmodifiableOntologyCollectorException) OntologyCollectorModificationException(org.apache.stanbol.ontologymanager.servicesapi.collector.OntologyCollectorModificationException) Triple(org.apache.clerezza.commons.rdf.Triple) ImmutableGraph(org.apache.clerezza.commons.rdf.ImmutableGraph) IndexedGraph(org.apache.stanbol.commons.indexedgraph.IndexedGraph) SimpleGraph(org.apache.clerezza.commons.rdf.impl.utils.simple.SimpleGraph) Graph(org.apache.clerezza.commons.rdf.Graph) OWLOntologyID(org.semanticweb.owlapi.model.OWLOntologyID) Literal(org.apache.clerezza.commons.rdf.Literal) SimpleGraph(org.apache.clerezza.commons.rdf.impl.utils.simple.SimpleGraph) TripleImpl(org.apache.clerezza.commons.rdf.impl.utils.TripleImpl) ImmutableGraph(org.apache.clerezza.commons.rdf.ImmutableGraph) HashSet(java.util.HashSet)
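
Because the export performs no conversion, a subclass can post-process the returned Graph before handing it out, as the javadoc suggests. A minimal sketch under that assumption; the method name exportToGraphWithLabel and the use of rdfs:label are illustrative only, not part of the Stanbol API:

// Illustrative subclass hook: decorate the exported graph with an rdfs:label.
// Assumes org.apache.clerezza.rdf.ontologies.RDFS is available on the classpath.
protected Graph exportToGraphWithLabel(boolean merge, org.semanticweb.owlapi.model.IRI prefix) {
    Graph root = exportToGraph(merge, prefix);
    // same naming scheme as the export above: prefix + collector ID
    IRI self = new IRI(prefix + getID());
    root.add(new TripleImpl(self, RDFS.label, new PlainLiteralImpl(getID())));
    return root;
}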

Example 19 with RDFTerm

use of org.apache.clerezza.commons.rdf.RDFTerm in project stanbol by apache.

the class ScopeImpl method exportToGraph.

/**
     * Get a Clerezza {@link Graph} representation of the scope.
     * 
     * @param merge
     *            if true the core and custom spaces will be recursively merged with the scope graph,
     *            otherwise owl:imports statements will be added.
     * @return the RDF representation of the scope as a modifiable graph.
     */
protected Graph exportToGraph(boolean merge, org.semanticweb.owlapi.model.IRI universalPrefix) {
    // No need to store, give it a name, or anything.
    Graph root = new SimpleGraph();
    IRI iri = new IRI(universalPrefix + getID());
    if (root != null) {
        // Set the ontology ID
        root.add(new TripleImpl(iri, RDF.type, OWL.Ontology));
        if (merge) {
            ImmutableGraph custom, core;
            // Get the subjects of "bad" triples (those with subjects of type owl:Ontology).
            Iterator<Triple> it;
            Set<BlankNodeOrIRI> ontologies = new HashSet<BlankNodeOrIRI>();
            Set<RDFTerm> importTargets = new HashSet<RDFTerm>();
            custom = this.getCustomSpace().export(ImmutableGraph.class, merge);
            // root.addAll(space);
            it = custom.filter(null, RDF.type, OWL.Ontology);
            while (it.hasNext()) ontologies.add(it.next().getSubject());
            it = custom.filter(null, OWL.imports, null);
            while (it.hasNext()) importTargets.add(it.next().getObject());
            core = this.getCoreSpace().export(ImmutableGraph.class, merge);
            // root.addAll(space);
            it = core.filter(null, RDF.type, OWL.Ontology);
            while (it.hasNext()) ontologies.add(it.next().getSubject());
            it = core.filter(null, OWL.imports, null);
            while (it.hasNext()) importTargets.add(it.next().getObject());
            // Make sure the scope itself is not in the "bad" subjects.
            ontologies.remove(iri);
            for (BlankNodeOrIRI nl : ontologies) log.debug("{} -related triples will not be added to {}", nl, iri);
            // Merge the two spaces, skipping the "bad" triples.
            log.debug("Merging custom space of {}.", getID());
            for (Triple t : custom) if (!ontologies.contains(t.getSubject()))
                root.add(t);
            log.debug("Merging core space of {}.", getID());
            for (Triple t : core) if (!ontologies.contains(t.getSubject()))
                root.add(t);
            /*
                 * Reinstate import statements, though. If imported ontologies were not merged earlier, we are
                 * not doing it now anyway.
                 */
            for (RDFTerm target : importTargets) root.add(new TripleImpl(iri, OWL.imports, target));
        } else {
            IRI physIRI = new IRI(universalPrefix.toString() + this.getID() + "/" + SpaceType.CUSTOM.getIRISuffix());
            root.add(new TripleImpl(iri, OWL.imports, physIRI));
            physIRI = new IRI(universalPrefix.toString() + this.getID() + "/" + SpaceType.CORE.getIRISuffix());
            root.add(new TripleImpl(iri, OWL.imports, physIRI));
        }
    }
    return root;
}
Also used : Triple(org.apache.clerezza.commons.rdf.Triple) IRI(org.apache.clerezza.commons.rdf.IRI) BlankNodeOrIRI(org.apache.clerezza.commons.rdf.BlankNodeOrIRI) ImmutableGraph(org.apache.clerezza.commons.rdf.ImmutableGraph) SimpleGraph(org.apache.clerezza.commons.rdf.impl.utils.simple.SimpleGraph) Graph(org.apache.clerezza.commons.rdf.Graph) SimpleGraph(org.apache.clerezza.commons.rdf.impl.utils.simple.SimpleGraph) BlankNodeOrIRI(org.apache.clerezza.commons.rdf.BlankNodeOrIRI) RDFTerm(org.apache.clerezza.commons.rdf.RDFTerm) TripleImpl(org.apache.clerezza.commons.rdf.impl.utils.TripleImpl) ImmutableGraph(org.apache.clerezza.commons.rdf.ImmutableGraph) HashSet(java.util.HashSet)
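
When merge is false, the method only records owl:imports pointers to the custom and core spaces. A small sketch of reading those back from the exported graph, assuming access to the same universalPrefix passed to the method:

// Sketch: list the import targets the non-merging export added for this scope.
Graph exported = exportToGraph(false, universalPrefix);
IRI scopeIri = new IRI(universalPrefix + getID());
Iterator<Triple> imports = exported.filter(scopeIri, OWL.imports, null);
while (imports.hasNext()) {
    RDFTerm target = imports.next().getObject();
    // with merge == false this yields the custom and core space IRIs
    log.debug("Scope {} imports {}", scopeIri, target);
}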

Example 20 with RDFTerm

use of org.apache.clerezza.commons.rdf.RDFTerm in project stanbol by apache.

the class MultiThreadedTestBase method createRdfDataIterator.

/**
     * Iterator implementation that parses an RDF graph from the provided
     * {@link InputStream}. The RDF data are loaded in-memory, so only test data
     * that fit in-memory can be used. <p>
     * Literal values (objects) of the {@link #PROPERTY_TEST_DATA_PROPERTY} are
     * used as data. If this property is not present {@link #DEFAULT_TEST_DATA_PROPERTY}
     * is used. If {@link #PROPERTY_TEST_DATA_PROPERTY} is set to '*' then all
     * Triples with Literal values are used.<p>
     * This supports all RDF formats supported by the {@link JenaParserProvider} and
     * {@link RdfJsonParsingProvider}. The charset is expected to be UTF-8.
     * @param is the input stream providing the RDF test data.
     * @param mediaType the Media-Type of the stream. MUST BE supported by
     * the Apache Clerezza RDF parsers.
     * @param propertyString the property whose literal values are used as test data,
     * or '*' to use the literal values of all properties.
     */
private Iterator<String> createRdfDataIterator(InputStream is, String mediaType, final String propertyString) {
    final SimpleGraph graph = new SimpleGraph();
    try {
        rdfParser.parse(graph, is, mediaType);
    } catch (UnsupportedFormatException e) {
        Assert.fail("The MimeType '" + mediaType + "' of the parsed testData " + "is not supported. This utility supports plain text files as " + "as well as the RDF formats " + rdfParser.getSupportedFormats() + "If your test data uses one of those formats but it was not " + "correctly detected you can use the System property '" + PROPERTY_TEST_DATA_TYPE + "' to manually parse the Media-Type!");
    }
    IOUtils.closeQuietly(is);
    return new Iterator<String>() {

        Iterator<Triple> it = null;

        String next = null;

        private String getNext() {
            if (it == null) {
                IRI property;
                if ("*".equals(propertyString)) {
                    //wildcard
                    property = null;
                    log.info("Iterate over values of all Triples");
                } else {
                    property = new IRI(NamespaceMappingUtils.getConfiguredUri(nsPrefixService, propertyString));
                    log.info("Iterate over values of property {}", property);
                }
                it = graph.filter(null, property, null);
            }
            while (it.hasNext()) {
                RDFTerm value = it.next().getObject();
                if (value instanceof Literal) {
                    return ((Literal) value).getLexicalForm();
                }
            }
            //no more data
            return null;
        }

        @Override
        public boolean hasNext() {
            if (next == null) {
                next = getNext();
            }
            return next != null;
        }

        @Override
        public String next() {
            if (next == null) {
                next = getNext();
            }
            if (next == null) {
                throw new NoSuchElementException("No further testData available");
            } else {
                String elem = next;
                next = null;
                return elem;
            }
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
Also used : IRI(org.apache.clerezza.commons.rdf.IRI) UnsupportedFormatException(org.apache.clerezza.rdf.core.serializedform.UnsupportedFormatException) Literal(org.apache.clerezza.commons.rdf.Literal) SimpleGraph(org.apache.clerezza.commons.rdf.impl.utils.simple.SimpleGraph) Iterator(java.util.Iterator) RDFTerm(org.apache.clerezza.commons.rdf.RDFTerm) NoSuchElementException(java.util.NoSuchElementException)
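
Given the javadoc above, a hedged usage sketch of the iterator; the resource name and the consumer are placeholders:

// Usage sketch only; "testdata.rdf" and process(...) are placeholders.
InputStream in = getClass().getClassLoader().getResourceAsStream("testdata.rdf");
Iterator<String> texts = createRdfDataIterator(in, "application/rdf+xml", "*");
while (texts.hasNext()) {
    // each element is the lexical form of a literal object from the parsed graph
    process(texts.next());
}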

Aggregations

RDFTerm (org.apache.clerezza.commons.rdf.RDFTerm) 126
IRI (org.apache.clerezza.commons.rdf.IRI) 84
Triple (org.apache.clerezza.commons.rdf.Triple) 70
BlankNodeOrIRI (org.apache.clerezza.commons.rdf.BlankNodeOrIRI) 48
Literal (org.apache.clerezza.commons.rdf.Literal) 35
Test (org.junit.Test) 35
HashSet (java.util.HashSet) 30
HashMap (java.util.HashMap) 28
TripleImpl (org.apache.clerezza.commons.rdf.impl.utils.TripleImpl) 26
Graph (org.apache.clerezza.commons.rdf.Graph) 24
ContentItem (org.apache.stanbol.enhancer.servicesapi.ContentItem) 18
ArrayList (java.util.ArrayList) 17
PlainLiteralImpl (org.apache.clerezza.commons.rdf.impl.utils.PlainLiteralImpl) 16
EngineException (org.apache.stanbol.enhancer.servicesapi.EngineException) 13
OWLOntologyID (org.semanticweb.owlapi.model.OWLOntologyID) 13
SimpleGraph (org.apache.clerezza.commons.rdf.impl.utils.simple.SimpleGraph) 12
Collection (java.util.Collection) 10
IndexedGraph (org.apache.stanbol.commons.indexedgraph.IndexedGraph) 10
Lock (java.util.concurrent.locks.Lock) 9
IOException (java.io.IOException) 5