Use of org.openrdf.repository.RepositoryConnection in project incubator-rya by apache.
In class RdfCloudTripleStoreConnectionTest, method testDropGraph:
public void testDropGraph() throws Exception {
    RepositoryConnection conn = repository.getConnection();
    String insert = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"
            + "PREFIX ex: <http://example/addresses#>\n"
            + "INSERT DATA\n"
            + "{ GRAPH ex:G1 {\n"
            + "<http://example/book3> dc:title \"A new book\" ;\n"
            + " dc:creator \"A.N.Other\" .\n"
            + "}\n"
            + "}";
    Update update = conn.prepareUpdate(QueryLanguage.SPARQL, insert);
    update.execute();
    insert = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"
            + "PREFIX ex: <http://example/addresses#>\n"
            + "INSERT DATA\n"
            + "{ GRAPH ex:G2 {\n"
            + "<http://example/book3> dc:title \"A new book\" ;\n"
            + " dc:creator \"A.N.Other\" .\n"
            + "}\n"
            + "}";
    update = conn.prepareUpdate(QueryLanguage.SPARQL, insert);
    update.execute();
    String query = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"
            + "select * where { <http://example/book3> ?p ?o. }";
    TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
    CountTupleHandler tupleHandler = new CountTupleHandler();
    tupleQuery.evaluate(tupleHandler);
    assertEquals(4, tupleHandler.getCount());
    tupleHandler = new CountTupleHandler();
    String drop = "PREFIX ex: <http://example/addresses#>\n"
            + "DROP GRAPH ex:G2 ";
    update = conn.prepareUpdate(QueryLanguage.SPARQL, drop);
    update.execute();
    tupleQuery.evaluate(tupleHandler);
    assertEquals(2, tupleHandler.getCount());
    tupleHandler = new CountTupleHandler();
    drop = "PREFIX ex: <http://example/addresses#>\n"
            + "DROP GRAPH ex:G1 ";
    update = conn.prepareUpdate(QueryLanguage.SPARQL, drop);
    update.execute();
    tupleQuery.evaluate(tupleHandler);
    assertEquals(0, tupleHandler.getCount());
    conn.close();
}
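CountTupleHandler is a helper defined elsewhere in this test class and not shown here. A minimal sketch of what such a handler can look like in the Sesame 2.x API (an assumption about its shape, not the actual Rya source): it extends TupleQueryResultHandlerBase and counts each solution the query pushes to it during TupleQuery.evaluate(handler).

import org.openrdf.query.BindingSet;
import org.openrdf.query.TupleQueryResultHandlerBase;

// Hypothetical reconstruction of the CountTupleHandler helper used above:
// it simply counts the solutions streamed to it as the query evaluates.
public class CountTupleHandler extends TupleQueryResultHandlerBase {

    private int count = 0;

    @Override
    public void handleSolution(final BindingSet bindingSet) {
        count++;
    }

    public int getCount() {
        return count;
    }
}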
Use of org.openrdf.repository.RepositoryConnection in project incubator-rya by apache.
In class RdfCloudTripleStoreConnectionTest, method testSubPropertyOf:
public void testSubPropertyOf() throws Exception {
    if (internalInferenceEngine == null) {
        // inference not supported
        return;
    }
    RepositoryConnection conn = repository.getConnection();
    conn.add(new StatementImpl(vf.createURI(litdupsNS, "undergradDegreeFrom"), RDFS.SUBPROPERTYOF,
            vf.createURI(litdupsNS, "degreeFrom")));
    conn.add(new StatementImpl(vf.createURI(litdupsNS, "gradDegreeFrom"), RDFS.SUBPROPERTYOF,
            vf.createURI(litdupsNS, "degreeFrom")));
    conn.add(new StatementImpl(vf.createURI(litdupsNS, "degreeFrom"), RDFS.SUBPROPERTYOF,
            vf.createURI(litdupsNS, "memberOf")));
    conn.add(new StatementImpl(vf.createURI(litdupsNS, "memberOf"), RDFS.SUBPROPERTYOF,
            vf.createURI(litdupsNS, "associatedWith")));
    conn.add(new StatementImpl(vf.createURI(litdupsNS, "UgradA"), vf.createURI(litdupsNS, "undergradDegreeFrom"),
            vf.createURI(litdupsNS, "Harvard")));
    conn.add(new StatementImpl(vf.createURI(litdupsNS, "GradB"), vf.createURI(litdupsNS, "gradDegreeFrom"),
            vf.createURI(litdupsNS, "Yale")));
    conn.add(new StatementImpl(vf.createURI(litdupsNS, "ProfessorC"), vf.createURI(litdupsNS, "memberOf"),
            vf.createURI(litdupsNS, "Harvard")));
    conn.commit();
    conn.close();
    internalInferenceEngine.refreshGraph();
    conn = repository.getConnection();
    String query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n"
            + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n"
            + "PREFIX lit: <" + litdupsNS + ">\n"
            + "select * where {?s lit:degreeFrom lit:Harvard.}";
    TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
    CountTupleHandler tupleHandler = new CountTupleHandler();
    tupleQuery.evaluate(tupleHandler);
    assertEquals(1, tupleHandler.getCount());
    query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n"
            + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n"
            + "PREFIX lit: <" + litdupsNS + ">\n"
            + "select * where {?s lit:memberOf lit:Harvard.}";
    tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
    tupleHandler = new CountTupleHandler();
    tupleQuery.evaluate(tupleHandler);
    assertEquals(2, tupleHandler.getCount());
    query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n"
            + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n"
            + "PREFIX lit: <" + litdupsNS + ">\n"
            + "select * where {?s lit:associatedWith ?o.}";
    tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
    tupleHandler = new CountTupleHandler();
    tupleQuery.evaluate(tupleHandler);
    assertEquals(3, tupleHandler.getCount());
    query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n"
            + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n"
            + "PREFIX lit: <" + litdupsNS + ">\n"
            + "select * where {?s lit:gradDegreeFrom lit:Yale.}";
    tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
    tupleHandler = new CountTupleHandler();
    tupleQuery.evaluate(tupleHandler);
    assertEquals(1, tupleHandler.getCount());
    conn.close();
}
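The expected counts follow from the rdfs:subPropertyOf chain: undergradDegreeFrom and gradDegreeFrom are sub-properties of degreeFrom, which is a sub-property of memberOf, which is a sub-property of associatedWith, so UgradA's single undergradDegreeFrom statement also matches the memberOf and associatedWith queries. The same kind of entailment can be reproduced outside Rya with Sesame's plain in-memory RDFS inferencer; a minimal sketch (note this swaps Rya's internalInferenceEngine for Sesame's ForwardChainingRDFSInferencer, and uses a stand-in namespace for litdupsNS):

import org.openrdf.model.ValueFactory;
import org.openrdf.model.vocabulary.RDFS;
import org.openrdf.repository.Repository;
import org.openrdf.repository.RepositoryConnection;
import org.openrdf.repository.sail.SailRepository;
import org.openrdf.sail.inferencer.fc.ForwardChainingRDFSInferencer;
import org.openrdf.sail.memory.MemoryStore;

public class SubPropertyDemo {
    public static void main(final String[] args) throws Exception {
        // In-memory store wrapped in Sesame's forward-chaining RDFS inferencer
        final Repository repo = new SailRepository(
                new ForwardChainingRDFSInferencer(new MemoryStore()));
        repo.initialize();
        final RepositoryConnection conn = repo.getConnection();
        final ValueFactory vf = repo.getValueFactory();
        final String ns = "urn:test:litdups#"; // stand-in for litdupsNS
        conn.add(vf.createURI(ns, "undergradDegreeFrom"), RDFS.SUBPROPERTYOF,
                vf.createURI(ns, "degreeFrom"));
        conn.add(vf.createURI(ns, "UgradA"),
                vf.createURI(ns, "undergradDegreeFrom"), vf.createURI(ns, "Harvard"));
        // With includeInferred=true the entailed degreeFrom triple is visible
        final boolean entailed = conn.hasStatement(vf.createURI(ns, "UgradA"),
                vf.createURI(ns, "degreeFrom"), vf.createURI(ns, "Harvard"), true);
        System.out.println("Entailed: " + entailed); // expected: true
        conn.close();
        repo.shutDown();
    }
}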
Use of org.openrdf.repository.RepositoryConnection in project incubator-rya by apache.
In class RdfCloudTripleStoreConnectionTest, method testUpdateData:
public void testUpdateData() throws Exception {
    RepositoryConnection conn = repository.getConnection();
    String insert = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"
            + "PREFIX ex: <http://example/addresses#>\n"
            + "INSERT DATA\n"
            + "{ GRAPH ex:G1 {\n"
            + "<http://example/book3> dc:title \"A new book\" ;\n"
            + " dc:creator \"A.N.Other\" .\n"
            + "}\n"
            + "}";
    Update update = conn.prepareUpdate(QueryLanguage.SPARQL, insert);
    update.execute();
    String query = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"
            + "select * where { <http://example/book3> ?p ?o. }";
    TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
    CountTupleHandler tupleHandler = new CountTupleHandler();
    tupleQuery.evaluate(tupleHandler);
    assertEquals(2, tupleHandler.getCount());
    String insdel = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"
            + "\n"
            + "WITH <http://example/addresses#G1>\n"
            + "DELETE { ?book dc:title ?title }\n"
            + "INSERT { ?book dc:title \"A newer book\"."
            + " ?book dc:add \"Additional Info\" }\n"
            + "WHERE\n"
            + " { ?book dc:creator \"A.N.Other\" ;\n"
            + " dc:title ?title .\n"
            + " }";
    update = conn.prepareUpdate(QueryLanguage.SPARQL, insdel);
    update.execute();
    query = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"
            + "PREFIX ex: <http://example/addresses#>\n"
            + "select * where { GRAPH ex:G1 {<http://example/book3> ?p ?o. } }";
    tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
    tupleHandler = new CountTupleHandler();
    tupleQuery.evaluate(tupleHandler);
    assertEquals(3, tupleHandler.getCount());
    conn.close();
}
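The final count of 3 comes from the DELETE/INSERT: the old dc:title is removed and replaced by a new title plus a dc:add statement, alongside the untouched dc:creator. Instead of a push-style handler, the result can also be inspected with Sesame's pull-style TupleQueryResult; a short sketch (a hypothetical helper, not part of the test) run against the same connection:

import org.openrdf.query.BindingSet;
import org.openrdf.query.QueryLanguage;
import org.openrdf.query.TupleQuery;
import org.openrdf.query.TupleQueryResult;
import org.openrdf.repository.RepositoryConnection;

// Hypothetical helper: print every (predicate, object) of book3 in graph G1
// using pull-style iteration instead of a TupleQueryResultHandler.
static void dumpBook3(final RepositoryConnection conn) throws Exception {
    final String query = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"
            + "PREFIX ex: <http://example/addresses#>\n"
            + "select * where { GRAPH ex:G1 { <http://example/book3> ?p ?o . } }";
    final TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
    final TupleQueryResult result = tupleQuery.evaluate();
    try {
        while (result.hasNext()) {
            final BindingSet bs = result.next();
            System.out.println(bs.getValue("p") + " -> " + bs.getValue("o"));
        }
    } finally {
        result.close(); // always release the underlying iteration
    }
}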
Use of org.openrdf.repository.RepositoryConnection in project incubator-rya by apache.
In class RdfControllerAccumuloTest, method setup:
@Before
public void setup() {
    this.mockMvc = standaloneSetup(controller).build();
    try {
        RepositoryConnection con = repository.getConnection();
        con.add(getClass().getResourceAsStream("/test.nt"), "", RDFFormat.NTRIPLES);
        con.close();
    } catch (Exception e) {
        e.printStackTrace();
        throw new RuntimeException(e);
    }
}
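One caveat worth noting: if con.add() throws, the connection from getConnection() is never closed. A safer variant of the same setup body (a sketch, not the project's code) releases it in a finally block:

try {
    final RepositoryConnection con = repository.getConnection();
    try {
        // Load the bundled N-Triples fixture, as in the original setup
        con.add(getClass().getResourceAsStream("/test.nt"), "", RDFFormat.NTRIPLES);
    } finally {
        con.close(); // closed even if reading or parsing fails
    }
} catch (Exception e) {
    throw new RuntimeException(e);
}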
Use of org.openrdf.repository.RepositoryConnection in project incubator-rya by apache.
In class ConformanceTest, method runTest:
/**
 * Verify that we can infer the correct triples or detect an inconsistency.
 * @param conf Specifies working directory, etc.
 * @param args Command-line arguments passed through to the MapReduce job
 * @param test Contains premise/conclusion graphs; will store the result
 * @return Return value of the MapReduce job
 */
int runTest(final Configuration conf, final String[] args, final OwlTest test) throws Exception {
    conf.setInt(MRReasoningUtils.STEP_PROP, 0);
    conf.setInt(MRReasoningUtils.SCHEMA_UPDATE_PROP, 0);
    conf.setBoolean(MRReasoningUtils.DEBUG_FLAG, true);
    conf.setBoolean(MRReasoningUtils.OUTPUT_FLAG, true);
    // Connect to MiniAccumulo and load the test
    final Repository repo = MRReasoningUtils.getRepository(conf);
    repo.initialize();
    final RepositoryConnection conn = repo.getConnection();
    conn.clear();
    conn.add(new StringReader(test.premise), "", RDFFormat.RDFXML);
    conn.close();
    repo.shutDown();
    // Run the reasoner
    final ReasoningDriver reasoner = new ReasoningDriver();
    final int result = ToolRunner.run(conf, reasoner, args);
    test.success = (result == 0);
    // Inconsistency test: successful if determined inconsistent
    if (test.types.contains(TEST_INCONSISTENCY)) {
        test.success = test.success && reasoner.hasInconsistencies();
    }
    // Consistency test: successful if determined consistent
    if (test.types.contains(TEST_CONSISTENCY)) {
        test.success = test.success && !reasoner.hasInconsistencies();
    }
    // Other types: we'll need to look at the inferred triples/schema
    if (test.types.contains(TEST_NONENTAILMENT) || test.types.contains(TEST_ENTAILMENT)) {
        System.out.println("Reading inferred triples...");
        // Read in the inferred triples from HDFS:
        final Schema schema = MRReasoningUtils.loadSchema(conf);
        final FileSystem fs = FileSystem.get(conf);
        final Path configuredPath = MRReasoningUtils.getOutputPath(conf, "final");
        final Path path = PathUtils.cleanHadoopPath(configuredPath, conf);
        final OutputCollector inferred = new OutputCollector();
        final NTriplesParser parser = new NTriplesParser();
        parser.setRDFHandler(inferred);
        if (fs.isDirectory(path)) {
            for (final FileStatus status : fs.listStatus(path)) {
                final String s = status.getPath().getName();
                if (s.startsWith(MRReasoningUtils.INCONSISTENT_OUT) || s.startsWith(MRReasoningUtils.DEBUG_OUT)) {
                    continue;
                }
                final BufferedReader br = new BufferedReader(
                        new InputStreamReader(fs.open(status.getPath()), StandardCharsets.UTF_8));
                parser.parse(br, "");
                br.close();
            }
        }
        MRReasoningUtils.deleteIfExists(conf, "final");
        test.inferred.addAll(inferred.triples);
        // Entailment test: successful if expected triples were inferred
        if (test.types.contains(TEST_ENTAILMENT)) {
            // A triple counts as entailed if it was inferred, is trivially
            // true, or follows from the schema reasoner
            for (final Statement st : test.expected) {
                final Fact fact = new Fact(st);
                if (!test.inferred.contains(st) && !triviallyTrue(fact.getTriple(), schema)
                        && !schema.containsTriple(fact.getTriple())) {
                    test.error.add(st);
                }
            }
        }
        // Non-entailment test: failure if non-expected triples inferred
        if (test.types.contains(TEST_NONENTAILMENT)) {
            for (final Statement st : test.unexpected) {
                final Fact fact = new Fact(st);
                if (test.inferred.contains(st) || schema.containsTriple(fact.getTriple())) {
                    test.error.add(st);
                }
            }
        }
        test.success = test.success && test.error.isEmpty();
    }
    conf.setBoolean(MRReasoningUtils.DEBUG_FLAG, false);
    MRReasoningUtils.clean(conf);
    return result;
}
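The OutputCollector passed to NTriplesParser.setRDFHandler() is not shown here. A plausible shape, inferred from how it is used above (an assumption, not the actual Rya source), is an RDFHandler that accumulates parsed statements into a set exposed as a triples field:

import java.util.HashSet;
import java.util.Set;

import org.openrdf.model.Statement;
import org.openrdf.rio.helpers.RDFHandlerBase;

// Hypothetical reconstruction of OutputCollector: gathers every statement
// the NTriplesParser emits so the test can compare against expectations.
public class OutputCollector extends RDFHandlerBase {

    final Set<Statement> triples = new HashSet<Statement>();

    @Override
    public void handleStatement(final Statement st) {
        triples.add(st);
    }
}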