use of org.openrdf.repository.sail.SailRepositoryConnection in project incubator-rya by apache.
the class RyaInputIncrementalUpdateIT method historicAndStreamMultiVariables.
@Test
public void historicAndStreamMultiVariables() throws Exception {
    // A query that finds people who talk to other people and work at Chipotle.
    final String sparql =
            "SELECT ?x ?y " +
            "WHERE { " +
                "?x <http://talksTo> ?y. " +
                "?x <http://worksAt> <http://Chipotle>." +
            "}";

    // Triples that are loaded into Rya before the PCJ is created.
    final ValueFactory vf = new ValueFactoryImpl();
    final Set<Statement> historicTriples = Sets.newHashSet(
            vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
            vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
            vf.createStatement(vf.createURI("http://Joe"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));

    // Triples that will be streamed into Fluo after the PCJ has been created.
    final Set<Statement> streamedTriples = Sets.newHashSet(
            vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://talksTo"), vf.createURI("http://Betty")),
            vf.createStatement(vf.createURI("http://Joe"), vf.createURI("http://talksTo"), vf.createURI("http://Alice")),
            vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));

    // Load the historic data into Rya.
    final SailRepositoryConnection ryaConn = super.getRyaSailRepository().getConnection();
    for (final Statement triple : historicTriples) {
        ryaConn.add(triple);
    }

    // Create the PCJ table.
    final Connector accumuloConn = super.getAccumuloConnector();
    final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, getRyaInstanceName());
    final String pcjId = pcjStorage.createPcj(sparql);

    try (FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
        // Tell the Fluo app to maintain the PCJ.
        new CreateFluoPcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, getRyaInstanceName());
        super.getMiniFluo().waitForObservers();

        // Load the streaming data into Rya.
        for (final Statement triple : streamedTriples) {
            ryaConn.add(triple);
        }

        // Wait for the observers to finish processing, then verify the expected matches.
        super.getMiniFluo().waitForObservers();

        final Set<BindingSet> expected = new HashSet<>();

        MapBindingSet bs = new MapBindingSet();
        bs.addBinding("x", vf.createURI("http://Alice"));
        bs.addBinding("y", vf.createURI("http://Eve"));
        expected.add(bs);

        bs = new MapBindingSet();
        bs.addBinding("x", vf.createURI("http://Frank"));
        bs.addBinding("y", vf.createURI("http://Betty"));
        expected.add(bs);

        bs = new MapBindingSet();
        bs.addBinding("x", vf.createURI("http://Joe"));
        bs.addBinding("y", vf.createURI("http://Alice"));
        expected.add(bs);

        final Set<BindingSet> results = new HashSet<>();
        try (CloseableIterator<BindingSet> resultIt = pcjStorage.listResults(pcjId)) {
            while (resultIt.hasNext()) {
                results.add(resultIt.next());
            }
        }

        assertEquals(expected, results);
    }
}
use of org.openrdf.repository.sail.SailRepositoryConnection in project incubator-rya by apache.
the class KafkaExportITBase method loadData.
protected void loadData(final Collection<Statement> statements) throws Exception {
    requireNonNull(statements);

    final SailRepositoryConnection ryaConn = getRyaSailRepository().getConnection();
    ryaConn.begin();
    ryaConn.add(statements);
    ryaConn.commit();
    ryaConn.close();

    // Wait for the Fluo application to finish computing the end result.
    super.getMiniFluo().waitForObservers();
}
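For orientation, a test in a subclass of KafkaExportITBase could call this helper with a handful of hand-built statements. The sketch below is illustrative only; the subject, predicate, and object IRIs are made-up placeholders, not values taken from the project.

// Illustrative usage of loadData(...) with two hand-built triples (placeholder IRIs).
final ValueFactory vf = new ValueFactoryImpl();
final Collection<Statement> statements = Sets.newHashSet(
        vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Bob")),
        vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));

// Writes the triples through the Rya SAIL connection and blocks until the
// Fluo observers have finished processing them.
loadData(statements);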
use of org.openrdf.repository.sail.SailRepositoryConnection in project incubator-rya by apache.
the class RulesetCopyIT method runQuery.
private Set<BindingSet> runQuery(final String query, final Configuration conf) throws Exception {
    SailRepository repository = null;
    SailRepositoryConnection conn = null;
    try {
        final Sail extSail = RyaSailFactory.getInstance(conf);
        repository = new SailRepository(extSail);
        conn = repository.getConnection();
        final ResultHandler handler = new ResultHandler();
        final TupleQuery tq = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tq.evaluate(handler);
        return handler.getSolutions();
    } finally {
        if (conn != null) {
            conn.close();
        }
        if (repository != null) {
            repository.shutDown();
        }
    }
}
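The ResultHandler used above is a helper defined elsewhere in the test and is not reproduced on this page. As a rough sketch of what such a handler might look like (an assumption, not the project's actual code), it can extend the same TupleQueryResultHandlerBase used in the loadSpinRules example below and collect each solution into a set:

// Hypothetical sketch of a solution-collecting handler; the real ResultHandler
// in RulesetCopyIT may differ in naming and details.
private static class ResultHandler extends TupleQueryResultHandlerBase {
    private final Set<BindingSet> solutions = new HashSet<>();

    @Override
    public void handleSolution(final BindingSet bindingSet) {
        // Keep every solution so the caller can compare the full result set.
        solutions.add(bindingSet);
    }

    public Set<BindingSet> getSolutions() {
        return solutions;
    }
}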
use of org.openrdf.repository.sail.SailRepositoryConnection in project incubator-rya by apache.
the class SpinConstructRule method loadSpinRules.
/**
 * Load a set of SPIN rules from a data store.
 * @param conf Contains the connection information. Not null.
 * @return A Ruleset containing the rules that were found and successfully parsed.
 * @throws ForwardChainException if connecting, querying for rules, or
 *         parsing rules fails.
 */
public static Ruleset loadSpinRules(RdfCloudTripleStoreConfiguration conf) throws ForwardChainException {
    Preconditions.checkNotNull(conf);
    Map<Resource, Rule> rules = new ConcurrentHashMap<>();

    // Connect to Rya.
    SailRepository repository = null;
    SailRepositoryConnection conn = null;
    try {
        repository = new SailRepository(RyaSailFactory.getInstance(conf));
    } catch (Exception e) {
        throw new ForwardChainException("Couldn't initialize SAIL from configuration", e);
    }

    // Load and parse the individual SPIN rules from the data store.
    String ruleQueryString = "SELECT ?type ?rule ?text WHERE {\n"
            + " ?type <" + SPIN.RULE_PROPERTY.stringValue() + "> ?rule .\n"
            + " {\n"
            + " ?rule a <" + SP.CONSTRUCT_CLASS.stringValue() + "> .\n"
            + " ?rule <" + SP.TEXT_PROPERTY.stringValue() + "> ?text .\n"
            + " } UNION {\n"
            + " ?rule a ?template .\n"
            + " ?template <" + SPIN.BODY_PROPERTY + ">? ?body .\n"
            + " ?body a <" + SP.CONSTRUCT_CLASS.stringValue() + "> .\n"
            + " ?body <" + SP.TEXT_PROPERTY.stringValue() + "> ?text .\n"
            + " }\n"
            + "}";
    SPARQLParser parser = new SPARQLParser();
    try {
        conn = repository.getConnection();
        TupleQuery ruleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, ruleQueryString);
        ruleQuery.evaluate(new TupleQueryResultHandlerBase() {
            @Override
            public void handleSolution(BindingSet bs) throws TupleQueryResultHandlerException {
                // For each rule identifier found, instantiate a SpinRule.
                Value requiredType = bs.getValue("type");
                Value ruleIdentifier = bs.getValue("rule");
                Value ruleText = bs.getValue("text");
                if (requiredType instanceof Resource && ruleIdentifier instanceof Resource && ruleText instanceof Literal) {
                    ParsedQuery parsedRule;
                    try {
                        parsedRule = parser.parseQuery(ruleText.stringValue(), null);
                        if (parsedRule instanceof ParsedGraphQuery) {
                            SpinConstructRule rule = new SpinConstructRule((Resource) requiredType,
                                    (Resource) ruleIdentifier, (ParsedGraphQuery) parsedRule);
                            if (rule.hasAnonymousConsequent()) {
                                logger.error("Skipping unsupported rule " + ruleIdentifier
                                        + " -- consequent refers to bnode, which is not"
                                        + " currently supported (creating new bnodes at each"
                                        + " application could lead to infinite recursion).");
                            } else {
                                rules.put((Resource) ruleIdentifier, rule);
                            }
                        }
                    } catch (Exception e) {
                        throw new TupleQueryResultHandlerException(e);
                    }
                }
            }
        });
    } catch (TupleQueryResultHandlerException | QueryEvaluationException | MalformedQueryException | RepositoryException e) {
        throw new ForwardChainException("Couldn't retrieve SPIN rules", e);
    } finally {
        if (conn != null) {
            try {
                conn.close();
            } catch (RepositoryException e) {
                logger.warn("Error closing repository connection", e);
            }
        }
        if (repository.isInitialized()) {
            try {
                repository.shutDown();
            } catch (RepositoryException e) {
                logger.warn("Error shutting down repository", e);
            }
        }
    }
    return new Ruleset(rules.values());
}
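Because the rule query is assembled from vocabulary constants, the concatenated form is hard to read. Resolved against the standard SPIN and SP namespaces (http://spinrdf.org/spin# and http://spinrdf.org/sp#), which is an assumption here since the constant values are not shown on this page, the query looks roughly like this:

// Approximate expansion of ruleQueryString, assuming the standard SPIN/SP IRIs.
String resolvedRuleQuery =
        "SELECT ?type ?rule ?text WHERE {\n"
        + "  ?type <http://spinrdf.org/spin#rule> ?rule .\n"
        // Branch 1: the rule is itself an sp:Construct carrying its sp:text.
        + "  {\n"
        + "    ?rule a <http://spinrdf.org/sp#Construct> .\n"
        + "    ?rule <http://spinrdf.org/sp#text> ?text .\n"
        // Branch 2: the rule is typed by a template whose spin:body (the trailing
        // '?' makes it a zero-or-one path step) holds the sp:Construct and its text.
        + "  } UNION {\n"
        + "    ?rule a ?template .\n"
        + "    ?template <http://spinrdf.org/spin#body>? ?body .\n"
        + "    ?body a <http://spinrdf.org/sp#Construct> .\n"
        + "    ?body <http://spinrdf.org/sp#text> ?text .\n"
        + "  }\n"
        + "}";

Spelled out this way, the two UNION branches are easier to see: the first matches rules whose SPARQL text is attached directly, while the second follows a template indirection to reach the CONSTRUCT text.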
use of org.openrdf.repository.sail.SailRepositoryConnection in project incubator-rya by apache.
the class RyaInputIncrementalUpdateIT method historicThenStreamedResults.
/**
 * Simulates the case where a Triple is added to Rya, a new query that
 * includes that triple as a historic match is inserted into Fluo, and then
 * some new triple that matches the query is streamed into Fluo. The query's
 * results must include both the historic result and the newly streamed
 * result.
 */
@Test
public void historicThenStreamedResults() throws Exception {
    // A query that finds people who talk to Eve and work at Chipotle.
    final String sparql =
            "SELECT ?x " +
            "WHERE { " +
                "?x <http://talksTo> <http://Eve>. " +
                "?x <http://worksAt> <http://Chipotle>." +
            "}";

    // Triples that are loaded into Rya before the PCJ is created.
    final ValueFactory vf = new ValueFactoryImpl();
    final Set<Statement> historicTriples = Sets.newHashSet(
            vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
            vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
            vf.createStatement(vf.createURI("http://Joe"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));

    // Triples that will be streamed into Fluo after the PCJ has been created.
    final Set<Statement> streamedTriples = Sets.newHashSet(
            vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
            vf.createStatement(vf.createURI("http://Joe"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
            vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));

    // Load the historic data into Rya.
    final SailRepositoryConnection ryaConn = super.getRyaSailRepository().getConnection();
    for (final Statement triple : historicTriples) {
        ryaConn.add(triple);
    }

    // Create the PCJ table.
    final Connector accumuloConn = super.getAccumuloConnector();
    final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, getRyaInstanceName());
    final String pcjId = pcjStorage.createPcj(sparql);

    try (FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
        // Tell the Fluo app to maintain the PCJ.
        new CreateFluoPcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, getRyaInstanceName());
        super.getMiniFluo().waitForObservers();

        // Load the streaming data into Rya.
        for (final Statement triple : streamedTriples) {
            ryaConn.add(triple);
        }

        // Wait for the observers to finish processing, then verify the expected matches.
        super.getMiniFluo().waitForObservers();

        final Set<BindingSet> expected = new HashSet<>();

        MapBindingSet bs = new MapBindingSet();
        bs.addBinding("x", vf.createURI("http://Alice"));
        expected.add(bs);

        bs = new MapBindingSet();
        bs.addBinding("x", vf.createURI("http://Frank"));
        expected.add(bs);

        bs = new MapBindingSet();
        bs.addBinding("x", vf.createURI("http://Joe"));
        expected.add(bs);

        final Set<BindingSet> results = new HashSet<>();
        try (CloseableIterator<BindingSet> resultIt = pcjStorage.listResults(pcjId)) {
            while (resultIt.hasNext()) {
                results.add(resultIt.next());
            }
        }

        assertEquals(expected, results);
    }
}