Use of org.openrdf.query.MalformedQueryException in project incubator-rya by apache.
The class QueryRuleset, method setRules.
/**
 * Extract the rules from the query string, applying inference rules if configured to.
 * @throws QueryRulesetException if the query can't be parsed and translated into valid rules.
 */
private void setRules() throws QueryRulesetException {
    final ParsedTupleQuery ptq;
    final TupleExpr te;
    try {
        ptq = QueryParserUtil.parseTupleQuery(QueryLanguage.SPARQL, query, null);
    } catch (UnsupportedQueryLanguageException | MalformedQueryException e) {
        throw new QueryRulesetException("Error parsing query:\n" + query, e);
    }
    te = ptq.getTupleExpr();
    // Before converting to rules (and renaming variables), validate that no statement patterns
    // consist of only variables (this would result in a rule that matches every triple).
    // This needs to be done before inference, since inference rules may create such statement
    // patterns, which are OK because they won't be converted to rules directly.
    te.visit(new QueryModelVisitorBase<QueryRulesetException>() {
        @Override
        public void meet(final StatementPattern node) throws QueryRulesetException {
            if (!(node.getSubjectVar().hasValue() || node.getPredicateVar().hasValue() || node.getObjectVar().hasValue())) {
                throw new QueryRulesetException("Statement pattern with no constants would match every statement:\n" + node + "\nFrom parsed query:\n" + te);
            }
        }
    });
    // Apply inference, if applicable.
    if (conf != null && conf.isInfer()) {
        RdfCloudTripleStore store = null;
        try {
            log.info("Applying inference rules");
            store = (RdfCloudTripleStore) RyaSailFactory.getInstance(conf);
            final InferenceEngine inferenceEngine = store.getInferenceEngine();
            // Apply in the same order as query evaluation:
            te.visit(new TransitivePropertyVisitor(conf, inferenceEngine));
            te.visit(new SymmetricPropertyVisitor(conf, inferenceEngine));
            te.visit(new InverseOfVisitor(conf, inferenceEngine));
            te.visit(new SubPropertyOfVisitor(conf, inferenceEngine));
            te.visit(new SubClassOfVisitor(conf, inferenceEngine));
            te.visit(new SameAsVisitor(conf, inferenceEngine));
            log.info("Query after inference:");
            for (final String line : te.toString().split("\n")) {
                log.info("\t" + line);
            }
        } catch (final Exception e) {
            throw new QueryRulesetException("Error applying inference to parsed query:\n" + te, e);
        } finally {
            if (store != null) {
                try {
                    store.shutDown();
                } catch (final SailException e) {
                    log.error("Error shutting down Sail after applying inference", e);
                }
            }
        }
    }
    // Extract the StatementPatterns and Filters and turn them into rules:
    final RulesetVisitor rv = new RulesetVisitor();
    try {
        te.visit(rv);
        rv.addSchema();
    } catch (final QueryRulesetException e) {
        throw new QueryRulesetException("Error extracting rules from parsed query:\n" + te, e);
    }
    // Keep only rules that aren't generalized by some other rule:
    for (final CopyRule candidateRule : rv.rules) {
        boolean unique = true;
        for (final CopyRule otherRule : rv.rules) {
            if (!candidateRule.equals(otherRule) && otherRule.isGeneralizationOf(candidateRule)) {
                unique = false;
                break;
            }
        }
        if (unique) {
            rules.add(candidateRule);
        }
    }
}
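For reference, a minimal standalone sketch of the parsing step above, using the same openrdf parser utility that throws MalformedQueryException; the query string is a hypothetical example:

import org.openrdf.query.QueryLanguage;
import org.openrdf.query.parser.ParsedTupleQuery;
import org.openrdf.query.parser.QueryParserUtil;

public class ParseTupleQueryExample {
    public static void main(final String[] args) throws Exception {
        // Hypothetical query; any SPARQL syntax error here surfaces as a MalformedQueryException.
        final String query = "SELECT ?s WHERE { ?s <http://example.org/p> ?o }";
        final ParsedTupleQuery ptq = QueryParserUtil.parseTupleQuery(QueryLanguage.SPARQL, query, null);
        // The tuple expression is the algebra tree that setRules() walks with its visitors.
        System.out.println(ptq.getTupleExpr());
    }
}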
Use of org.openrdf.query.MalformedQueryException in project incubator-rya by apache.
The class CreatePeriodicQuery, method createPeriodicQuery.
/**
 * Creates a Periodic Query by adding the query to Fluo and using the resulting
 * Fluo id to create a {@link PeriodicQueryResultStorage} table. Additionally,
 * the associated PeriodicNotification is registered with the Periodic Query Service.
 *
 * @param sparql - SPARQL query registered with Fluo, whose results are stored in the PeriodicQueryResultStorage table
 * @param notificationClient - {@link PeriodicNotificationClient} for registering new PeriodicNotifications
 * @return FluoQuery containing the metadata of the registered SPARQL query
 */
public FluoQuery createPeriodicQuery(String sparql, PeriodicNotificationClient notificationClient) throws PeriodicQueryCreationException {
    try {
        Optional<PeriodicQueryNode> optNode = PeriodicQueryUtil.getPeriodicNode(sparql);
        if (optNode.isPresent()) {
            PeriodicQueryNode periodicNode = optNode.get();
            String pcjId = FluoQueryUtils.createNewPcjId();
            // Register the query with Fluo.
            CreateFluoPcj createPcj = new CreateFluoPcj();
            FluoQuery fluoQuery = createPcj.createPcj(pcjId, sparql, Sets.newHashSet(ExportStrategy.PERIODIC), fluoClient);
            // Register the query with the PeriodicResultStorage table.
            periodicStorage.createPeriodicQuery(pcjId, sparql);
            // Create the notification.
            PeriodicNotification notification = PeriodicNotification.builder()
                    .id(pcjId)
                    .period(periodicNode.getPeriod())
                    .timeUnit(periodicNode.getUnit())
                    .build();
            // Register the notification with the periodic notification app.
            notificationClient.addNotification(notification);
            return fluoQuery;
        } else {
            throw new RuntimeException("Invalid PeriodicQuery. Query must possess a PeriodicQuery Filter.");
        }
    } catch (MalformedQueryException | PeriodicQueryStorageException | UnsupportedQueryException e) {
        throw new PeriodicQueryCreationException(e);
    }
}
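A hedged usage sketch, assuming a CreatePeriodicQuery already wired to a FluoClient and PeriodicQueryResultStorage and a connected PeriodicNotificationClient (construction omitted); the triple patterns and predicate URIs are hypothetical, but the FILTER illustrates the periodic-function shape that PeriodicQueryUtil.getPeriodicNode looks for:

// Assumptions: createPeriodicQuery and notificationClient are initialized elsewhere;
// the window/period arguments shown here are illustrative values.
final String sparql =
        "PREFIX function: <http://org.apache.rya/function#> "
      + "PREFIX time: <http://www.w3.org/2006/time#> "
      + "SELECT ?id WHERE { "
      + "  FILTER(function:periodic(?time, 12, 6, time:hours)) " // 12-hour window, re-evaluated every 6 hours
      + "  ?obs <urn:hasTime> ?time . "
      + "  ?obs <urn:hasId> ?id "
      + "}";
final FluoQuery fluoQuery = createPeriodicQuery.createPeriodicQuery(sparql, notificationClient);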
Use of org.openrdf.query.MalformedQueryException in project incubator-rya by apache.
The class CreatePeriodicQuery, method createPeriodicQuery.
/**
 * Creates a Periodic Query by adding the query to Fluo and using the resulting
 * Fluo id to create a {@link PeriodicQueryResultStorage} table.
 *
 * @param sparql - SPARQL query registered with Fluo, whose results are stored in the PeriodicQueryResultStorage table
 * @return FluoQuery containing the metadata of the registered SPARQL query
 */
public FluoQuery createPeriodicQuery(String sparql) throws PeriodicQueryCreationException {
    try {
        Optional<PeriodicQueryNode> optNode = PeriodicQueryUtil.getPeriodicNode(sparql);
        if (optNode.isPresent()) {
            String pcjId = FluoQueryUtils.createNewPcjId();
            // Register the query with Fluo.
            CreateFluoPcj createPcj = new CreateFluoPcj();
            FluoQuery fluoQuery = createPcj.createPcj(pcjId, sparql, Sets.newHashSet(ExportStrategy.PERIODIC), fluoClient);
            // Register the query with the PeriodicResultStorage table.
            periodicStorage.createPeriodicQuery(pcjId, sparql);
            return fluoQuery;
        } else {
            throw new RuntimeException("Invalid PeriodicQuery. Query must possess a PeriodicQuery Filter.");
        }
    } catch (MalformedQueryException | PeriodicQueryStorageException | UnsupportedQueryException e) {
        throw new PeriodicQueryCreationException(e);
    }
}
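This overload is identical to the two-argument version above except that it never builds or registers a PeriodicNotification, which suits callers that manage notifications separately or register them at a later point.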
Use of org.openrdf.query.MalformedQueryException in project incubator-rya by apache.
The class MongoExecuteSparqlQuery, method executeSparqlQuery.
@Override
public String executeSparqlQuery(final String ryaInstanceName, final String sparqlQuery) throws InstanceDoesNotExistException, RyaClientException {
    requireNonNull(ryaInstanceName);
    requireNonNull(sparqlQuery);
    // Ensure the Rya instance exists.
    if (!instanceExists.exists(ryaInstanceName)) {
        throw new InstanceDoesNotExistException(String.format("There is no Rya instance named '%s'.", ryaInstanceName));
    }
    Sail sail = null;
    SailRepositoryConnection sailRepoConn = null;
    try {
        // Get a Sail object that is connected to the Rya instance.
        final MongoDBRdfConfiguration ryaConf = connectionDetails.build(ryaInstanceName);
        sail = RyaSailFactory.getInstance(ryaConf);
        final SailRepository sailRepo = new SailRepository(sail);
        sailRepoConn = sailRepo.getConnection();
        // Execute the query.
        final long start = System.currentTimeMillis();
        final TupleQuery tupleQuery = sailRepoConn.prepareTupleQuery(QueryLanguage.SPARQL, sparqlQuery);
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        final CountingSPARQLResultsCSVWriter handler = new CountingSPARQLResultsCSVWriter(baos);
        tupleQuery.evaluate(handler);
        final long end = System.currentTimeMillis();
        // Format and return the result of the query.
        final String queryResult = new String(baos.toByteArray(), StandardCharsets.UTF_8);
        final String queryDuration = new DecimalFormat("0.0##").format((end - start) / 1000.0);
        return "Query Result:\n" + queryResult + "Retrieved " + handler.getCount() + " results in " + queryDuration + " seconds.";
    } catch (SailException | RyaDAOException | InferenceEngineException | AccumuloException | AccumuloSecurityException e) {
        throw new RyaClientException("Could not create the Sail object used to query the Rya instance.", e);
    } catch (final MalformedQueryException | QueryEvaluationException | TupleQueryResultHandlerException | RepositoryException e) {
        throw new RyaClientException("Could not execute the SPARQL query.", e);
    } finally {
        // Close the resources that were opened.
        if (sailRepoConn != null) {
            try {
                sailRepoConn.close();
            } catch (final RepositoryException e) {
                log.error("Couldn't close the SailRepositoryConnection object.", e);
            }
        }
        if (sail != null) {
            try {
                sail.shutDown();
            } catch (final SailException e) {
                log.error("Couldn't close the Sail object.", e);
            }
        }
    }
}
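A minimal self-contained sketch of the same evaluate-to-CSV pattern, using an in-memory Sesame store in place of the Rya-backed Sail and the stock SPARQLResultsCSVWriter in place of Rya's CountingSPARQLResultsCSVWriter:

import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

import org.openrdf.query.QueryLanguage;
import org.openrdf.query.TupleQuery;
import org.openrdf.query.resultio.text.csv.SPARQLResultsCSVWriter;
import org.openrdf.repository.sail.SailRepository;
import org.openrdf.repository.sail.SailRepositoryConnection;
import org.openrdf.sail.memory.MemoryStore;

public class CsvQueryExample {
    public static void main(final String[] args) throws Exception {
        // In-memory store stands in for the Rya-backed Sail used above.
        final SailRepository repo = new SailRepository(new MemoryStore());
        repo.initialize();
        final SailRepositoryConnection conn = repo.getConnection();
        try {
            final TupleQuery query = conn.prepareTupleQuery(QueryLanguage.SPARQL,
                    "SELECT ?s ?p ?o WHERE { ?s ?p ?o }");
            // Stream the bindings straight into a CSV writer.
            final ByteArrayOutputStream baos = new ByteArrayOutputStream();
            query.evaluate(new SPARQLResultsCSVWriter(baos));
            System.out.println(new String(baos.toByteArray(), StandardCharsets.UTF_8));
        } finally {
            conn.close();
            repo.shutDown();
        }
    }
}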
Use of org.openrdf.query.MalformedQueryException in project incubator-rya by apache.
The class AccumuloBatchUpdatePCJ, method updatePCJResults.
private void updatePCJResults(final String ryaInstanceName, final String pcjId) throws InstanceDoesNotExistException, PCJDoesNotExistException, RyaClientException {
    // Things that have to be closed before we exit.
    Sail sail = null;
    SailConnection sailConn = null;
    CloseableIteration<? extends BindingSet, QueryEvaluationException> results = null;
    try (final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(super.getConnector(), ryaInstanceName)) {
        // Create an instance of Sail backed by the Rya instance.
        sail = connectToRya(ryaInstanceName);
        // Purge the old results from the PCJ.
        try {
            pcjStorage.purge(pcjId);
        } catch (final PCJStorageException e) {
            throw new RyaClientException("Could not batch update PCJ with ID '" + pcjId + "' because the old results could not be purged from it.", e);
        }
        // Parse the PCJ's SPARQL query.
        final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
        final String sparql = metadata.getSparql();
        final SPARQLParser parser = new SPARQLParser();
        final ParsedQuery parsedQuery = parser.parseQuery(sparql, null);
        // Execute the query.
        sailConn = sail.getConnection();
        results = sailConn.evaluate(parsedQuery.getTupleExpr(), null, null, false);
        // Load the results into the PCJ table in batches of 1000.
        final List<VisibilityBindingSet> batch = new ArrayList<>(1000);
        while (results.hasNext()) {
            final VisibilityBindingSet result = new VisibilityBindingSet(results.next(), "");
            batch.add(result);
            if (batch.size() == 1000) {
                pcjStorage.addResults(pcjId, batch);
                batch.clear();
            }
        }
        if (!batch.isEmpty()) {
            pcjStorage.addResults(pcjId, batch);
            batch.clear();
        }
    } catch (final MalformedQueryException | PCJStorageException | SailException | QueryEvaluationException e) {
        throw new RyaClientException("Failed to batch load new results into the PCJ with ID '" + pcjId + "'.", e);
    } finally {
        if (results != null) {
            try {
                results.close();
            } catch (final QueryEvaluationException e) {
                log.warn(e.getMessage(), e);
            }
        }
        if (sailConn != null) {
            try {
                sailConn.close();
            } catch (final SailException e) {
                log.warn(e.getMessage(), e);
            }
        }
        if (sail != null) {
            try {
                sail.shutDown();
            } catch (final SailException e) {
                log.warn(e.getMessage(), e);
            }
        }
    }
}
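The loading loop above is a standard batch-and-flush pattern: buffer up to a fixed capacity, flush when full, then flush whatever remains. A generic sketch of just that pattern (names are hypothetical):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.function.Consumer;

public final class BatchFlusher {
    // Drains the iterator, handing items to the sink in batches of at most batchSize.
    static <T> void drainInBatches(final Iterator<T> source, final int batchSize, final Consumer<List<T>> sink) {
        final List<T> batch = new ArrayList<>(batchSize);
        while (source.hasNext()) {
            batch.add(source.next());
            if (batch.size() == batchSize) {
                sink.accept(batch);
                batch.clear();
            }
        }
        if (!batch.isEmpty()) {
            sink.accept(batch); // flush the final partial batch
        }
    }
}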