Use of org.apache.rya.streams.api.entity.StreamsQuery in project incubator-rya by apache.
From the class RyaStreamsCommands, method addQuery:
@CliCommand(value = STREAM_QUERIES_ADD_CMD, help = "Add a SPARQL query to the Rya Streams subsystem.")
public String addQuery(
        @CliOption(key = { "inactive" }, mandatory = false, unspecifiedDefaultValue = "false", specifiedDefaultValue = "true",
                help = "Setting this flag will add the query, but not run it. (default: false)")
        final boolean inactive,
        @CliOption(key = { "insert" }, mandatory = false, unspecifiedDefaultValue = "false", specifiedDefaultValue = "true",
                help = "Setting this flag will insert the query's results back into Rya. (default: false)")
        final boolean isInsert) {
    final RyaStreamsClient streamsClient = state.getShellState().getRyaStreamsCommands().get();
    // Prompt the user for the SPARQL that defines the query.
    try {
        final Optional<String> sparql = sparqlPrompt.getSparql();
        // If the user aborted the prompt, return.
        if (!sparql.isPresent()) {
            return "";
        }
        final boolean isConstructQuery = QueryInvestigator.isConstruct(sparql.get());
        final boolean isInsertQuery = QueryInvestigator.isInsertWhere(sparql.get());
        // If the user wants to insert a CONSTRUCT into Rya, print a warning.
        if (isInsert && isConstructQuery) {
            consolePrinter.println("WARNING: CONSTRUCT is part of the SPARQL Query API, so its results do not normally");
            consolePrinter.println("get written back to the triple store. Consider using an INSERT, which is");
            consolePrinter.println("part of the SPARQL Update API, in the future.");
        }
        // If the user wants to use an INSERT query, but not insert its results back into Rya, suggest using a CONSTRUCT.
        if (!isInsert && isInsertQuery) {
            consolePrinter.println("WARNING: INSERT is part of the SPARQL Update API, so its results normally get written");
            consolePrinter.println("back to the triple store. Consider using a CONSTRUCT, which is part of the");
            consolePrinter.println("SPARQL Query API, in the future.");
        }
        // If the user wants to insert the query's results back into Rya, make sure the query is allowed to do that.
        if (isInsert && !(isConstructQuery || isInsertQuery)) {
            throw new RuntimeException("Only CONSTRUCT queries and INSERT updates may be inserted back to the triple store.");
        }
        final StreamsQuery streamsQuery = streamsClient.getAddQuery().addQuery(sparql.get(), !inactive, isInsert);
        return "The added query's ID is " + streamsQuery.getQueryId();
    } catch (final MalformedQueryException | IOException | RyaStreamsException e) {
        throw new RuntimeException("Unable to add the SPARQL query to the Rya Streams subsystem.", e);
    }
}
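The command above delegates the actual work to the RyaStreamsClient it looks up from the shell state. A minimal sketch of that underlying call follows, using only the API visible in this snippet; the streamsClient variable and the AddQuery interaction come from the code above, while the SPARQL text and flag values are illustrative placeholders.

// Illustrative sketch, not part of RyaStreamsCommands: adding a query directly through the
// client, mirroring the call the shell command makes above. The SPARQL text and flag values
// are placeholders.
final String sparql = "SELECT * WHERE { ?s ?p ?o . }";
final boolean isActive = true;   // the shell passes !inactive here
final boolean isInsert = false;  // do not write results back into Rya
final StreamsQuery added = streamsClient.getAddQuery().addQuery(sparql, isActive, isInsert);
System.out.println("The added query's ID is " + added.getQueryId());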
Use of org.apache.rya.streams.api.entity.StreamsQuery in project incubator-rya by apache.
From the class QueryEventWorkerTest, method executingWork:
@Test
public void executingWork() throws Exception {
    // The signal that will kill the working thread.
    final AtomicBoolean shutdownSignal = new AtomicBoolean(false);
    // The queue used to send the execute work to the thread.
    final BlockingQueue<QueryEvent> queue = new ArrayBlockingQueue<>(1);
    // The message that indicates a query needs to be executed.
    final String ryaInstance = "rya";
    final StreamsQuery query = new StreamsQuery(UUID.randomUUID(), "sparql", true, false);
    final QueryEvent executingEvent = QueryEvent.executing(ryaInstance, query);
    // Release a latch if the startQuery method on the queryExecutor is invoked with the correct values.
    final CountDownLatch startQueryInvoked = new CountDownLatch(1);
    final QueryExecutor queryExecutor = mock(QueryExecutor.class);
    doAnswer(invocation -> {
        startQueryInvoked.countDown();
        return null;
    }).when(queryExecutor).startQuery(ryaInstance, query);
    // The thread that will perform the QueryEventWorker task.
    final Thread queryEventWorker = new Thread(new QueryEventWorker(queue, queryExecutor, 50, TimeUnit.MILLISECONDS, shutdownSignal));
    try {
        queryEventWorker.start();
        // Provide a message indicating a query needs to be executed.
        queue.put(executingEvent);
        // Verify the Query Executor was told to start the query.
        assertTrue(startQueryInvoked.await(150, TimeUnit.MILLISECONDS));
    } finally {
        shutdownSignal.set(true);
        queryEventWorker.join();
    }
}
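The 50 millisecond argument passed to the QueryEventWorker constructor above suggests the worker polls the queue with a timeout rather than blocking indefinitely, which is why flipping shutdownSignal in the finally block is enough to stop the thread. The following is a hedged sketch of that loop, stated as an assumption rather than the project's actual implementation.

// Assumed poll-with-timeout loop (not the project's QueryEventWorker source). Polling
// instead of a blocking take() lets the worker notice the shutdown signal promptly.
final Runnable workerLoop = () -> {
    try {
        while (!shutdownSignal.get()) {
            final QueryEvent event = queue.poll(50, TimeUnit.MILLISECONDS);
            if (event != null) {
                // Dispatch to the QueryExecutor here, e.g. startQuery(...) for an executing
                // event. The QueryEvent accessor names are not shown in the test above.
            }
        }
    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
    }
};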
Use of org.apache.rya.streams.api.entity.StreamsQuery in project incubator-rya by apache.
From the class LocalQueryExecutorIT, method runQuery:
@Test
public void runQuery() throws Exception {
    // Test values.
    final String ryaInstance = "rya";
    final StreamsQuery sQuery = new StreamsQuery(UUID.randomUUID(), "SELECT * WHERE { ?person <urn:worksAt> ?business . }", true, false);
    // Create the statements that will be loaded.
    final ValueFactory vf = new ValueFactoryImpl();
    final List<VisibilityStatement> statements = new ArrayList<>();
    statements.add(new VisibilityStatement(vf.createStatement(vf.createURI("urn:Alice"), vf.createURI("urn:worksAt"), vf.createURI("urn:BurgerJoint")), "a"));
    statements.add(new VisibilityStatement(vf.createStatement(vf.createURI("urn:Bob"), vf.createURI("urn:worksAt"), vf.createURI("urn:TacoShop")), "a"));
    statements.add(new VisibilityStatement(vf.createStatement(vf.createURI("urn:Charlie"), vf.createURI("urn:worksAt"), vf.createURI("urn:TacoShop")), "a"));
    // Create the expected results.
    final List<VisibilityBindingSet> expected = new ArrayList<>();
    MapBindingSet bs = new MapBindingSet();
    bs.addBinding("person", vf.createURI("urn:Alice"));
    bs.addBinding("business", vf.createURI("urn:BurgerJoint"));
    expected.add(new VisibilityBindingSet(bs, "a"));
    bs = new MapBindingSet();
    bs.addBinding("person", vf.createURI("urn:Bob"));
    bs.addBinding("business", vf.createURI("urn:TacoShop"));
    expected.add(new VisibilityBindingSet(bs, "a"));
    bs = new MapBindingSet();
    bs.addBinding("person", vf.createURI("urn:Charlie"));
    bs.addBinding("business", vf.createURI("urn:TacoShop"));
    expected.add(new VisibilityBindingSet(bs, "a"));
    // Start the executor that will be tested.
    final CreateKafkaTopic createKafkaTopic = new CreateKafkaTopic(kafka.getZookeeperServers());
    final String kafkaServers = kafka.getKafkaHostname() + ":" + kafka.getKafkaPort();
    final KafkaStreamsFactory jobFactory = new SingleThreadKafkaStreamsFactory(kafkaServers);
    final QueryExecutor executor = new LocalQueryExecutor(createKafkaTopic, jobFactory);
    executor.startAndWait();
    try {
        // Start the query.
        executor.startQuery(ryaInstance, sQuery);
        // Wait for the program to start.
        Thread.sleep(5000);
        // Write some statements to the program.
        final String statementsTopic = KafkaTopics.statementsTopic(ryaInstance);
        final LoadStatements loadStatements = new KafkaLoadStatements(statementsTopic, stmtProducer);
        loadStatements.fromCollection(statements);
        // Read the output of the streams program.
        final String resultsTopic = KafkaTopics.queryResultsTopic(ryaInstance, sQuery.getQueryId());
        resultConsumer.subscribe(Lists.newArrayList(resultsTopic));
        final List<VisibilityBindingSet> results = KafkaTestUtil.pollForResults(500, 6, 3, resultConsumer);
        assertEquals(expected, results);
    } finally {
        executor.stopAndWait();
    }
}
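The expected-results setup above repeats the same three lines for each person. A small helper like the following is an illustrative addition, not part of the original test; it uses only the APIs already present in the snippet and would keep that section compact.

// Illustrative helper for building one expected result row.
private static VisibilityBindingSet expectedResult(final ValueFactory vf, final String person, final String business, final String visibility) {
    final MapBindingSet bs = new MapBindingSet();
    bs.addBinding("person", vf.createURI(person));
    bs.addBinding("business", vf.createURI(business));
    return new VisibilityBindingSet(bs, visibility);
}

The test body could then build the list with calls such as expected.add(expectedResult(vf, "urn:Alice", "urn:BurgerJoint", "a")).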
Use of org.apache.rya.streams.api.entity.StreamsQuery in project incubator-rya by apache.
From the class RunQueryCommand, method execute:
@Override
public void execute(final String[] args) throws ArgumentsException, ExecutionException {
    requireNonNull(args);
    // Parse the command line arguments.
    final RunParameters params = new RunParameters();
    try {
        new JCommander(params, args);
    } catch (final ParameterException e) {
        throw new ArgumentsException("Could not add a new query because of invalid command line parameters.", e);
    }
    // Create the Kafka backed QueryChangeLog.
    final String bootstrapServers = params.kafkaIP + ":" + params.kafkaPort;
    final String topic = KafkaTopics.queryChangeLogTopic(params.ryaInstance);
    final QueryChangeLog queryChangeLog = KafkaQueryChangeLogFactory.make(bootstrapServers, topic);
    // The RunQuery command doesn't use the scheduled service feature.
    final Scheduler scheduler = Scheduler.newFixedRateSchedule(0L, 5, TimeUnit.SECONDS);
    final QueryRepository queryRepo = new InMemoryQueryRepository(queryChangeLog, scheduler);
    // Look up the query to be executed from the change log.
    try {
        try {
            final UUID queryId = UUID.fromString(params.queryId);
            final Optional<StreamsQuery> query = queryRepo.get(queryId);
            if (!query.isPresent()) {
                throw new ArgumentsException("There is no registered query for queryId " + params.queryId);
            }
            // Make sure the topics required by the application exist for the specified Rya instance.
            final Set<String> topics = new HashSet<>();
            topics.add(KafkaTopics.statementsTopic(params.ryaInstance));
            topics.add(KafkaTopics.queryResultsTopic(params.ryaInstance, queryId));
            KafkaTopics.createTopics(params.zookeeperServers, topics, 1, 1);
            // Run the query that uses those topics.
            final KafkaRunQuery runQuery = new KafkaRunQuery(
                    params.kafkaIP,
                    params.kafkaPort,
                    KafkaTopics.statementsTopic(params.ryaInstance),
                    KafkaTopics.queryResultsTopic(params.ryaInstance, queryId),
                    queryRepo,
                    new TopologyFactory());
            runQuery.run(queryId);
        } catch (final Exception e) {
            throw new ExecutionException("Could not execute the Run Query command.", e);
        }
    } catch (final ExecutionException e) {
        // Rethrow the exceptions that are advertised by execute.
        throw e;
    } catch (final Exception e) {
        throw new ExecutionException("Problem encountered while closing the QueryRepository.", e);
    }
}
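The RunParameters fields referenced above (kafkaIP, kafkaPort, ryaInstance, queryId, zookeeperServers) are bound by JCommander from the command line. The sketch below shows a programmatic invocation; the short flag names are an assumption borrowed from the DeleteQueryCommandIT example that follows, and RunParameters may declare different ones, including whatever flag supplies the ZooKeeper servers.

// Hypothetical invocation; flag names are assumptions, not confirmed by this snippet.
final RunQueryCommand command = new RunQueryCommand();
final String[] runArgs = new String[] {
        "-r", "rya",                                   // Rya instance name
        "-i", "localhost",                             // Kafka broker hostname
        "-p", "9092",                                  // Kafka broker port
        "-q", "00000000-0000-0000-0000-000000000000"   // ID of a registered StreamsQuery
};
command.execute(runArgs);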
Use of org.apache.rya.streams.api.entity.StreamsQuery in project incubator-rya by apache.
From the class DeleteQueryCommandIT, method shortParams:
@Test
public void shortParams() throws Exception {
    // Add a few queries to Rya Streams.
    queryRepo.add("query1", true, true);
    final UUID query2Id = queryRepo.add("query2", false, true).getQueryId();
    queryRepo.add("query3", true, false);
    // Show that all three of the queries were added.
    Set<StreamsQuery> queries = queryRepo.list();
    assertEquals(3, queries.size());
    // Delete query 2 using the delete query command.
    final String[] deleteArgs = new String[] { "-r", ryaInstance, "-i", kafka.getKafkaHostname(), "-p", kafka.getKafkaPort(), "-q", query2Id.toString() };
    final DeleteQueryCommand deleteCommand = new DeleteQueryCommand();
    deleteCommand.execute(deleteArgs);
    // Show query2 was deleted.
    queries = queryRepo.list();
    assertEquals(2, queries.size());
    for (final StreamsQuery query : queries) {
        assertNotEquals(query2Id, query.getQueryId());
    }
}
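A slightly stricter check could assert exactly which queries remain rather than only counting them. The sketch below is illustrative; the getSparql() getter is assumed from the constructor arguments shown earlier on this page, and Sets is the same Guava utility family as the Lists call used above.

// Illustrative stricter assertion (getSparql() is an assumed getter).
final Set<String> remainingSparql = new HashSet<>();
for (final StreamsQuery query : queryRepo.list()) {
    remainingSparql.add(query.getSparql());
}
assertEquals(Sets.newHashSet("query1", "query3"), remainingSparql);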