Use of org.openrdf.query.algebra.Reduced in project incubator-rya by apache.
The execute method of the StreamResultsCommand class:
@Override
public void execute(final String[] args) throws ArgumentsException, ExecutionException {
    requireNonNull(args);

    // Parse the command line arguments.
    final StreamResultsParameters params = new StreamResultsParameters();
    try {
        new JCommander(params, args);
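        // NOTE: newer JCommander releases deprecate this constructor in favor of
        // JCommander.newBuilder().addObject(params).build().parse(args).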
    } catch (final ParameterException e) {
        throw new ArgumentsException("Could not stream the query's results because of invalid command line parameters.", e);
    }
    // Create the Kafka backed QueryChangeLog.
    final String bootstrapServers = params.kafkaIP + ":" + params.kafkaPort;
    final String topic = KafkaTopics.queryChangeLogTopic(params.ryaInstance);
    final QueryChangeLog queryChangeLog = KafkaQueryChangeLogFactory.make(bootstrapServers, topic);
    // Parse the Query ID from the command line parameters.
    final UUID queryId;
    try {
        queryId = UUID.fromString(params.queryId);
    } catch (final IllegalArgumentException e) {
        throw new ArgumentsException("Invalid Query ID " + params.queryId, e);
    }
    // This command doesn't use the scheduled service feature of the QueryRepository.
    final Scheduler scheduler = Scheduler.newFixedRateSchedule(0L, 5, TimeUnit.SECONDS);
    final QueryRepository queryRepo = new InMemoryQueryRepository(queryChangeLog, scheduler);
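    // NOTE: a poll schedule must still be supplied because the constructor requires
    // one, even though this command never starts the repository's scheduled service.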
    // Fetch the SPARQL of the query whose results will be streamed.
    final Optional<StreamsQuery> sQuery;
    try {
        sQuery = queryRepo.get(queryId);
    } catch (final Exception e) {
        throw new ExecutionException("Problem encountered while fetching the query from the QueryRepository.", e);
    }
    if (!sQuery.isPresent()) {
        throw new ExecutionException("Could not read the results for query with ID " + queryId + " because no such query exists.");
    }
    final String sparql = sQuery.get().getSparql();
    // This command executes until the application is killed, so register a shutdown
    // hook that flips a kill flag checked by the polling loop below.
    final AtomicBoolean finished = new AtomicBoolean(false);
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            finished.set(true);
        }
    });
    // Build the interactor based on the type of result the query produces.
    final GetQueryResultStream<?> getQueryResultStream;
    try {
        final TupleExpr tupleExpr = new SPARQLParser().parseQuery(sparql, null).getTupleExpr();
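        // The Sesame SPARQL parser roots a CONSTRUCT query's algebra in a Reduced node,
        // so this check separates statement-producing CONSTRUCT queries from SELECT
        // queries, whose results are binding sets.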
        if (tupleExpr instanceof Reduced) {
            getQueryResultStream = new KafkaGetQueryResultStream<>(params.kafkaIP, params.kafkaPort, VisibilityStatementDeserializer.class);
        } else {
            getQueryResultStream = new KafkaGetQueryResultStream<>(params.kafkaIP, params.kafkaPort, VisibilityBindingSetDeserializer.class);
        }
    } catch (final MalformedQueryException e) {
        throw new ExecutionException("Could not parse the SPARQL for the query: " + sparql, e);
    }
    // Iterate through the results and print them to the console until the program or the stream ends.
    try (final QueryResultStream<?> stream = getQueryResultStream.fromStart(params.ryaInstance, queryId)) {
        while (!finished.get()) {
            for (final Object result : stream.poll(1000)) {
                System.out.println(result);
            }
        }
    } catch (final Exception e) {
        System.err.println("Error while reading the results from the stream.");
        e.printStackTrace();
        System.exit(1);
    }
}
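For context, the instanceof Reduced branch above relies on a Sesame parser detail: the SPARQL parser roots a CONSTRUCT query's algebra tree in a Reduced node, while a plain SELECT query's tree is rooted in a Projection. Below is a minimal standalone sketch of that behavior; the class name ReducedCheckDemo is invented for illustration, and the printed values are what the parser is expected to produce, not output taken from the Rya project.

import org.openrdf.query.algebra.Reduced;
import org.openrdf.query.algebra.TupleExpr;
import org.openrdf.query.parser.sparql.SPARQLParser;

public class ReducedCheckDemo {
    public static void main(final String[] args) throws Exception {
        final SPARQLParser parser = new SPARQLParser();

        // CONSTRUCT queries emit RDF statements; the parser wraps their algebra in Reduced.
        final TupleExpr construct = parser
                .parseQuery("CONSTRUCT { ?s ?p ?o } WHERE { ?s ?p ?o }", null)
                .getTupleExpr();
        System.out.println(construct instanceof Reduced); // expected: true

        // SELECT queries emit binding sets; their algebra is rooted in a Projection.
        final TupleExpr select = parser
                .parseQuery("SELECT ?s WHERE { ?s ?p ?o }", null)
                .getTupleExpr();
        System.out.println(select instanceof Reduced); // expected: false
    }
}

This is why the command can pick VisibilityStatementDeserializer for CONSTRUCT results and VisibilityBindingSetDeserializer for everything else without inspecting the query text itself.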