use of org.apache.rya.streams.api.queries.QueryChangeLog in project incubator-rya by apache.
the class KafkaQueryChangeLogSourceIT method newListenerReceivesAllKnownLogs.
@Test
public void newListenerReceivesAllKnownLogs() throws Exception {
    // Create a valid Query Change Log topic.
    final String ryaInstance = UUID.randomUUID().toString();
    final String topic = KafkaTopics.queryChangeLogTopic(ryaInstance);
    kafka.createTopic(topic);

    // Create the source.
    final QueryChangeLogSource source = new KafkaQueryChangeLogSource(
            kafka.getKafkaHostname(),
            Integer.parseInt(kafka.getKafkaPort()),
            Scheduler.newFixedRateSchedule(0, 1, TimeUnit.SECONDS));

    // Register a listener that counts down a latch if it sees the new topic.
    final CountDownLatch created = new CountDownLatch(1);
    source.subscribe(new SourceListener() {
        @Override
        public void notifyCreate(final String ryaInstanceName, final QueryChangeLog log) {
            assertEquals(ryaInstance, ryaInstanceName);
            created.countDown();
        }

        @Override
        public void notifyDelete(final String ryaInstanceName) {
        }
    });

    try {
        // Start the source.
        source.startAndWait();

        // Wait for that first listener to indicate the topic was created. This means that one has been cached.
        assertTrue(created.await(5, TimeUnit.SECONDS));

        // Register a second listener that counts down when that same topic is encountered. This means the
        // newly subscribed listener was notified with the already known change log.
        final CountDownLatch newListenerCreated = new CountDownLatch(1);
        source.subscribe(new SourceListener() {
            @Override
            public void notifyCreate(final String ryaInstanceName, final QueryChangeLog log) {
                assertEquals(ryaInstance, ryaInstanceName);
                newListenerCreated.countDown();
            }

            @Override
            public void notifyDelete(final String ryaInstanceName) {
            }
        });
        assertTrue(newListenerCreated.await(5, TimeUnit.SECONDS));
    } finally {
        source.stopAndWait();
    }
}
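For context, a minimal sketch (not part of the project) of a standalone SourceListener that caches every change log it is notified about, keyed by Rya instance name. The package of the nested SourceListener interface is an assumption, as is the knownLogs() accessor.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.rya.streams.api.queries.QueryChangeLog;
// Assumption: SourceListener is the nested listener interface of QueryChangeLogSource.
import org.apache.rya.streams.querymanager.QueryChangeLogSource.SourceListener;

public class CachingSourceListener implements SourceListener {

    private final Map<String, QueryChangeLog> logs = new ConcurrentHashMap<>();

    @Override
    public void notifyCreate(final String ryaInstanceName, final QueryChangeLog log) {
        // Remember the change log that backs this Rya instance.
        logs.put(ryaInstanceName, log);
    }

    @Override
    public void notifyDelete(final String ryaInstanceName) {
        // Forget the change log once its topic is gone.
        logs.remove(ryaInstanceName);
    }

    public Map<String, QueryChangeLog> knownLogs() {
        return logs;
    }
}

Because the source replays already known change logs to new subscribers, as the test above demonstrates, a listener like this ends up with the full set of logs even if it subscribes late.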
use of org.apache.rya.streams.api.queries.QueryChangeLog in project incubator-rya by apache.
the class LogEventWorkerTest method nofity_logCreated_exists.
@Test
public void nofity_logCreated_exists() throws Exception {
    // The signal that will kill the working thread.
    final AtomicBoolean shutdownSignal = new AtomicBoolean(false);

    // The queue used to feed work.
    final BlockingQueue<LogEvent> logEventQueue = new ArrayBlockingQueue<>(10);

    // The queue work is written to.
    final BlockingQueue<QueryEvent> queryEventQueue = new ArrayBlockingQueue<>(10);

    // The Query Change Log that will be watched.
    final QueryChangeLog changeLog = new InMemoryQueryChangeLog();

    // Write a message that indicates a new query should be active.
    final UUID firstQueryId = UUID.randomUUID();
    changeLog.write(QueryChange.create(firstQueryId, "select * where { ?a ?b ?c . }", true, false));

    // Start the worker that will be tested.
    final Thread logEventWorker = new Thread(new LogEventWorker(logEventQueue, queryEventQueue, 50, TimeUnit.MILLISECONDS, shutdownSignal));
    logEventWorker.start();

    try {
        // Write a unit of work that indicates a log was created.
        final LogEvent createLogEvent = LogEvent.create("rya", changeLog);
        logEventQueue.offer(createLogEvent);

        // Say the same log was created a second time.
        logEventQueue.offer(createLogEvent);

        // Show that only a single unit of work was added for the log. This indicates the second
        // message was effectively skipped; otherwise its work would have been added twice.
        final Set<QueryEvent> expectedEvents = new HashSet<>();
        expectedEvents.add(QueryEvent.executing("rya", new StreamsQuery(firstQueryId, "select * where { ?a ?b ?c . }", true, false)));

        final Set<QueryEvent> queryEvents = new HashSet<>();
        queryEvents.add(queryEventQueue.poll(500, TimeUnit.MILLISECONDS));
        assertNull(queryEventQueue.poll(500, TimeUnit.MILLISECONDS));

        assertEquals(expectedEvents, queryEvents);
    } finally {
        shutdownSignal.set(true);
        logEventWorker.join();
    }
}
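To show where the QueryEvent queue goes next, here is a rough sketch, not project code, of a worker that drains the queue the LogEventWorker fills. The QueryEvent import path and the handleEvent(...) callback are assumptions.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

// Assumption: QueryEvent lives in the query manager package used by LogEventWorker.
import org.apache.rya.streams.querymanager.QueryEvent;

public class QueryEventDrain implements Runnable {

    private final BlockingQueue<QueryEvent> queryEventQueue;
    private final AtomicBoolean shutdownSignal;

    public QueryEventDrain(final BlockingQueue<QueryEvent> queryEventQueue, final AtomicBoolean shutdownSignal) {
        this.queryEventQueue = queryEventQueue;
        this.shutdownSignal = shutdownSignal;
    }

    @Override
    public void run() {
        while (!shutdownSignal.get()) {
            try {
                // Poll with a short timeout so the shutdown flag is re-checked regularly,
                // mirroring the 50 ms period used by the worker in the test above.
                final QueryEvent event = queryEventQueue.poll(50, TimeUnit.MILLISECONDS);
                if (event != null) {
                    handleEvent(event);
                }
            } catch (final InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }
        }
    }

    private void handleEvent(final QueryEvent event) {
        // Hypothetical: forward the event to whatever component starts or stops query execution.
    }
}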
use of org.apache.rya.streams.api.queries.QueryChangeLog in project incubator-rya by apache.
the class RunQueryCommandIT method setup.
@Before
public void setup() {
    // Make sure the topic that the change log uses exists.
    final String changeLogTopic = KafkaTopics.queryChangeLogTopic("" + ryaInstance);
    kafka.createTopic(changeLogTopic);

    // Set up the QueryRepository used by the test.
    final Producer<?, QueryChange> queryProducer = KafkaTestUtil.makeProducer(kafka, StringSerializer.class, QueryChangeSerializer.class);
    final Consumer<?, QueryChange> queryConsumer = KafkaTestUtil.fromStartConsumer(kafka, StringDeserializer.class, QueryChangeDeserializer.class);
    final QueryChangeLog changeLog = new KafkaQueryChangeLog(queryProducer, queryConsumer, changeLogTopic);
    queryRepo = new InMemoryQueryRepository(changeLog, Scheduler.newFixedRateSchedule(0L, 5, TimeUnit.SECONDS));

    // Initialize the Statements Producer and the Results Consumer.
    stmtProducer = KafkaTestUtil.makeProducer(kafka, StringSerializer.class, VisibilityStatementSerializer.class);
    resultConsumer = KafkaTestUtil.fromStartConsumer(kafka, StringDeserializer.class, VisibilityBindingSetDeserializer.class);
}
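The setup above opens Kafka clients. A hedged sketch of a matching @After method follows; it assumes nothing else (such as a KafkaTestUtil rule) already closes these clients, which may not match the real RunQueryCommandIT cleanup.

@After
public void cleanup() {
    // Assumption: no JUnit rule closes these clients; the real test may handle this differently.
    stmtProducer.close();
    resultConsumer.close();
}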
use of org.apache.rya.streams.api.queries.QueryChangeLog in project incubator-rya by apache.
the class ListQueriesCommand method execute.
@Override
public void execute(final String[] args) throws ArgumentsException, ExecutionException {
    requireNonNull(args);

    // Parse the command line arguments.
    final KafkaParameters params = new KafkaParameters();
    try {
        new JCommander(params, args);
    } catch (final ParameterException e) {
        throw new ArgumentsException("Could not list the queries because of invalid command line parameters.", e);
    }

    // Create the Kafka backed QueryChangeLog.
    final String bootstrapServers = params.kafkaIP + ":" + params.kafkaPort;
    final String topic = KafkaTopics.queryChangeLogTopic(params.ryaInstance);
    final QueryChangeLog queryChangeLog = KafkaQueryChangeLogFactory.make(bootstrapServers, topic);

    // The ListQueries command doesn't use the scheduled service feature.
    final Scheduler scheduler = Scheduler.newFixedRateSchedule(0L, 5, TimeUnit.SECONDS);
    final QueryRepository queryRepo = new InMemoryQueryRepository(queryChangeLog, scheduler);

    // Execute the list queries command.
    try {
        final ListQueries listQueries = new DefaultListQueries(queryRepo);
        try {
            final Set<StreamsQuery> queries = listQueries.all();
            System.out.println(formatQueries(queries));
        } catch (final RyaStreamsException e) {
            System.err.println("Unable to retrieve the queries.");
            e.printStackTrace();
            System.exit(1);
        }
    } catch (final Exception e) {
        System.err.println("Problem encountered while closing the QueryRepository.");
        e.printStackTrace();
        System.exit(1);
    }
}
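The formatQueries(...) helper called above is not shown. The following is a hypothetical sketch of such a formatter; the StreamsQuery getter names (getQueryId, isActive, getSparql) are assumptions, not verified against the project.

private static String formatQueries(final Set<StreamsQuery> queries) {
    final StringBuilder report = new StringBuilder("Queries in Rya Streams:\n");
    for (final StreamsQuery query : queries) {
        report.append("    ID: ").append(query.getQueryId())      // assumed getter
              .append(", Active: ").append(query.isActive())      // assumed getter
              .append(", SPARQL: ").append(query.getSparql())     // assumed getter
              .append('\n');
    }
    return report.toString();
}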
use of org.apache.rya.streams.api.queries.QueryChangeLog in project incubator-rya by apache.
the class DeleteQueryCommand method execute.
@Override
public void execute(final String[] args) throws ArgumentsException, ExecutionException {
    requireNonNull(args);

    // Parse the command line arguments.
    final RemoveParameters params = new RemoveParameters();
    try {
        new JCommander(params, args);
    } catch (final ParameterException e) {
        throw new ArgumentsException("Could not delete the query because of invalid command line parameters.", e);
    }

    // Create the Kafka backed QueryChangeLog.
    final String bootstrapServers = params.kafkaIP + ":" + params.kafkaPort;
    final String topic = KafkaTopics.queryChangeLogTopic(params.ryaInstance);
    final QueryChangeLog queryChangeLog = KafkaQueryChangeLogFactory.make(bootstrapServers, topic);

    // The DeleteQuery command doesn't use the scheduled service feature.
    final Scheduler scheduler = Scheduler.newFixedRateSchedule(0L, 5, TimeUnit.SECONDS);
    final QueryRepository queryRepo = new InMemoryQueryRepository(queryChangeLog, scheduler);

    // Execute the delete query command.
    try {
        final DeleteQuery deleteQuery = new DefaultDeleteQuery(queryRepo);
        try {
            deleteQuery.delete(UUID.fromString(params.queryId));
            System.out.println("Deleted query: " + params.queryId);
        } catch (final RyaStreamsException e) {
            System.err.println("Unable to delete query with ID: " + params.queryId);
            e.printStackTrace();
            System.exit(1);
        }
    } catch (final Exception e) {
        System.err.println("Problem encountered while closing the QueryRepository.");
        e.printStackTrace();
        System.exit(1);
    }
}
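For orientation, a hedged sketch of invoking the command programmatically. Every flag name below is a guess at how RemoveParameters/KafkaParameters are annotated for JCommander and has not been checked against the project; the UUID is a placeholder.

public static void main(final String[] args) throws Exception {
    new DeleteQueryCommand().execute(new String[] {
            "--ryaInstance", "rya",                               // hypothetical flag name
            "--kafkaHostname", "localhost",                       // hypothetical flag name
            "--kafkaPort", "9092",                                // hypothetical flag name
            "--queryId", "00000000-0000-0000-0000-000000000000"   // hypothetical flag name; the query's UUID
    });
}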