Use of org.apache.rya.streams.kafka.SingleThreadKafkaStreamsFactory in project incubator-rya by apache.
The class QueryManagerDaemon, method init:
@Override
public void init(final DaemonContext context) throws DaemonInitException, Exception {
    requireNonNull(context);

    // Parse the command line arguments for the configuration file to use.
    final String[] args = context.getArguments();
    final DaemonParameters params = new DaemonParameters();
    try {
        new JCommander(params).parse(args);
    } catch (final ParameterException e) {
        throw new DaemonInitException("Unable to parse the command line arguments.", e);
    }
    final Path configFile = params.config != null ? Paths.get(params.config) : DEFAULT_CONFIGURATION_PATH;
    log.info("Loading the following configuration file: " + configFile);

    // Unmarshall the configuration file into an object.
    final QueryManagerConfig config;
    try (final InputStream stream = Files.newInputStream(configFile)) {
        config = QueryManagerConfigUnmarshaller.unmarshall(stream);
    } catch (final JAXBException | SAXException e) {
        throw new DaemonInitException("Unable to marshall the configuration XML file: " + configFile, e);
    }

    // Read the source polling period from the configuration.
    final QueryChanngeLogDiscoveryPeriod periodConfig = config.getPerformanceTunning().getQueryChanngeLogDiscoveryPeriod();
    final long period = periodConfig.getValue().longValue();
    final TimeUnit units = TimeUnit.valueOf(periodConfig.getUnits().toString());
    log.info("Query Change Log Polling Period: " + period + " " + units);
    final Scheduler scheduler = Scheduler.newFixedRateSchedule(0, period, units);

    // Initialize a QueryChangeLogSource.
    final Kafka kafka = config.getQueryChangeLogSource().getKafka();
    log.info("Kafka Source: " + kafka.getHostname() + ":" + kafka.getPort());
    final QueryChangeLogSource source = new KafkaQueryChangeLogSource(kafka.getHostname(), kafka.getPort(), scheduler);

    // Initialize a QueryExecutor.
    final String zookeeperServers = config.getQueryExecutor().getLocalKafkaStreams().getZookeepers();
    final KafkaStreamsFactory streamsFactory = new SingleThreadKafkaStreamsFactory(kafka.getHostname() + ":" + kafka.getPort());
    final QueryExecutor queryExecutor = new LocalQueryExecutor(new CreateKafkaTopic(zookeeperServers), streamsFactory);

    // Initialize the QueryManager using the configured resources.
    manager = new QueryManager(queryExecutor, source, period, units);
}
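The wiring above reduces to a handful of constructor calls once the daemon and configuration plumbing is removed. The following is a minimal sketch, not project code: the broker host and port, the ZooKeeper address, the polling period, and the buildQueryManager helper name are placeholder assumptions, and the port is assumed to be an int (matching how kafka.getPort() is used above).

// Minimal sketch of the wiring performed in init(), using only the constructors
// shown above. Host, ports, and the polling period are placeholder assumptions.
private QueryManager buildQueryManager() {
    final String kafkaHost = "localhost"; // assumed Kafka broker host
    final int kafkaPort = 9092;           // assumed Kafka broker port
    final long period = 30;               // assumed polling period
    final TimeUnit units = TimeUnit.SECONDS;

    // Poll the Kafka-hosted query change log on a fixed schedule.
    final Scheduler scheduler = Scheduler.newFixedRateSchedule(0, period, units);
    final QueryChangeLogSource source = new KafkaQueryChangeLogSource(kafkaHost, kafkaPort, scheduler);

    // Each registered query runs as its own single-threaded Kafka Streams job.
    final KafkaStreamsFactory streamsFactory = new SingleThreadKafkaStreamsFactory(kafkaHost + ":" + kafkaPort);
    final QueryExecutor queryExecutor = new LocalQueryExecutor(new CreateKafkaTopic("localhost:2181"), streamsFactory);

    return new QueryManager(queryExecutor, source, period, units);
}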
Use of org.apache.rya.streams.kafka.SingleThreadKafkaStreamsFactory in project incubator-rya by apache.
The class LocalQueryExecutorIT, method runQuery:
@Test
public void runQuery() throws Exception {
    // Test values.
    final String ryaInstance = "rya";
    final StreamsQuery sQuery = new StreamsQuery(UUID.randomUUID(), "SELECT * WHERE { ?person <urn:worksAt> ?business . }", true, false);

    // Create the statements that will be loaded.
    final ValueFactory vf = new ValueFactoryImpl();
    final List<VisibilityStatement> statements = new ArrayList<>();
    statements.add(new VisibilityStatement(vf.createStatement(vf.createURI("urn:Alice"), vf.createURI("urn:worksAt"), vf.createURI("urn:BurgerJoint")), "a"));
    statements.add(new VisibilityStatement(vf.createStatement(vf.createURI("urn:Bob"), vf.createURI("urn:worksAt"), vf.createURI("urn:TacoShop")), "a"));
    statements.add(new VisibilityStatement(vf.createStatement(vf.createURI("urn:Charlie"), vf.createURI("urn:worksAt"), vf.createURI("urn:TacoShop")), "a"));

    // Create the expected results.
    final List<VisibilityBindingSet> expected = new ArrayList<>();
    MapBindingSet bs = new MapBindingSet();
    bs.addBinding("person", vf.createURI("urn:Alice"));
    bs.addBinding("business", vf.createURI("urn:BurgerJoint"));
    expected.add(new VisibilityBindingSet(bs, "a"));

    bs = new MapBindingSet();
    bs.addBinding("person", vf.createURI("urn:Bob"));
    bs.addBinding("business", vf.createURI("urn:TacoShop"));
    expected.add(new VisibilityBindingSet(bs, "a"));

    bs = new MapBindingSet();
    bs.addBinding("person", vf.createURI("urn:Charlie"));
    bs.addBinding("business", vf.createURI("urn:TacoShop"));
    expected.add(new VisibilityBindingSet(bs, "a"));

    // Start the executor that will be tested.
    final CreateKafkaTopic createKafkaTopic = new CreateKafkaTopic(kafka.getZookeeperServers());
    final String kafkaServers = kafka.getKafkaHostname() + ":" + kafka.getKafkaPort();
    final KafkaStreamsFactory jobFactory = new SingleThreadKafkaStreamsFactory(kafkaServers);
    final QueryExecutor executor = new LocalQueryExecutor(createKafkaTopic, jobFactory);
    executor.startAndWait();
    try {
        // Start the query.
        executor.startQuery(ryaInstance, sQuery);

        // Wait for the program to start.
        Thread.sleep(5000);

        // Write some statements to the program.
        final String statementsTopic = KafkaTopics.statementsTopic(ryaInstance);
        final LoadStatements loadStatements = new KafkaLoadStatements(statementsTopic, stmtProducer);
        loadStatements.fromCollection(statements);

        // Read the output of the streams program.
        final String resultsTopic = KafkaTopics.queryResultsTopic(ryaInstance, sQuery.getQueryId());
        resultConsumer.subscribe(Lists.newArrayList(resultsTopic));
        final List<VisibilityBindingSet> results = KafkaTestUtil.pollForResults(500, 6, 3, resultConsumer);
        assertEquals(expected, results);
    } finally {
        executor.stopAndWait();
    }
}
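Stripped of the test statements, expected results, and Kafka test harness, the executor lifecycle exercised above follows a start / startQuery / stop pattern. The sketch below reuses only calls that appear in the test; the broker and ZooKeeper addresses and the SPARQL text are assumptions, and the two boolean StreamsQuery flags simply mirror the test's values (assumed to mean isActive and isInsert).

// Lifecycle sketch distilled from the test above. Addresses and the query text
// are placeholder assumptions.
final KafkaStreamsFactory jobFactory = new SingleThreadKafkaStreamsFactory("localhost:9092");
final QueryExecutor executor = new LocalQueryExecutor(new CreateKafkaTopic("localhost:2181"), jobFactory);

final StreamsQuery query = new StreamsQuery(
        UUID.randomUUID(),
        "SELECT * WHERE { ?person <urn:worksAt> ?business . }",
        true,   // same flags as the test; assumed to mean the query is active
        false); // and that results are not inserted back into Rya

executor.startAndWait();
try {
    // Launches a single-threaded Kafka Streams job for the query.
    executor.startQuery("rya", query);

    // Statements written to KafkaTopics.statementsTopic("rya") now flow through the
    // job; results appear on KafkaTopics.queryResultsTopic("rya", query.getQueryId()).
} finally {
    executor.stopAndWait();
}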