Use of org.apache.fluo.api.client.FluoClient in project incubator-rya by apache.
The class GetPcjMetadataIT, method getAllMetadata:
@Test
public void getAllMetadata() throws MalformedQueryException, SailException, QueryEvaluationException, PcjException, NotInFluoException, NotInAccumuloException, AccumuloException, AccumuloSecurityException, RyaDAOException, UnsupportedQueryException {
final Connector accumuloConn = super.getAccumuloConnector();
final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, getRyaInstanceName());
try (FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
// Add a couple of queries to Accumulo.
final String q1Sparql = "SELECT ?x " + "WHERE { " + "?x <http://talksTo> <http://Eve>. " + "?x <http://worksAt> <http://Chipotle>." + "}";
final String q1PcjId = pcjStorage.createPcj(q1Sparql);
final CreateFluoPcj createPcj = new CreateFluoPcj();
createPcj.withRyaIntegration(q1PcjId, pcjStorage, fluoClient, accumuloConn, getRyaInstanceName());
final String q2Sparql = "SELECT ?x ?y " + "WHERE { " + "?x <http://talksTo> ?y. " + "?y <http://worksAt> <http://Chipotle>." + "}";
final String q2PcjId = pcjStorage.createPcj(q2Sparql);
createPcj.withRyaIntegration(q2PcjId, pcjStorage, fluoClient, accumuloConn, getRyaInstanceName());
// Ensure the command returns the correct metadata.
final Set<PcjMetadata> expected = new HashSet<>();
final Set<VariableOrder> q1VarOrders = new ShiftVarOrderFactory().makeVarOrders(q1Sparql);
final Set<VariableOrder> q2VarOrders = new ShiftVarOrderFactory().makeVarOrders(q2Sparql);
expected.add(new PcjMetadata(q1Sparql, 0L, q1VarOrders));
expected.add(new PcjMetadata(q2Sparql, 0L, q2VarOrders));
final Map<String, PcjMetadata> metadata = new GetPcjMetadata().getMetadata(pcjStorage, fluoClient);
assertEquals(expected, Sets.newHashSet(metadata.values()));
}
}
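The test above obtains its FluoConfiguration from a mini Fluo cluster via getFluoConfiguration(). For reference, the following is a minimal sketch of opening a FluoClient against a standalone Fluo application (imports omitted, as in the snippets above); the application name and ZooKeeper connect string are placeholder values, not taken from the project.
// Sketch only: "rya_pcj_updater" and "localhost/fluo" are placeholder values.
FluoConfiguration fluoConfig = new FluoConfiguration();
fluoConfig.setApplicationName("rya_pcj_updater");
fluoConfig.setInstanceZookeepers("localhost/fluo");
try (FluoClient client = FluoFactory.newClient(fluoConfig)) {
    // Snapshots (read-only) and transactions are opened from the client while it is open.
    try (Snapshot snapshot = client.newSnapshot()) {
        // read-only access to the Fluo table goes here
    }
}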
Use of org.apache.fluo.api.client.FluoClient in project incubator-rya by apache.
The class PeriodicNotificationBinPrunerIT, method periodicPrunerTest:
@Test
public void periodicPrunerTest() throws Exception {
String sparql =
"prefix function: <http://org.apache.rya/function#> " +
"prefix time: <http://www.w3.org/2006/time#> " +
"select ?id (count(?obs) as ?total) where {" +
"Filter(function:periodic(?time, 2, .5, time:hours)) " +
"?obs <uri:hasTime> ?time. " +
"?obs <uri:hasId> ?id } group by ?id";
FluoClient fluo = new FluoClientImpl(super.getFluoConfiguration());
// initialize resources and create pcj
PeriodicQueryResultStorage periodicStorage = new AccumuloPeriodicQueryResultStorage(super.getAccumuloConnector(), getRyaInstanceName());
CreatePeriodicQuery createPeriodicQuery = new CreatePeriodicQuery(fluo, periodicStorage);
String queryId = FluoQueryUtils.convertFluoQueryIdToPcjId(createPeriodicQuery.createPeriodicQuery(sparql).getQueryId());
// create statements to ingest into Fluo
final ValueFactory vf = new ValueFactoryImpl();
final DatatypeFactory dtf = DatatypeFactory.newInstance();
ZonedDateTime time = ZonedDateTime.now();
long currentTime = time.toInstant().toEpochMilli();
ZonedDateTime zTime1 = time.minusMinutes(30);
String time1 = zTime1.format(DateTimeFormatter.ISO_INSTANT);
ZonedDateTime zTime2 = zTime1.minusMinutes(30);
String time2 = zTime2.format(DateTimeFormatter.ISO_INSTANT);
ZonedDateTime zTime3 = zTime2.minusMinutes(30);
String time3 = zTime3.format(DateTimeFormatter.ISO_INSTANT);
ZonedDateTime zTime4 = zTime3.minusMinutes(30);
String time4 = zTime4.format(DateTimeFormatter.ISO_INSTANT);
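// obs_1 through obs_4 are timestamped 30 minutes apart; obs_1 and obs_2 are each given a second, older
// timestamp so they contribute to more than one periodic bin.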
final Collection<Statement> statements = Sets.newHashSet(
vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time1))),
vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasId"), vf.createLiteral("id_1")),
vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time2))),
vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasId"), vf.createLiteral("id_2")),
vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time3))),
vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasId"), vf.createLiteral("id_3")),
vf.createStatement(vf.createURI("urn:obs_4"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time4))),
vf.createStatement(vf.createURI("urn:obs_4"), vf.createURI("uri:hasId"), vf.createLiteral("id_4")),
vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time4))),
vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasId"), vf.createLiteral("id_1")),
vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time3))),
vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasId"), vf.createLiteral("id_2")));
// add statements to Fluo
InsertTriples inserter = new InsertTriples();
statements.forEach(x -> inserter.insert(fluo, RdfToRyaConversions.convertStatement(x)));
super.getMiniFluo().waitForObservers();
// FluoITHelper.printFluoTable(fluo);
// Create the expected results of the SPARQL query once the PCJ has been computed.
final Set<BindingSet> expected1 = new HashSet<>();
final Set<BindingSet> expected2 = new HashSet<>();
final Set<BindingSet> expected3 = new HashSet<>();
final Set<BindingSet> expected4 = new HashSet<>();
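// Bins are 30 minutes (1800000 ms) wide; binId floors the current time to the most recent bin boundary and
// bin1 through bin4 are consecutive bin ids. Later bins hold fewer results because older observations age
// out of the 2 hour window.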
long period = 1800000;
long binId = (currentTime / period) * period;
long bin1 = binId;
long bin2 = binId + period;
long bin3 = binId + 2 * period;
long bin4 = binId + 3 * period;
MapBindingSet bs = new MapBindingSet();
bs.addBinding("total", vf.createLiteral("2", XMLSchema.INTEGER));
bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
bs.addBinding("periodicBinId", vf.createLiteral(bin1));
expected1.add(bs);
bs = new MapBindingSet();
bs.addBinding("total", vf.createLiteral("2", XMLSchema.INTEGER));
bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
bs.addBinding("periodicBinId", vf.createLiteral(bin1));
expected1.add(bs);
bs = new MapBindingSet();
bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
bs.addBinding("id", vf.createLiteral("id_3", XMLSchema.STRING));
bs.addBinding("periodicBinId", vf.createLiteral(bin1));
expected1.add(bs);
bs = new MapBindingSet();
bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
bs.addBinding("id", vf.createLiteral("id_4", XMLSchema.STRING));
bs.addBinding("periodicBinId", vf.createLiteral(bin1));
expected1.add(bs);
bs = new MapBindingSet();
bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
bs.addBinding("periodicBinId", vf.createLiteral(bin2));
expected2.add(bs);
bs = new MapBindingSet();
bs.addBinding("total", vf.createLiteral("2", XMLSchema.INTEGER));
bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
bs.addBinding("periodicBinId", vf.createLiteral(bin2));
expected2.add(bs);
bs = new MapBindingSet();
bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
bs.addBinding("id", vf.createLiteral("id_3", XMLSchema.STRING));
bs.addBinding("periodicBinId", vf.createLiteral(bin2));
expected2.add(bs);
bs = new MapBindingSet();
bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
bs.addBinding("periodicBinId", vf.createLiteral(bin3));
expected3.add(bs);
bs = new MapBindingSet();
bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
bs.addBinding("periodicBinId", vf.createLiteral(bin3));
expected3.add(bs);
bs = new MapBindingSet();
bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
bs.addBinding("periodicBinId", vf.createLiteral(bin4));
expected4.add(bs);
// make sure that expected and actual results align after ingest
compareResults(periodicStorage, queryId, bin1, expected1);
compareResults(periodicStorage, queryId, bin2, expected2);
compareResults(periodicStorage, queryId, bin3, expected3);
compareResults(periodicStorage, queryId, bin4, expected4);
BlockingQueue<NodeBin> bins = new LinkedBlockingQueue<>();
PeriodicQueryPrunerExecutor pruner = new PeriodicQueryPrunerExecutor(periodicStorage, fluo, 1, bins);
pruner.start();
bins.add(new NodeBin(queryId, bin1));
bins.add(new NodeBin(queryId, bin2));
bins.add(new NodeBin(queryId, bin3));
bins.add(new NodeBin(queryId, bin4));
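// Give the pruner worker time to process the queued bins before verifying that the bins have been removed
// from both the periodic result storage and the Fluo table.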
Thread.sleep(10000);
compareResults(periodicStorage, queryId, bin1, new HashSet<>());
compareResults(periodicStorage, queryId, bin2, new HashSet<>());
compareResults(periodicStorage, queryId, bin3, new HashSet<>());
compareResults(periodicStorage, queryId, bin4, new HashSet<>());
compareFluoCounts(fluo, queryId, bin1);
compareFluoCounts(fluo, queryId, bin2);
compareFluoCounts(fluo, queryId, bin3);
compareFluoCounts(fluo, queryId, bin4);
pruner.stop();
}
Use of org.apache.fluo.api.client.FluoClient in project incubator-rya by apache.
The class PeriodicNotificationApplicationFactory, method getPeriodicApplication:
/**
* Create a PeriodicNotificationApplication.
* @param conf - Configuration object that specifies the parameters needed to create the application
* @return PeriodicNotificationApplication to periodically poll Rya Fluo for new results
* @throws PeriodicApplicationException - if the periodic query storage or Fluo client could not be created
*/
public static PeriodicNotificationApplication getPeriodicApplication(final PeriodicNotificationApplicationConfiguration conf) throws PeriodicApplicationException {
final Properties kafkaConsumerProps = getKafkaConsumerProperties(conf);
final Properties kafkaProducerProps = getKafkaProducerProperties(conf);
final BlockingQueue<TimestampedNotification> notifications = new LinkedBlockingQueue<>();
final BlockingQueue<NodeBin> bins = new LinkedBlockingQueue<>();
final BlockingQueue<BindingSetRecord> bindingSets = new LinkedBlockingQueue<>();
FluoClient fluo = null;
try {
final PeriodicQueryResultStorage storage = getPeriodicQueryResultStorage(conf);
fluo = FluoClientFactory.getFluoClient(conf.getFluoAppName(), Optional.of(conf.getFluoTableName()), conf);
final NotificationCoordinatorExecutor coordinator = getCoordinator(conf.getCoordinatorThreads(), notifications);
addRegisteredNotices(coordinator, fluo.newSnapshot());
final KafkaExporterExecutor exporter = getExporter(conf.getExporterThreads(), kafkaProducerProps, bindingSets);
final PeriodicQueryPrunerExecutor pruner = getPruner(storage, fluo, conf.getPrunerThreads(), bins);
final NotificationProcessorExecutor processor = getProcessor(storage, notifications, bins, bindingSets, conf.getProcessorThreads());
final KafkaNotificationProvider provider = getProvider(conf.getProducerThreads(), conf.getNotificationTopic(), coordinator, kafkaConsumerProps);
return PeriodicNotificationApplication.builder().setCoordinator(coordinator).setProvider(provider).setExporter(exporter).setProcessor(processor).setPruner(pruner).build();
} catch (AccumuloException | AccumuloSecurityException e) {
throw new PeriodicApplicationException(e.getMessage());
}
}
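A brief sketch of how calling code might drive this factory follows (imports omitted, as in the snippets above). The helper method runNotificationApp is hypothetical, and it is an assumption for illustration that the configuration has already been built from the application's property file; start() and stop() are the lifecycle methods of the returned application.
// Sketch only: runNotificationApp is a hypothetical helper, and "conf" is assumed to carry the Accumulo,
// Fluo, and Kafka settings the factory expects.
public static void runNotificationApp(final PeriodicNotificationApplicationConfiguration conf) throws PeriodicApplicationException {
    final PeriodicNotificationApplication app = PeriodicNotificationApplicationFactory.getPeriodicApplication(conf);
    // start() begins polling the Rya Fluo application and exporting periodic query results to Kafka.
    app.start();
    // ... run until shutdown is requested ...
    app.stop();
}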
Use of org.apache.fluo.api.client.FluoClient in project incubator-rya by apache.
The class PeriodicNotificationApplicationIT, method addData:
private void addData(final Collection<Statement> statements) throws DatatypeConfigurationException {
// add statements to Fluo
try (FluoClient fluo = new FluoClientImpl(getFluoConfiguration())) {
final InsertTriples inserter = new InsertTriples();
statements.forEach(x -> inserter.insert(fluo, RdfToRyaConversions.convertStatement(x)));
getMiniFluo().waitForObservers();
}
}
Use of org.apache.fluo.api.client.FluoClient in project incubator-rya by apache.
The class PeriodicNotificationProviderIT, method testProvider:
@Test
public void testProvider() throws MalformedQueryException, InterruptedException, UnsupportedQueryException {
String sparql =
"prefix function: <http://org.apache.rya/function#> " +
"prefix time: <http://www.w3.org/2006/time#> " +
"select ?id (count(?obs) as ?total) where {" +
"Filter(function:periodic(?time, 1, .25, time:minutes)) " +
"?obs <uri:hasTime> ?time. " +
"?obs <uri:hasId> ?id } group by ?id";
BlockingQueue<TimestampedNotification> notifications = new LinkedBlockingQueue<>();
PeriodicNotificationCoordinatorExecutor coord = new PeriodicNotificationCoordinatorExecutor(2, notifications);
PeriodicNotificationProvider provider = new PeriodicNotificationProvider();
CreateFluoPcj pcj = new CreateFluoPcj();
String id = null;
try (FluoClient fluo = new FluoClientImpl(getFluoConfiguration())) {
id = pcj.createPcj(FluoQueryUtils.createNewPcjId(), sparql, Sets.newHashSet(), fluo).getQueryId();
provider.processRegisteredNotifications(coord, fluo.newSnapshot());
}
TimestampedNotification notification = notifications.take();
Assert.assertEquals(5000, notification.getInitialDelay());
Assert.assertEquals(15000, notification.getPeriod());
Assert.assertEquals(TimeUnit.MILLISECONDS, notification.getTimeUnit());
Assert.assertEquals(FluoQueryUtils.convertFluoQueryIdToPcjId(id), notification.getId());
}