Use of org.apache.rya.streams.kafka.serialization.VisibilityStatementDeserializer in project incubator-rya by apache.
The snippet below is from the class TopologyFactory, method build.
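In the build method shown below, VisibilityStatementDeserializer is passed to addSource as the value deserializer, so every Kafka record read from the statements topic is decoded into a VisibilityStatement before it enters the topology. For context, a class in that role only has to satisfy Kafka's Deserializer contract. The sketch that follows is illustrative only and is not the project's implementation; the class name and the use of Java object serialization are assumptions.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.util.Map;

import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Deserializer;

// Hypothetical stand-in for VisibilityStatementDeserializer. The real class may use a
// different wire format; Object is used here to avoid assuming the Rya model class's package.
public class ExampleVisibilityStatementDeserializer implements Deserializer<Object> {

    @Override
    public void configure(final Map<String, ?> configs, final boolean isKey) {
        // No configuration is needed for this sketch.
    }

    @Override
    public Object deserialize(final String topic, final byte[] data) {
        if (data == null) {
            return null;
        }
        // Assumption: the producer wrote the statement using Java object serialization.
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(data))) {
            return in.readObject();
        } catch (final IOException | ClassNotFoundException e) {
            throw new SerializationException("Could not deserialize a VisibilityStatement.", e);
        }
    }

    @Override
    public void close() {
        // Nothing to release.
    }
}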
@Override
public TopologyBuilder build(final String sparqlQuery, final String statementsTopic, final String resultsTopic, final BNodeIdFactory bNodeIdFactory)
        throws MalformedQueryException, TopologyBuilderException {
    requireNonNull(sparqlQuery);
    requireNonNull(statementsTopic);
    requireNonNull(resultsTopic);

    // Parse the SPARQL query into its algebra form.
    final ParsedQuery parsedQuery = new SPARQLParser().parseQuery(sparqlQuery, null);
    final TopologyBuilder builder = new TopologyBuilder();

    // Walk the query algebra to collect a processor entry for each query node.
    final TupleExpr expr = parsedQuery.getTupleExpr();
    final QueryVisitor visitor = new QueryVisitor(bNodeIdFactory);
    expr.visit(visitor);

    processorEntryList = visitor.getProcessorEntryList();
    final Map<TupleExpr, String> idMap = visitor.getIDs();

    // Add the source node that reads VisibilityStatements from the statements topic.
    builder.addSource(SOURCE, new StringDeserializer(), new VisibilityStatementDeserializer(), statementsTopic);

    // Processing the processor entry list in reverse order means we go from leaf
    // nodes -> parent nodes, so when the parent processing nodes are added, the
    // upstream processing nodes will already exist.
    ProcessorEntry entry = null;
    for (int ii = processorEntryList.size() - 1; ii >= 0; ii--) {
        entry = processorEntryList.get(ii);

        // Statement patterns need to be connected to the Source.
        if (entry.getNode() instanceof StatementPattern) {
            builder.addProcessor(entry.getID(), entry.getSupplier(), SOURCE);
        } else {
            final List<TupleExpr> parents = entry.getUpstreamNodes();
            final String[] parentIDs = new String[parents.size()];
            for (int id = 0; id < parents.size(); id++) {
                parentIDs[id] = idMap.get(parents.get(id));
            }
            builder.addProcessor(entry.getID(), entry.getSupplier(), parentIDs);
        }

        // Add a state store for any node type that requires one.
        if (entry.getNode() instanceof Join || entry.getNode() instanceof LeftJoin || entry.getNode() instanceof Group) {
            // Add a state store for the join processor.
            final StateStoreSupplier joinStoreSupplier =
                    Stores.create(entry.getID())
                        .withStringKeys()
                        .withValues(new VisibilityBindingSetSerde())
                        .persistent()
                        .build();
            builder.addStateStore(joinStoreSupplier, entry.getID());
        }
    }

    // Add a formatter that converts the ProcessorResults into the output format.
    final SinkEntry<?, ?> sinkEntry = visitor.getSinkEntry();
    builder.addProcessor("OUTPUT_FORMATTER", sinkEntry.getFormatterSupplier(), entry.getID());

    // Add the sink that writes the formatted results to the results topic.
    builder.addSink(SINK, resultsTopic, sinkEntry.getKeySerializer(), sinkEntry.getValueSerializer(), "OUTPUT_FORMATTER");

    return builder;
}
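For completeness, the following sketch (not part of the project) shows how the returned TopologyBuilder could be handed to a Kafka Streams application, assuming the pre-1.0 Kafka Streams processor API that TopologyBuilder belongs to. The application id, broker address, and topic names are placeholders, and the Rya imports are omitted.

import java.util.Properties;

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.processor.TopologyBuilder;

public class RunQueryExample {

    // 'factory' and 'bNodeIdFactory' are whatever TopologyFactory and BNodeIdFactory
    // instances the caller already has; their imports are omitted here.
    public static KafkaStreams run(final TopologyFactory factory, final String sparql, final BNodeIdFactory bNodeIdFactory) throws Exception {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "rya-streams-query");   // placeholder application id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // placeholder broker address

        // Topic names are placeholders and must match the topics used by the rest of the system.
        final TopologyBuilder topology = factory.build(sparql, "statements", "results", bNodeIdFactory);

        final KafkaStreams streams = new KafkaStreams(topology, props);
        streams.start();
        return streams;
    }
}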