Use of org.apache.jena.tdb2.store.NodeId in project jena by apache.
The class SolverLibTDB, method testForGraphName.
/**
* Find whether a specific graph name is in the quads table.
*/
static QueryIterator testForGraphName(DatasetGraphTDB ds, Node graphNode, QueryIterator input,
                                      Predicate<Tuple<NodeId>> filter, ExecutionContext execCxt) {
    NodeId nid = TDBInternal.getNodeId(ds, graphNode);
    boolean exists = !NodeId.isDoesNotExist(nid);
    if (exists) {
        // Node exists but is it used in the quad position?
        NodeTupleTable ntt = ds.getQuadTable().getNodeTupleTable();
        // Don't worry about abortable - this iterator should be fast
        // (with normal indexing - at least one G???).
        // Either it finds a starting point, or it doesn't. We are only
        // interested in the first .hasNext.
        Iterator<Tuple<NodeId>> iter1 = ntt.find(nid, NodeId.NodeIdAny, NodeId.NodeIdAny, NodeId.NodeIdAny);
        if (filter != null)
            iter1 = Iter.filter(iter1, filter);
        exists = iter1.hasNext();
    }
    if (exists)
        return input;
    else {
        input.close();
        return QueryIterNullIterator.create(execCxt);
    }
}
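A minimal stand-alone sketch (not part of the Jena sources) of the same NodeId lookup: resolve a graph node to its NodeId and probe the quad table for a first match. It assumes an in-memory TDB2 dataset created with DatabaseMgr.createDatasetGraph() and the TDBInternal helpers getDatasetGraphTDB/getNodeId used above; the example URIs, class name and the import paths for the TDB2 internals are illustrative and may vary between Jena versions.

import java.util.Iterator;

import org.apache.jena.atlas.lib.tuple.Tuple;
import org.apache.jena.graph.Node;
import org.apache.jena.graph.NodeFactory;
import org.apache.jena.sparql.core.DatasetGraph;
import org.apache.jena.sparql.core.Quad;
import org.apache.jena.system.Txn;
import org.apache.jena.tdb2.DatabaseMgr;
import org.apache.jena.tdb2.store.DatasetGraphTDB;
import org.apache.jena.tdb2.store.NodeId;
import org.apache.jena.tdb2.store.nodetupletable.NodeTupleTable;
import org.apache.jena.tdb2.sys.TDBInternal;

public class GraphNameProbe {
    public static void main(String[] args) {
        DatasetGraph dsg = DatabaseMgr.createDatasetGraph();    // in-memory TDB2 store
        Node g = NodeFactory.createURI("http://example/g");
        Node s = NodeFactory.createURI("http://example/s");
        Node p = NodeFactory.createURI("http://example/p");
        Node o = NodeFactory.createURI("http://example/o");
        Txn.executeWrite(dsg, () -> dsg.add(Quad.create(g, s, p, o)));

        // TDB2 is transactional-only: NodeId and quad-table access needs a read transaction.
        Txn.executeRead(dsg, () -> {
            DatasetGraphTDB dsgtdb = TDBInternal.getDatasetGraphTDB(dsg);
            NodeId nid = TDBInternal.getNodeId(dsgtdb, g);
            if (NodeId.isDoesNotExist(nid)) {
                System.out.println("Graph node is not in the node table");
                return;
            }
            // Same probe as testForGraphName: any quad with this graph in the G slot?
            NodeTupleTable ntt = dsgtdb.getQuadTable().getNodeTupleTable();
            Iterator<Tuple<NodeId>> iter =
                ntt.find(nid, NodeId.NodeIdAny, NodeId.NodeIdAny, NodeId.NodeIdAny);
            System.out.println("Graph present in the quads table: " + iter.hasNext());
        });
    }
}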
Use of org.apache.jena.tdb2.store.NodeId in project jena by apache.
The class BindingNodeId, method toString.
@Override
public String toString() {
    StringBuilder sb = new StringBuilder();
    boolean first = true;
    for (Var v : this) {
        if (!first)
            sb.append(" ");
        first = false;
        NodeId x = get(v);
        if (!NodeId.isDoesNotExist(x)) {
            sb.append(v);
            sb.append(" = ");
            sb.append(x);
        }
    }
    if (getParentBinding() != null) {
        sb.append(" ->> ");
        sb.append(getParentBinding());
    }
    return sb.toString();
}
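As a stand-alone illustration (not Jena code) of the guard above, the sketch below formats a map of variables to NodeIds and skips slots whose NodeId marks "does not exist". It assumes NodeId exposes a NodeDoesNotExist constant alongside isDoesNotExist; the map, the class name and the use of NodeId.NodeIdAny as a placeholder value are invented for the example.

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.jena.sparql.core.Var;
import org.apache.jena.tdb2.store.NodeId;

public class BindingPrintSketch {
    public static void main(String[] args) {
        Map<Var, NodeId> row = new LinkedHashMap<>();
        row.put(Var.alloc("s"), NodeId.NodeIdAny);              // placeholder id, for illustration only
        row.put(Var.alloc("missing"), NodeId.NodeDoesNotExist); // marks an absent slot

        StringBuilder sb = new StringBuilder();
        row.forEach((v, id) -> {
            if (!NodeId.isDoesNotExist(id))                     // same test as BindingNodeId.toString
                sb.append(v).append(" = ").append(id).append(" ");
        });
        System.out.println(sb.toString().trim());               // only the ?s entry is printed
    }
}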
Use of org.apache.jena.tdb2.store.NodeId in project jena by apache.
The class BindingNodeId, method putAll.
public void putAll(BindingNodeId other) {
    Iterator<Var> vIter = other.iterator();
    for (; vIter.hasNext(); ) {
        Var v = vIter.next();
        if (v == null)
            throw new IllegalArgumentException("Null key");
        NodeId n = other.get(v);
        if (n == null)
            throw new IllegalArgumentException("(" + v + "," + n + ")");
        super.put(v, n);
    }
}
Use of org.apache.jena.tdb2.store.NodeId in project jena by apache.
The class PhasedOps, method replay.
/**
 * Return (number of tuples replayed, time in ms).
*/
static ReplayResult replay(TupleIndex srcIdx, Destination<Tuple<NodeId>> dest, MonitorOutput output) {
    ProgressMonitor monitor =
        ProgressMonitorFactory.progressMonitor("Index", output, LoaderMain.IndexTickPoint, LoaderMain.IndexSuperTick);
    List<Tuple<NodeId>> block = null;
    int len = srcIdx.getTupleLength();
    monitor.start();
    Iterator<Tuple<NodeId>> iter = srcIdx.all();
    while (iter.hasNext()) {
        if (block == null)
            block = new ArrayList<>(LoaderConst.ChunkSize);
        Tuple<NodeId> row = iter.next();
        block.add(row);
        monitor.tick();
        if (block.size() == LoaderConst.ChunkSize) {
            dest.deliver(block);
            block = null;
        }
    }
    if (block != null)
        dest.deliver(block);
    dest.deliver(Collections.emptyList());
    monitor.finish();
    // monitor.finishMessage("Tuples["+len+"]");
    return new ReplayResult(monitor.getTicks(), monitor.getTime());
}
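The batching pattern in replay() can be shown without the loader machinery. The plain-Java sketch below (not Jena code) accumulates rows into fixed-size chunks, hands each chunk to a sink, and finishes with an empty chunk as the end-of-data signal; CHUNK_SIZE and the Consumer sink stand in for LoaderConst.ChunkSize and Destination.deliver.

import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.function.Consumer;

public class ChunkedReplaySketch {
    static final int CHUNK_SIZE = 100_000;    // stand-in for LoaderConst.ChunkSize

    static <X> long replay(Iterator<X> source, Consumer<List<X>> sink) {
        long count = 0;
        List<X> block = null;
        while (source.hasNext()) {
            if (block == null)
                block = new ArrayList<>(CHUNK_SIZE);
            block.add(source.next());
            count++;
            if (block.size() == CHUNK_SIZE) {
                sink.accept(block);           // deliver a full chunk
                block = null;
            }
        }
        if (block != null)
            sink.accept(block);               // deliver the final partial chunk
        sink.accept(Collections.emptyList()); // empty chunk = end-of-data signal
        return count;
    }

    public static void main(String[] args) {
        List<Integer> data = List.of(1, 2, 3, 4, 5);
        long n = replay(data.iterator(), chunk ->
            System.out.println("chunk of " + chunk.size()));
        System.out.println("total rows: " + n);
    }
}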
Use of org.apache.jena.tdb2.store.NodeId in project jena by apache.
The class LoaderMain, method executeData.
/**
* Create data ingestion and primary index building of a {@link LoaderPlan}.
 * In phase 1, separate threads are used for parsing, node table loading and primary index building.
*
* Used by {@link InputStage#MULTI}.
*/
private static StreamRDFCounting executeData(LoaderPlan loaderPlan, DatasetGraphTDB dsgtdb,
                                             Map<String, TupleIndex> indexMap,
                                             List<BulkStartFinish> dataProcess, MonitorOutput output) {
    StoragePrefixesTDB dps = (StoragePrefixesTDB) dsgtdb.getStoragePrefixes();
    PrefixHandlerBulk prefixHandler = new PrefixHandlerBulk(dps, output);
    dataProcess.add(prefixHandler);
    // -- Phase 2 block. Indexer and Destination (blocks of Tuple<NodeId>)
    TupleIndex[] idx3 = PhasedOps.indexSetFromNames(loaderPlan.primaryLoad3(), indexMap);
    Indexer indexer3 = new Indexer(output, idx3);
    TupleIndex[] idx4 = PhasedOps.indexSetFromNames(loaderPlan.primaryLoad4(), indexMap);
    Indexer indexer4 = new Indexer(output, idx4);
    dataProcess.add(indexer3);
    dataProcess.add(indexer4);
    Destination<Tuple<NodeId>> functionIndexer3 = indexer3.index();
    Destination<Tuple<NodeId>> functionIndexer4 = indexer4.index();
    // -- Phase 2 block.
    // -- Phase 1.
    // This is the other way round to AsyncParser.
    // Here, we return a StreamRDF to pump data into and the rest of the
    // processing is on other threads. AsyncParser has the processing on the caller thread
    // and so the current thread continues when the processing from the parser is finished.
    DataToTuples dtt = new DataToTuples(dsgtdb, functionIndexer3, functionIndexer4, output);
    DataBatcher dataBatcher = new DataBatcher(dtt.data(), prefixHandler.handler(), output);
    dataProcess.add(dtt);
    dataProcess.add(dataBatcher);
    return dataBatcher;
}
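The "hand back a StreamRDF and push data into it" shape that executeData() relies on can be seen with the public riot API alone. In the sketch below (not the loader itself), a plain in-memory DatasetGraph sink stands in for the DataBatcher pipeline and "data.trig" is a placeholder file name.

import org.apache.jena.atlas.iterator.Iter;
import org.apache.jena.riot.RDFParser;
import org.apache.jena.riot.system.StreamRDF;
import org.apache.jena.riot.system.StreamRDFLib;
import org.apache.jena.sparql.core.DatasetGraph;
import org.apache.jena.sparql.core.DatasetGraphFactory;

public class StreamPumpSketch {
    public static void main(String[] args) {
        DatasetGraph dsg = DatasetGraphFactory.create();
        StreamRDF dest = StreamRDFLib.dataset(dsg);   // stand-in for the loader's StreamRDFCounting
        RDFParser.source("data.trig").parse(dest);    // the parser pushes triples/quads into the stream
        System.out.println("Quads parsed: " + Iter.count(dsg.find()));
    }
}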