Use of org.apache.jena.sparql.core.DatasetGraph in project jena by apache: class TestStreamRDFThrift, method dataset_02.
@Test
public void dataset_02() {
    // Round-trip the test dataset through the RDF Thrift binary encoding.
    DatasetGraph original = datasetGraph;
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    StreamRDFWriter.write(buffer, original, Lang.RDFTHRIFT);

    // Read the bytes back into a fresh dataset.
    ByteArrayInputStream input = new ByteArrayInputStream(buffer.toByteArray());
    DatasetGraph restored = DatasetGraphFactory.create();
    StreamRDF sink = StreamRDFLib.dataset(restored);
    BinRDF.inputStreamToStream(input, sink);

    assertTrue(IsoMatcher.isomorphic(original, restored));

    // Stronger - same bNode and same as in original data.
    Node blankGraphName = Iter.first(original.listGraphNodes(), Node::isBlank);
    termAsObject(original, blankGraphName);
}
Use of org.apache.jena.sparql.core.DatasetGraph in project jena by apache: class SPARQL_Upload, method uploadWorker.
/** Process an HTTP file upload of RDF with an additional name field for the graph name.
 * We can't stream straight into a dataset because the graph name can be after the data.
 * @param action the HTTP action carrying the multipart request
 * @param base   base URI used when parsing the uploaded RDF
 * @return graph name and count
 */
// ?? Combine with Upload.fileUploadWorker
// Difference is the handling of names for graphs.
private static UploadDetails uploadWorker(HttpAction action, String base) {
    // Accumulate all parsed data here until the whole multipart body has been
    // read, because the graph-name field may arrive after the data part.
    DatasetGraph dsgTmp = DatasetGraphFactory.create();
    ServletFileUpload upload = new ServletFileUpload();
    String graphName = null;
    boolean isQuads = false;
    long count = -1; // -1 = no data part was processed.
    String name = null;
    ContentType ct = null;
    Lang lang = null;
    try {
        // Streaming API: parts are delivered in the order they appear in the request body.
        FileItemIterator iter = upload.getItemIterator(action.request);
        while (iter.hasNext()) {
            FileItemStream item = iter.next();
            String fieldName = item.getFieldName();
            InputStream stream = item.openStream();
            if (item.isFormField()) {
                // Graph name.
                String value = Streams.asString(stream, "UTF-8");
                if (fieldName.equals(HttpNames.paramGraph)) {
                    graphName = value;
                    // Validate the graph name IRI unless it is empty or the "default" marker.
                    if (graphName != null && !graphName.equals("") && !graphName.equals(HttpNames.valueDefault)) {
                        IRI iri = IRIResolver.parseIRI(value);
                        if (iri.hasViolation(false))
                            ServletOps.errorBadRequest("Bad IRI: " + graphName);
                        if (iri.getScheme() == null)
                            ServletOps.errorBadRequest("Bad IRI: no IRI scheme name: " + graphName);
                        // Extra structural checks for http(s) scheme IRIs.
                        if (iri.getScheme().equalsIgnoreCase("http") || iri.getScheme().equalsIgnoreCase("https")) {
                            // Redundant??
                            if (iri.getRawHost() == null)
                                ServletOps.errorBadRequest("Bad IRI: no host name: " + graphName);
                            if (iri.getRawPath() == null || iri.getRawPath().length() == 0)
                                ServletOps.errorBadRequest("Bad IRI: no path: " + graphName);
                            if (iri.getRawPath().charAt(0) != '/')
                                ServletOps.errorBadRequest("Bad IRI: Path does not start '/': " + graphName);
                        }
                    }
                } else if (fieldName.equals(HttpNames.paramDefaultGraphURI))
                    graphName = null;
                else
                    // Add file type?
                    action.log.info(format("[%d] Upload: Field=%s ignored", action.id, fieldName));
            } else {
                // Process the input stream: a file part carrying the RDF data.
                name = item.getName();
                if (name == null || name.equals("") || name.equals("UNSET FILE NAME"))
                    ServletOps.errorBadRequest("No name for content - can't determine RDF syntax");
                String contentTypeHeader = item.getContentType();
                ct = ContentType.create(contentTypeHeader);
                // Prefer the declared content type; fall back to the file extension.
                lang = RDFLanguages.contentTypeToLang(ct.getContentType());
                if (lang == null) {
                    lang = RDFLanguages.filenameToLang(name);
                    // If the filename indicates gzip compression, wrap the stream accordingly.
                    if (name.endsWith(".gz"))
                        stream = new GZIPInputStream(stream);
                }
                if (lang == null)
                    // Desperate.
                    lang = RDFLanguages.RDFXML;
                isQuads = RDFLanguages.isQuads(lang);
                action.log.info(format("[%d] Upload: Filename: %s, Content-Type=%s, Charset=%s => %s", action.id, name, ct.getContentType(), ct.getCharset(), lang.getName()));
                // Parse into the temporary dataset, counting tuples as they stream through.
                StreamRDF x = StreamRDFLib.dataset(dsgTmp);
                StreamRDFCounting dest = StreamRDFLib.count(x);
                ActionSPARQL.parse(action, dest, stream, lang, base);
                count = dest.count();
            }
        }
        // Normalize: no/empty graph name means the default graph; quad syntaxes
        // carry their own graph names so the name field is ignored.
        if (graphName == null || graphName.equals(""))
            graphName = HttpNames.valueDefault;
        if (isQuads)
            graphName = null;
        return new UploadDetails(graphName, dsgTmp, count);
    } catch (ActionErrorException ex) {
        throw ex;
    } catch (Exception ex) {
        // NOTE(review): presumably errorOccurred throws, making the return
        // unreachable in practice — confirm; the return satisfies the compiler.
        ServletOps.errorOccurred(ex);
        return null;
    }
}
Use of org.apache.jena.sparql.core.DatasetGraph in project jena by apache: class ExQuadFilter, method setup.
/** Example setup - a TDB-backed dataset with two graphs, one triple in each. */
private static Dataset setup() {
    Dataset dataset = TDBFactory.createDataset();
    DatasetGraph graphs = dataset.asDatasetGraph();
    // One quad per named graph, written in SSE quad syntax.
    graphs.add(SSE.parseQuad("(<http://example/g1> <http://example/s> <http://example/p> <http://example/o1>)"));
    graphs.add(SSE.parseQuad("(<http://example/g2> <http://example/s> <http://example/p> <http://example/o2>)"));
    return dataset;
}
Use of org.apache.jena.sparql.core.DatasetGraph in project jena by apache: class TDB, method sync.
/** Sync a TDB-backed DatasetGraph. Do nothing if not TDB-backed. */
public static void sync(DatasetGraph dataset) {
    if (dataset == null)
        return;
    // Should be: SystemARQ.sync(dataset) ;
    if (dataset instanceof DatasetGraphTDB) {
        syncObject(dataset);
        return;
    }
    if (dataset instanceof DatasetGraphTransaction) {
        // Only syncs if the dataset has not been used transactionally;
        // syncing a transactional dataset is meaningless.
        ((DatasetGraphTransaction) dataset).syncIfNotTransactional();
        return;
    }
    // May be a general purpose dataset with TDB objects in it.
    sync(dataset.getDefaultGraph());
    // Materialize the graph-name list first to avoid iterator concurrency.
    Iterator<Node> graphNames = Iter.toList(dataset.listGraphNodes()).iterator();
    while (graphNames.hasNext()) {
        Node graphName = graphNames.next();
        sync(dataset.getGraph(graphName));
    }
}
Use of org.apache.jena.sparql.core.DatasetGraph in project jena by apache: class TestTransactionTDB, method transaction_50.
@Test
public void transaction_50() {
    // This assumes you have two datasets on the same location.
    // That's not necessarily true for uncached memory datasets,
    // where you get two separate datasets so changes to one are
    // not seen by the other at all.
    Dataset ds1 = create();
    Dataset ds2 = create();

    // Writer adds a triple but has not committed yet.
    ds1.begin(WRITE);
    ds1.getDefaultModel().getGraph().add(triple1);

    // A reader that started before the commit must not see the change.
    ds2.begin(READ);
    assertTrue(ds2.getDefaultModel().isEmpty());
    ds2.commit();

    ds1.commit();

    // A reader that starts after the commit sees ds1's update.
    ds2.begin(READ);
    assertFalse(ds2.getDefaultModel().isEmpty());
    assertEquals(1, ds2.getDefaultModel().size());
    ds2.commit();
}
Aggregations