use of org.apache.clerezza.rdf.core.serializedform.UnsupportedFormatException in project stanbol by apache.
the class RootResource method performLoadOntology.
/**
 * Loads an ontology submitted via a multipart POST. Content can arrive in one of three
 * mutually exclusive ways, checked in this order of precedence:
 * <ol>
 * <li>an uploaded file (form field "file");</li>
 * <li>a dereferenceable URL (form field "url");</li>
 * <li>no content at all, but one or more aliases (form field "alias") to attach to an
 * ontology already identified by the supplied origins.</li>
 * </ol>
 * The optional "format" field pins the RDF serialization; the value "auto" (or its
 * absence) makes the method cycle through the RESTful API's preferred formats until one
 * parses successfully.
 *
 * @param data the parsed multipart request body.
 * @param headers request headers, used to negotiate an HTML entity in the response.
 * @param keys optional origins (public keys / graph names) for the stored ontology.
 * @return an OK builder on success, a CONFLICT builder if the submitted ontology is
 *         already stored, an INTERNAL_SERVER_ERROR builder if storage produced no usable
 *         key; throws {@link WebApplicationException} (400/415) on bad input.
 */
protected ResponseBuilder performLoadOntology(MultiPartBody data, HttpHeaders headers, Origin<?>... keys) {
    log.debug(" post(MultiPartBody data)");
    ResponseBuilder rb = null;
    IRI location = null;
    // If found, the uploaded file takes precedence over location.
    byte[] file = null;
    String format = null;
    List<OWLOntologyID> aliases = new ArrayList<OWLOntologyID>();
    if (data.getFormFileParameterValues("file").length > 0) {
        file = data.getFormFileParameterValues("file")[0].getContent();
    }
    // "auto" leaves format == null, which triggers format guessing below.
    if (data.getTextParameterValues("format").length > 0) {
        String value = data.getTextParameterValues("format")[0];
        if (!value.equals("auto")) {
            format = value;
        }
    }
    if (data.getTextParameterValues("url").length > 0) {
        String value = data.getTextParameterValues("url")[0];
        try {
            // To throw 400 if malformed.
            URI.create(value);
            location = IRI.create(value);
        } catch (Exception ex) {
            log.error("Malformed IRI for " + value, ex);
            throw new WebApplicationException(ex, BAD_REQUEST);
        }
    }
    if (data.getTextParameterValues("alias").length > 0) {
        for (String value : data.getTextParameterValues("alias")) {
            if (!"null".equals(value)) {
                try {
                    aliases.add(OntologyUtils.decode(value));
                } catch (Exception ex) {
                    log.error("Malformed public key for " + value, ex);
                    throw new WebApplicationException(ex, BAD_REQUEST);
                }
            }
        }
    }
    log.debug("Parameters:");
    log.debug("file: {}", file != null && file.length > 0 ? "NOT-NULL" : "null");
    log.trace("file data: {}", file);
    log.debug("url: {}", location);
    log.debug("format: {}", format);
    log.debug("alias: {}", aliases);
    // Then add the file
    OWLOntologyID key = null;
    if (file != null && file.length > 0) {
        /*
         * Because the ontology provider's load method could fail after only one attempt without resetting
         * the stream, we might have to do that ourselves.
         */
        List<String> formats;
        if (format != null && !format.trim().isEmpty()) {
            formats = Collections.singletonList(format);
        } else {
            // The RESTful API has its own list of preferred formats
            formats = Arrays.asList(RDF_XML, TURTLE, X_TURTLE, N3, N_TRIPLE, OWL_XML, FUNCTIONAL_OWL, MANCHESTER_OWL, RDF_JSON);
        }
        log.debug("Will try {} supported formats", formats.size());
        int unsupported = 0, failed = 0;
        Iterator<String> itf = formats.iterator();
        if (!itf.hasNext()) {
            throw new OntologyLoadingException("No suitable format found or defined.");
        }
        do {
            String f = itf.next();
            try {
                // Re-instantiate the stream on every attempt
                InputStream content = new BufferedInputStream(new ByteArrayInputStream(file));
                OWLOntologyID guessed = OWLUtils.guessOntologyID(content, Parser.getInstance(), f);
                if (guessed != null && !guessed.isAnonymous() && ontologyProvider.hasOntology(guessed)) {
                    // Ontology already stored: report a conflict instead of overwriting.
                    rb = Response.status(Status.CONFLICT);
                    this.submitted = guessed;
                    if (headers.getAcceptableMediaTypes().contains(MediaType.TEXT_HTML_TYPE)) {
                        rb.entity(new Viewable("/imports/409.ftl", this));
                        rb.header(HttpHeaders.CONTENT_TYPE, MediaType.TEXT_HTML + "; charset=utf-8");
                    }
                    break;
                } else {
                    // Guessing consumed the stream; rebuild it before the actual load.
                    content = new BufferedInputStream(new ByteArrayInputStream(file));
                    key = ontologyProvider.loadInStore(content, f, true, keys);
                }
            } catch (UnsupportedFormatException e) {
                log.warn("POST method failed for media type {}. This should not happen (should fail earlier)", headers.getMediaType());
                unsupported++;
            } catch (IOException e) {
                log.debug(">>> FAILURE format {} (I/O error)", f);
                failed++;
            } catch (ConcurrentModificationException e) {
                log.error("Exception logged", e);
                failed++;
            } catch (Exception e) {
                // SAXParseException and others
                log.debug(">>> FAILURE format {} (parse error)", f);
                log.debug("Caught exception {} : {}", e.getClass(), e.getLocalizedMessage());
                log.trace("Exception logged", e);
                failed++;
            }
        } while ((key == null) && itf.hasNext());
        if ((key == null || key.isAnonymous()) && rb == null) {
            if (failed > 0) {
                throw new WebApplicationException(BAD_REQUEST);
            } else if (unsupported > 0) {
                throw new WebApplicationException(UNSUPPORTED_MEDIA_TYPE);
            }
        }
    } else if (location != null) {
        try {
            // Here we try every format supported by the Java API
            key = ontologyProvider.loadInStore(location, null, true, keys);
        } catch (Exception e) {
            log.error("Failed to load ontology from " + location, e);
            Throwable cause = e.getCause();
            String html = "<h1>400 Bad Request</h1>" + "<p>Failed to load ontology from <a href=\"" + location + "\" target=\"_blank\">" + location + "</a></p>";
            // Escape angle brackets so the cause message cannot inject markup into the
            // HTML error page (the previous replace("<","<") calls were no-ops).
            if (cause != null && cause.getLocalizedMessage() != null) {
                html += "<p>logged cause was: " + cause.getLocalizedMessage().replace("<", "&lt;").replace(">", "&gt;") + "</p>";
            }
            return Response.status(BAD_REQUEST).type(TEXT_HTML).entity(html);
        }
    } else if (!aliases.isEmpty()) {
        // No content, but there are aliases: bind them to the origin-supplied primary keys.
        for (Origin<?> origin : keys) {
            if (origin.getReference() instanceof OWLOntologyID) {
                OWLOntologyID primary = ((OWLOntologyID) origin.getReference());
                if (ontologyProvider.getStatus(primary) != org.apache.stanbol.ontologymanager.servicesapi.ontology.OntologyProvider.Status.NO_MATCH) {
                    for (OWLOntologyID alias : aliases) {
                        try {
                            // The first successfully added alias becomes the reported key.
                            if (ontologyProvider.addAlias(primary, alias) && key == null) {
                                key = alias;
                            }
                        } catch (IllegalArgumentException ex) {
                            log.warn("Cannot add alias");
                            log.warn(" ... ontology key: {}", primary);
                            log.warn(" ... alias: {}", alias);
                            log.warn(" ... reason: ", ex);
                        }
                    }
                }
            }
        }
    } else {
        log.error("Bad request");
        log.error(" file is: {}", file);
        throw new WebApplicationException(BAD_REQUEST);
    }
    if (key != null && !key.isAnonymous()) {
        String uri = OntologyUtils.encode(key);
        if (uri != null && !uri.isEmpty()) {
            rb = Response.ok();
            if (headers.getAcceptableMediaTypes().contains(MediaType.TEXT_HTML_TYPE)) {
                rb.entity(new Viewable("index", this));
                rb.header(HttpHeaders.CONTENT_TYPE, MediaType.TEXT_HTML + "; charset=utf-8");
            }
        } else {
            rb = Response.ok();
        }
    } else if (rb == null) {
        // No key and no conflict response: something went wrong inside the provider.
        rb = Response.status(Status.INTERNAL_SERVER_ERROR);
    }
    return rb;
}
use of org.apache.clerezza.rdf.core.serializedform.UnsupportedFormatException in project stanbol by apache.
the class ScopeResource method postOntology.
/**
 * POST handler that adds ontology content to the custom space of the scope identified by
 * the "scopeid" path parameter. Content may come from an uploaded file (form field
 * "file"), a URL ("url"), a library reference ("library"), or keys of already-stored
 * ontologies ("stored"). The optional "format" field pins the RDF serialization; "auto"
 * (or absence) triggers guessing across the API's preferred formats.
 *
 * @param data the parsed multipart request body.
 * @param scopeid the ID of the target scope, taken from the request path.
 * @param headers request headers, used to negotiate an HTML entity in the response.
 * @return 303 (see other) pointing at the stored ontology on success, 400 for bad or
 *         missing input, 500 if storage yielded no usable key.
 */
@POST
@Consumes({ MULTIPART_FORM_DATA })
@Produces({ TEXT_HTML, TEXT_PLAIN, RDF_XML, TURTLE, X_TURTLE, N3 })
public Response postOntology(MultiPartBody data, @PathParam("scopeid") String scopeid, @Context HttpHeaders headers) {
log.info(" post(MultiPartBody data) scope: {}", scopeid);
ResponseBuilder rb;
scope = onm.getScope(scopeid);
// TODO remove and make sure it is set across the method
rb = Response.status(BAD_REQUEST);
IRI location = null, library = null;
// If found, it takes precedence over location.
FormFile file = null;
String format = null;
Set<String> keys = new HashSet<String>();
if (data.getFormFileParameterValues("file").length > 0) {
file = data.getFormFileParameterValues("file")[0];
}
// else {
// "auto" leaves format == null, which triggers format guessing below.
if (data.getTextParameterValues("format").length > 0) {
String value = data.getTextParameterValues("format")[0];
if (!value.equals("auto")) {
format = value;
}
}
if (data.getTextParameterValues("url").length > 0) {
String value = data.getTextParameterValues("url")[0];
try {
// To throw 400 if malformed.
URI.create(value);
location = IRI.create(value);
} catch (Exception ex) {
log.error("Malformed IRI for param url " + value, ex);
throw new WebApplicationException(ex, BAD_REQUEST);
}
}
if (data.getTextParameterValues("library").length > 0) {
String value = data.getTextParameterValues("library")[0];
try {
// To throw 400 if malformed.
URI.create(value);
library = IRI.create(value);
} catch (Exception ex) {
log.error("Malformed IRI for param library " + value, ex);
throw new WebApplicationException(ex, BAD_REQUEST);
}
}
// "stored" refers to ontologies already persisted by the provider.
if (data.getTextParameterValues("stored").length > 0) {
String value = data.getTextParameterValues("stored")[0];
keys.add(value);
}
log.debug("Parameters:");
log.debug("file: {}", file);
log.debug("url: {}", location);
log.debug("format: {}", format);
log.debug("keys: {}", keys);
boolean fileOk = file != null;
// }
if (fileOk || location != null || library != null) {
// File and location take precedence
// src = new GraphContentInputSource(content, format, ontologyProvider.getStore());
// Then add the file
OntologyInputSource<?> src = null;
if (fileOk) {
/*
* Because the ontology provider's load method could fail after only one attempt without
* resetting the stream, we might have to do that ourselves.
*/
List<String> formats;
if (format != null && !format.trim().isEmpty())
formats = Collections.singletonList(format);
else
// The RESTful API has its own list of preferred formats
formats = Arrays.asList(RDF_XML, TURTLE, X_TURTLE, N3, N_TRIPLE, OWL_XML, FUNCTIONAL_OWL, MANCHESTER_OWL, RDF_JSON);
int unsupported = 0, failed = 0;
Iterator<String> itf = formats.iterator();
if (!itf.hasNext())
throw new OntologyLoadingException("No suitable format found or defined.");
// Try each candidate format until one yields an input source.
do {
String f = itf.next();
try {
// Re-instantiate the stream on every attempt
InputStream content = new ByteArrayInputStream(file.getContent());
// ClerezzaOWLUtils.guessOntologyID(new FileInputStream(file), Parser.getInstance(),
// f);
OWLOntologyID guessed = OWLUtils.guessOntologyID(content, Parser.getInstance(), f);
log.debug("guessed ontology id: {}", guessed);
if (guessed != null && !guessed.isAnonymous() && ontologyProvider.hasOntology(guessed)) {
// Ontology already stored: render the conflict view and stop trying formats.
// rb = Response.status(Status.CONFLICT);
this.submitted = guessed;
if (headers.getAcceptableMediaTypes().contains(MediaType.TEXT_HTML_TYPE)) {
rb.entity(new Viewable("conflict.ftl", new ScopeResultData()));
rb.header(HttpHeaders.CONTENT_TYPE, MediaType.TEXT_HTML + "; charset=utf-8");
}
break;
} else {
// Guessing consumed the stream; rebuild it before building the input source.
content = new ByteArrayInputStream(file.getContent());
log.debug("Recreated input stream for format {}", f);
src = new GraphContentInputSource(content, f, ontologyProvider.getStore());
}
} catch (UnsupportedFormatException e) {
log.warn("POST method failed for media type {}. This should not happen (should fail earlier)", headers.getMediaType());
// rb = Response.status(UNSUPPORTED_MEDIA_TYPE);
unsupported++;
} catch (IOException e) {
log.debug(">>> FAILURE format {} (I/O error)", f);
failed++;
} catch (Exception e) {
// SAXParseException and others
log.debug(">>> FAILURE format {} (parse error)", f);
failed++;
}
} while (src == null && itf.hasNext());
}
if (src != null) {
// Register the parsed content with the scope's custom space.
OWLOntologyID key = scope.getCustomSpace().addOntology(src);
if (key == null || key.isAnonymous())
throw new WebApplicationException(INTERNAL_SERVER_ERROR);
// FIXME ugly but will have to do for the time being
// key.split("::")[1];
String uri = OntologyUtils.encode(key);
// uri = uri.substring((ontologyProvider.getGraphPrefix() + "::").length());
if (uri != null && !uri.isEmpty()) {
rb = Response.seeOther(URI.create("/ontonet/ontology/" + scope.getID() + "/" + uri));
} else
rb = Response.ok();
} else if (rb == null)
rb = Response.status(INTERNAL_SERVER_ERROR);
}
// Attach any already-stored ontologies referenced by key, then redirect to the scope.
if (!keys.isEmpty()) {
for (String key : keys) scope.getCustomSpace().addOntology(new StoredOntologySource(OntologyUtils.decode(key)));
rb = Response.seeOther(URI.create("/ontonet/ontology/" + scope.getID()));
}
// addCORSOrigin(servletContext, rb, headers);
return rb.build();
}
use of org.apache.clerezza.rdf.core.serializedform.UnsupportedFormatException in project stanbol by apache.
the class ClerezzaOntologyProvider method loadInStore.
@Override
/**
 * Dereferences an ontology from its physical location and stores it, trying each
 * supported RDF serialization (or the supplied one first) until a parse succeeds.
 * The physical IRI is appended to the supplied origins so it is recorded as an alias.
 *
 * @param ontologyIri the logical/physical IRI of the ontology; must not be null.
 * @param formatIdentifier a preferred media type, or null/empty to try all supported ones.
 * @param force if true, skip the IRI mappers and dereference ontologyIri directly.
 * @param origins additional origins for the stored ontology.
 * @return the primary public key of the stored ontology, or null if no parser succeeded.
 * @throws IOException if the remote content could not be read.
 * @throws IllegalStateException if the resource must be fetched remotely while offline.
 */
@Override
public OWLOntologyID loadInStore(final org.semanticweb.owlapi.model.IRI ontologyIri, String formatIdentifier, boolean force, Origin<?>... origins) throws IOException {
    log.debug("Loading {}", ontologyIri);
    if (ontologyIri == null) {
        throw new IllegalArgumentException("Ontology IRI cannot be null.");
    }
    org.semanticweb.owlapi.model.IRI location = null;
    if (force) {
        location = null;
    } else {
        // Let the registered mappers redirect the logical IRI to a document location.
        for (OWLOntologyIRIMapper mapper : mappers) {
            location = mapper.getDocumentIRI(ontologyIri);
            if (location != null) {
                break;
            }
        }
    }
    if (location == null) {
        if (isOfflineMode()) {
            throw new IllegalStateException("Cannot retrieve " + ontologyIri + " while Stanbol is in offline mode. " + "No resource with that identifier was found locally.");
        } else {
            location = ontologyIri;
        }
    }
    log.info("found {} in {}", ontologyIri, location);
    // Add the physical IRI to the origins.
    origins = Arrays.copyOf(origins, origins.length + 1);
    origins[origins.length - 1] = Origin.create(ontologyIri);
    checkReplaceability(origins);
    // Get ordered list of preferred/supported formats, or use the specified one.
    List<String> supported = OntologyUtils.getPreferredSupportedFormats(parser.getSupportedFormats());
    List<String> formats;
    if (formatIdentifier == null || "".equals(formatIdentifier.trim())) {
        formats = supported;
    } else {
        formats = new LinkedList<String>();
        // Try the requested format first, then fall back to every other supported one.
        if (supported.contains(formatIdentifier)) {
            formats.add(formatIdentifier);
        }
        for (String sup : supported) {
            if (sup != null && !formats.contains(sup)) {
                formats.add(sup);
            }
        }
    }
    for (String currentFormat : formats) {
        try {
            final URLConnection con = location.toURI().toURL().openConnection();
            con.setRequestProperty("Accept", currentFormat);
            final InputStream is = con.getInputStream();
            if (is != null) {
                /*
                 * We provide the current format, so the recursive call won't be trying to sort preferred
                 * formats again. Also, we provide the ontologyIRI as the preferred key, since we already
                 * know it.
                 */
                try {
                    return loadInStore(is, currentFormat, force, origins);
                } finally {
                    // Close the connection's stream even if parsing/storing failed
                    // (previously it was leaked on every failure path).
                    is.close();
                }
            }
        } catch (UnsupportedFormatException e) {
            log.debug("FAILURE format {} (unsupported). Trying next one.", currentFormat);
        } catch (Exception e) {
            log.debug("FAILURE format {} (parse error). Will try next one.", currentFormat);
            log.trace("Parse failure cause:", e);
        }
    }
    // No parser worked, return null.
    log.error("All parsers failed, giving up.");
    return null;
}
use of org.apache.clerezza.rdf.core.serializedform.UnsupportedFormatException in project stanbol by apache.
the class MultiThreadedTestBase method createRdfDataIterator.
/**
* Iterator implementation that parses an RDF graph from the parsed
* {@link InputStream}. The RDF data are loaded in-memory. Because of this
* only test data that fit in-memory can be used. <p>
* Literal values (objects) of the {@link #PROPERTY_TEST_DATA_PROPERTY} are
* used as data. If this property is not present {@link #DEFAULT_TEST_DATA_PROPERTY}
* is used. If {@link #PROPERTY_TEST_DATA_PROPERTY} is set to '*' than all
* Triples with Literal values are used.<p>
* This supports all RDF-formats supported by the {@link JenaParserProvider} and
* {@link RdfJsonParsingProvider}. The charset is expected to be UTF-8.
* @param is the input stream providing the RDF test data.
* @param mediaType the Media-Type of the stream. MUST BE supported by
* the Apache Clerezza RDF parsers.
*/
/**
 * Parses the given RDF stream into an in-memory graph and returns a lazy iterator over
 * the lexical forms of all Literal objects of triples with the given predicate (or of
 * all triples when the property string is the wildcard "*"). The input stream is closed
 * before returning; the test fails if the media type is not supported by the parser.
 *
 * @param is the input stream providing the RDF test data.
 * @param mediaType the Media-Type of the stream; must be supported by the Clerezza parsers.
 * @param propertyString the configured predicate URI (or "*" for all triples).
 * @return an iterator over literal values; {@code remove()} is unsupported.
 */
private Iterator<String> createRdfDataIterator(InputStream is, String mediaType, final String propertyString) {
    // The whole RDF payload is loaded in memory up front.
    final SimpleGraph graph = new SimpleGraph();
    try {
        rdfParser.parse(graph, is, mediaType);
    } catch (UnsupportedFormatException e) {
        // Message cleaned up: removed the duplicated "as" and added the missing
        // sentence break; "parse" was also the wrong verb for setting a property.
        Assert.fail("The MimeType '" + mediaType + "' of the parsed testData " + "is not supported. This utility supports plain text files " + "as well as the RDF formats " + rdfParser.getSupportedFormats() + ". If your test data uses one of those formats but it was not " + "correctly detected you can use the System property '" + PROPERTY_TEST_DATA_TYPE + "' to manually set the Media-Type!");
    }
    IOUtils.closeQuietly(is);
    return new Iterator<String>() {
        // Lazily initialized triple iterator over the parsed graph.
        Iterator<Triple> it = null;
        // Look-ahead element shared by hasNext()/next(); null when not yet fetched.
        String next = null;
        // Returns the lexical form of the next Literal object, skipping non-literal
        // objects, or null when the graph is exhausted.
        private String getNext() {
            if (it == null) {
                IRI property;
                if ("*".equals(propertyString)) {
                    // wildcard: iterate over the object values of all triples
                    property = null;
                    log.info("Iterate over values of all Triples");
                } else {
                    property = new IRI(NamespaceMappingUtils.getConfiguredUri(nsPrefixService, propertyString));
                    log.info("Iterate over values of property {}", property);
                }
                it = graph.filter(null, property, null);
            }
            while (it.hasNext()) {
                RDFTerm value = it.next().getObject();
                if (value instanceof Literal) {
                    return ((Literal) value).getLexicalForm();
                }
            }
            // no more data
            return null;
        }
        @Override
        public boolean hasNext() {
            if (next == null) {
                next = getNext();
            }
            return next != null;
        }
        @Override
        public String next() {
            if (next == null) {
                next = getNext();
            }
            if (next == null) {
                throw new NoSuchElementException("No further testData available");
            } else {
                String elem = next;
                next = null;
                return elem;
            }
        }
        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
use of org.apache.clerezza.rdf.core.serializedform.UnsupportedFormatException in project stanbol by apache.
the class ClerezzaOntologyProvider method loadInStore.
/**
 * Stores the supplied ontology object and computes its primary public key plus any
 * aliases derived from the supplied origins. Accepts an {@link OWLOntology} (converted
 * to an in-memory Clerezza graph) or a Clerezza {@link Graph}. OWLOntologyID origins
 * become the primary key / priority aliases, OWL-API IRI origins become secondary
 * aliases, and a Clerezza IRI origin names the target graph. Imports are resolved
 * recursively when enabled.
 *
 * @param ontology the ontology content; must not be null.
 * @param force if true, reload content even if an identical graph is already stored.
 * @param origins optional key/alias/graph-name hints, scanned in order.
 * @return the primary public key under which the ontology was stored.
 */
@Override
public OWLOntologyID loadInStore(Object ontology, final boolean force, Origin<?>... origins) {
if (ontology == null)
throw new IllegalArgumentException("No ontology supplied.");
checkReplaceability(origins);
long before = System.currentTimeMillis();
// The final graph
Graph targetGraph;
// The supplied ontology converted to Graph
Graph rdfData;
if (ontology instanceof OWLOntology) {
// This will be in memory!
rdfData = OWLAPIToClerezzaConverter.owlOntologyToClerezzaGraph((OWLOntology) ontology);
} else if (ontology instanceof Graph) {
// This might be in memory or in persistent storage.
rdfData = (Graph) ontology;
} else
throw new UnsupportedOperationException("This ontology provider can only accept objects assignable to " + Graph.class + " or " + OWLOntology.class);
// XXX Force is ignored for the content, but the imports?
// Now we proceed to assign the primary key to the ontology.
OWLOntologyID primaryKey = null;
/*
 * Compute aliases
 */
IRI graphName = null;
// Priority aliases.
List<OWLOntologyID> overrides = new ArrayList<OWLOntologyID>();
// Second-choice aliases.
List<org.semanticweb.owlapi.model.IRI> sources = new ArrayList<org.semanticweb.owlapi.model.IRI>();
// Scan origins ONCE.
for (int i = 0; i < origins.length; i++) {
Origin<?> origin = origins[i];
log.debug("Found origin at index {}", i);
if (origin == null) {
log.warn("Null origin at index {}. Skipping.", i);
continue;
}
Object ref = origin.getReference();
if (ref == null) {
log.warn("Null reference at index {}. Skipping.", i);
continue;
}
log.debug(" ... Reference is a {}", ref.getClass().getCanonicalName());
log.debug(" ... Value : {}", ref);
if (ref instanceof OWLOntologyID) {
// First ontology ID wins the primary key; the rest become priority aliases.
OWLOntologyID key = (OWLOntologyID) ref;
if (primaryKey == null) {
primaryKey = key;
log.debug(" ... assigned as primary key.");
} else if (primaryKey.equals(key)) {
log.debug(" ... matches primary key. Skipping.");
} else {
overrides.add(key);
log.debug(" ... assigned as a priority alias for {}", primaryKey);
}
} else if (ref instanceof org.semanticweb.owlapi.model.IRI) {
sources.add((org.semanticweb.owlapi.model.IRI) ref);
log.debug(" ... assigned as a secondary alias (source) for {}", primaryKey);
} else if (ref instanceof IRI) {
// A Clerezza IRI origin names the storage graph; only the first one counts.
if (graphName != null)
log.warn("ImmutableGraph name already assigned as {}. Skipping.", graphName);
else {
graphName = (IRI) ref;
log.debug(" ... assigned as a graph name for {}", primaryKey);
}
} else {
log.warn("Unhandled type for origin at index {} : {}. Skipping.", i, ref.getClass());
}
}
// The actual logical ID will be dereferenceable no matter what.
OWLOntologyID extractedId = OWLUtils.extractOntologyID(rdfData);
if (primaryKey == null)
// Not overridden: set as primary key.
primaryKey = extractedId;
else
// Overridden: must be an alias anyway.
overrides.add(extractedId);
if (primaryKey == null) {
// No overrides, no extracted ID: synthesize a key.
org.semanticweb.owlapi.model.IRI z;
// The first IRI found becomes the primary key.
if (!sources.isEmpty())
z = sources.iterator().next();
else // Try the graph name
if (graphName != null)
z = org.semanticweb.owlapi.model.IRI.create(graphName.getUnicodeString());
else
// Extrema ratio : compute a timestamped primary key.
z = org.semanticweb.owlapi.model.IRI.create(getClass().getCanonicalName() + "-time:" + System.currentTimeMillis());
primaryKey = new OWLOntologyID(z);
}
// Check if it is possible to avoid reloading the ontology content from its source.
boolean mustLoad = true;
if (!force && graphName != null && store.listGraphs().contains(graphName)) {
// Any failed check will abort the scan.
boolean condition = true;
// Check if the extracted ontology ID matches that of the supplied graph.
// XXX note that anonymous ontologies should be considered a match... or should they not?
Graph tc = store.getGraph(graphName);
OWLOntologyID idFromStore = OWLUtils.extractOntologyID(tc);
// Null-safe comparison: the previous expression threw an NPE when only the
// stored graph carried an ontology ID.
condition &= (extractedId == null) ? (idFromStore == null) : extractedId.equals(idFromStore);
// FIXME not a good policy for graphs that change without altering the size.
if (condition && rdfData instanceof Graph)
condition &= tc.size() == rdfData.size();
mustLoad &= !condition;
}
if (!mustLoad && graphName != null) {
log.debug("ImmutableGraph with ID {} already in store. Default action is to skip storage.", graphName);
targetGraph = store.getGraph(graphName);
} else {
String iri = null;
// NOTE(review): if primaryKey has neither ontology nor version IRI, iri stays
// null and the startsWith below would NPE — presumed unreachable; confirm.
if (primaryKey.getOntologyIRI() != null)
iri = primaryKey.getOntologyIRI().toString();
if (primaryKey.getVersionIRI() != null)
iri += ":::" + primaryKey.getVersionIRI().toString();
// s will become the graph name
String s = (iri.startsWith(prefix + "::")) ? "" : (prefix + "::");
s += iri;
graphName = new IRI(URIUtils.sanitize(s));
log.debug("Storing ontology with graph ID {}", graphName);
try {
targetGraph = store.createGraph(graphName);
} catch (EntityAlreadyExistsException e) {
if (graphName.equals(e.getEntityName()))
targetGraph = store.getGraph(graphName);
else
targetGraph = store.createGraph(graphName);
}
targetGraph.addAll(rdfData);
}
// All is already sanitized by the time we get here.
// Now do the mappings
String mappedIds = "";
// Discard unconventional ontology IDs with only the version IRI
if (primaryKey != null && primaryKey.getOntologyIRI() != null) {
// Versioned or not, the real ID mapping is always added
keymap.setMapping(primaryKey, graphName);
mappedIds += primaryKey;
// TODO map unversioned ID as well?
Triple t = new TripleImpl(keymap.buildResource(primaryKey), SIZE_IN_TRIPLES_URIREF, LiteralFactory.getInstance().createTypedLiteral(Integer.valueOf(rdfData.size())));
getMetaGraph(Graph.class).add(t);
}
// Add aliases.
for (org.semanticweb.owlapi.model.IRI source : sources) if (source != null)
overrides.add(new OWLOntologyID(source));
for (OWLOntologyID alias : overrides) if (alias != null && !alias.equals(primaryKey)) {
addAlias(primaryKey, alias);
mappedIds += " , " + alias;
}
// Do this AFTER registering the ontology, otherwise import cycles will cause infinite loops.
if (resolveImports) {
// Scan resources of type owl:Ontology, but only get the first.
Iterator<Triple> it = targetGraph.filter(null, RDF.type, OWL.Ontology);
if (it.hasNext()) {
// Scan import statements for the one owl:Ontology considered.
Iterator<Triple> it2 = targetGraph.filter(it.next().getSubject(), OWL.imports, null);
while (it2.hasNext()) {
RDFTerm obj = it2.next().getObject();
log.info("Resolving import target {}", obj);
if (obj instanceof IRI)
try {
// TODO try locals first
IRI target = (IRI) obj;
OWLOntologyID id = new OWLOntologyID(org.semanticweb.owlapi.model.IRI.create(target.getUnicodeString()));
if (keymap.getMapping(id) == null) {
// Check if it's not there already.
if (isOfflineMode())
throw new RuntimeException("Cannot load imported ontology " + obj + " while Stanbol is in offline mode.");
// TODO manage origins for imported ontologies too?
OWLOntologyID id2 = loadInStore(org.semanticweb.owlapi.model.IRI.create(((IRI) obj).getUnicodeString()), null, false);
if (id2 != null)
id = id2;
log.info("Import {} resolved.", obj);
log.debug("");
} else {
log.info("Requested import already stored. Setting dependency only.");
}
descriptor.setDependency(primaryKey, id);
} catch (UnsupportedFormatException e) {
log.warn("Failed to parse format for resource " + obj, e);
// / XXX configure to continue?
} catch (IOException e) {
log.warn("Failed to load ontology from resource " + obj, e);
// / XXX configure to continue?
}
}
}
}
log.debug(" Ontology {}", mappedIds);
if (targetGraph != null)
log.debug(" ... ({} triples)", targetGraph.size());
log.debug(" ... primary public key : {}", primaryKey);
log.debug("Time: {} ms", (System.currentTimeMillis() - before));
return primaryKey;
}
Aggregations