Use of java.util.ConcurrentModificationException in project stanbol by apache: the class RootResource, method performLoadOntology.
protected ResponseBuilder performLoadOntology(MultiPartBody data, HttpHeaders headers, Origin<?>... keys) {
log.debug(" post(MultiPartBody data)");
ResponseBuilder rb = null;
IRI location = null;
// If a file part is found, it takes precedence over the url/location parameter.
byte[] file = null;
String format = null;
List<OWLOntologyID> aliases = new ArrayList<OWLOntologyID>();
if (data.getFormFileParameterValues("file").length > 0) {
file = data.getFormFileParameterValues("file")[0].getContent();
}
if (data.getTextParameterValues("format").length > 0) {
String value = data.getTextParameterValues("format")[0];
if (!value.equals("auto")) {
format = value;
}
}
if (data.getTextParameterValues("url").length > 0) {
String value = data.getTextParameterValues("url")[0];
try {
// To throw 400 if malformed.
URI.create(value);
location = IRI.create(value);
} catch (Exception ex) {
log.error("Malformed IRI for " + value, ex);
throw new WebApplicationException(ex, BAD_REQUEST);
}
}
if (data.getTextParameterValues("alias").length > 0) {
for (String value : data.getTextParameterValues("alias")) {
if (!"null".equals(value)) {
try {
aliases.add(OntologyUtils.decode(value));
} catch (Exception ex) {
log.error("Malformed public key for " + value, ex);
throw new WebApplicationException(ex, BAD_REQUEST);
}
}
}
}
log.debug("Parameters:");
log.debug("file: {}", file != null && file.length > 0 ? "NOT-NULL" : "null");
log.trace("file data: {}", file);
log.debug("url: {}", location);
log.debug("format: {}", format);
log.debug("alias: {}", aliases);
// Then add the file
OWLOntologyID key = null;
if (file != null && file.length > 0) {
/*
* Because the ontology provider's load method could fail after only one attempt without resetting
* the stream, we might have to do that ourselves.
*/
List<String> formats;
if (format != null && !format.trim().isEmpty()) {
formats = Collections.singletonList(format);
} else {
// The RESTful API has its own list of preferred formats
formats = Arrays.asList(RDF_XML, TURTLE, X_TURTLE, N3, N_TRIPLE, OWL_XML, FUNCTIONAL_OWL, MANCHESTER_OWL, RDF_JSON);
}
log.debug("Will try {} supported formats", formats.size());
int unsupported = 0, failed = 0;
Iterator<String> itf = formats.iterator();
if (!itf.hasNext()) {
throw new OntologyLoadingException("No suitable format found or defined.");
}
do {
String f = itf.next();
try {
// Re-instantiate the stream on every attempt
InputStream content = new BufferedInputStream(new ByteArrayInputStream(file));
// ClerezzaOWLUtils.guessOntologyID(new FileInputStream(file), Parser.getInstance(), f);
OWLOntologyID guessed = OWLUtils.guessOntologyID(content, Parser.getInstance(), f);
if (guessed != null && !guessed.isAnonymous() && ontologyProvider.hasOntology(guessed)) {
rb = Response.status(Status.CONFLICT);
this.submitted = guessed;
if (headers.getAcceptableMediaTypes().contains(MediaType.TEXT_HTML_TYPE)) {
rb.entity(new Viewable("/imports/409.ftl", this));
rb.header(HttpHeaders.CONTENT_TYPE, MediaType.TEXT_HTML + "; charset=utf-8");
}
break;
} else {
content = new BufferedInputStream(new ByteArrayInputStream(file));
key = ontologyProvider.loadInStore(content, f, true, keys);
}
} catch (UnsupportedFormatException e) {
log.warn("POST method failed for media type {}. This should not happen (should fail earlier)", headers.getMediaType());
// rb = Response.status(UNSUPPORTED_MEDIA_TYPE);
unsupported++;
} catch (IOException e) {
log.debug(">>> FAILURE format {} (I/O error)", f);
failed++;
} catch (ConcurrentModificationException e) {
log.error("Exception logged", e);
failed++;
} catch (Exception e) {
// SAXParseException and others
log.debug(">>> FAILURE format {} (parse error)", f);
log.debug("Caught exception {} : {}", e.getClass(), e.getLocalizedMessage());
log.trace("Exception logged", e);
failed++;
}
} while ((key == null) && itf.hasNext());
if ((key == null || key.isAnonymous()) && rb == null) {
if (failed > 0) {
throw new WebApplicationException(BAD_REQUEST);
} else if (unsupported > 0) {
throw new WebApplicationException(UNSUPPORTED_MEDIA_TYPE);
}
}
} else if (location != null) {
try {
// Here we try every format supported by the Java API
key = ontologyProvider.loadInStore(location, null, true, keys);
} catch (Exception e) {
log.error("Failed to load ontology from " + location, e);
Throwable cause = e.getCause();
String html = "<h1>400 Bad Request</h1>" + "<p>Failed to load ontology from <a href=\"" + location + "\" target=\"_blank\">" + location + "</a></p>";
if (cause != null)
html += "<p>logged cause was: " + cause.getLocalizedMessage().replace("<", "&lt;").replace(">", "&gt;") + "</p>";
return Response.status(BAD_REQUEST).type(TEXT_HTML).entity(html);
}
} else if (!aliases.isEmpty()) {
// No content, but there are aliases.
for (Origin<?> origin : keys) {
if (origin.getReference() instanceof OWLOntologyID) {
OWLOntologyID primary = ((OWLOntologyID) origin.getReference());
if (ontologyProvider.getStatus(primary) != org.apache.stanbol.ontologymanager.servicesapi.ontology.OntologyProvider.Status.NO_MATCH) {
for (OWLOntologyID alias : aliases) {
try {
if (ontologyProvider.addAlias(primary, alias) && key == null) {
key = alias;
}
} catch (IllegalArgumentException ex) {
log.warn("Cannot add alias");
log.warn(" ... ontology key: {}", primary);
log.warn(" ... alias: {}", alias);
log.warn(" ... reason: ", ex);
continue;
}
}
}
}
}
} else {
log.error("Bad request");
log.error(" file is: {}", file);
throw new WebApplicationException(BAD_REQUEST);
}
if (key != null && !key.isAnonymous()) {
String uri = OntologyUtils.encode(key);
if (uri != null && !uri.isEmpty()) {
rb = Response.ok();
if (headers.getAcceptableMediaTypes().contains(MediaType.TEXT_HTML_TYPE)) {
rb.entity(new Viewable("index", this));
rb.header(HttpHeaders.CONTENT_TYPE, MediaType.TEXT_HTML + "; charset=utf-8");
}
} else {
rb = Response.ok();
}
} else if (rb == null) {
rb = Response.status(Status.INTERNAL_SERVER_ERROR);
}
return rb;
}
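A pattern worth isolating from the handler above: the payload is buffered into a byte[] once, and every parse attempt gets a fresh InputStream, so a format that fails after partially consuming the stream cannot poison the next attempt; ConcurrentModificationException is counted as just another per-format failure. A minimal sketch of that loop, assuming a hypothetical Parser interface in place of ontologyProvider.loadInStore:

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.Arrays;
import java.util.ConcurrentModificationException;
import java.util.List;

public class FormatFallbackSketch {

    // Hypothetical stand-in for ontologyProvider.loadInStore(content, format, ...).
    interface Parser {
        Object parse(InputStream in, String format) throws Exception;
    }

    static Object loadFirstSupported(byte[] payload, Parser parser) {
        List<String> formats = Arrays.asList("application/rdf+xml", "text/turtle", "text/n3");
        int failed = 0;
        for (String format : formats) {
            // Re-instantiate the stream on every attempt, as the original does:
            // a failed parse may have consumed the previous stream.
            InputStream content = new BufferedInputStream(new ByteArrayInputStream(payload));
            try {
                return parser.parse(content, format);
            } catch (ConcurrentModificationException e) {
                failed++; // treated like any other per-format failure
            } catch (Exception e) {
                failed++;
            }
        }
        throw new IllegalStateException(failed + " of " + formats.size() + " formats failed");
    }
}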
Use of java.util.ConcurrentModificationException in project eclipse.platform.text by eclipse: the class DocumentLineDiffer, method initialize.
/**
* (Re-)initializes the differ using the current reference and <code>DiffInitializer</code>.
*
* @since 3.2 protected for testing reasons, package visible before
*/
protected synchronized void initialize() {
// make new incoming changes go into the queue of stored events, plus signal we can't restore.
fState = INITIALIZING;
if (fRightDocument == null)
return;
// there is no point in receiving updates before the initialization job gets us a new copy of the document for diffing
fIgnoreDocumentEvents = true;
if (fLeftDocument != null) {
fLeftDocument.removeDocumentListener(this);
fLeftDocument = null;
fLeftEquivalent = null;
}
// if there already is a job:
// return if it has not started yet, cancel it if already running
final Job oldJob = fInitializationJob;
if (oldJob != null) {
// don't chain up jobs if there is one waiting already.
if (oldJob.getState() == Job.WAITING) {
oldJob.wakeUp(INITIALIZE_DELAY);
return;
}
oldJob.cancel();
}
fInitializationJob = new Job(QuickDiffMessages.quickdiff_initialize) {
/*
* This is run in a different thread. As the documents might be synchronized, never ever
* access the documents in a synchronized section or expect deadlocks. See
* https://bugs.eclipse.org/bugs/show_bug.cgi?id=44692
*/
@Override
public IStatus run(IProgressMonitor monitor) {
// It will return relatively quickly as RangeDifferencer supports canceling
if (oldJob != null)
try {
oldJob.join();
} catch (InterruptedException e) {
// will not happen as no one interrupts our thread
Assert.isTrue(false);
}
// 2: get the reference document
IQuickDiffReferenceProvider provider = fReferenceProvider;
final IDocument left;
try {
left = provider == null ? null : provider.getReference(monitor);
} catch (CoreException e) {
synchronized (DocumentLineDiffer.this) {
if (isCanceled(monitor))
return Status.CANCEL_STATUS;
clearModel();
fireModelChanged();
return e.getStatus();
}
} catch (OperationCanceledException e) {
return Status.CANCEL_STATUS;
}
// Getting our own copies of the documents for offline diffing.
//
// We need to make sure that we do get all document modifications after
// copying the documents as we want to re-inject them later on to become consistent.
// fRightDocument, but not subject to change
IDocument right = fRightDocument;
// the copy of the actual (right) document
IDocument actual = null;
// the copy of the reference (left) document
IDocument reference = null;
synchronized (DocumentLineDiffer.this) {
// 4: take an early exit if the documents are not valid
if (left == null || right == null) {
if (isCanceled(monitor))
return Status.CANCEL_STATUS;
clearModel();
fireModelChanged();
return Status.OK_STATUS;
}
// set the reference document
fLeftDocument = left;
// start listening to document events.
fIgnoreDocumentEvents = false;
}
// accessing the reference document from a different thread - reference providers need
// to be able to deal with this.
left.addDocumentListener(DocumentLineDiffer.this);
// create the reference copy - note that any changes on the
// reference will trigger re-initialization anyway
reference = createCopy(left);
if (reference == null)
return Status.CANCEL_STATUS;
// create the actual copy
Object lock = null;
if (right instanceof ISynchronizable)
lock = ((ISynchronizable) right).getLockObject();
if (lock != null) {
// a) can lock the document
synchronized (lock) {
synchronized (DocumentLineDiffer.this) {
if (isCanceled(monitor))
return Status.CANCEL_STATUS;
fStoredEvents.clear();
actual = createUnprotectedCopy(right);
}
}
} else {
// b) cannot lock the document
// Now this is fun. The reference documents may be PartiallySynchronizedDocuments
// which will result in a deadlock if they get changed externally before we get
// our exclusive copies.
// Here's what we do: we try over and over (without synchronization) to get copies
// without interleaving modification. If there is a document change, we just repeat.
int i = 0;
do {
// this is an arbitrary emergency exit in case a referenced document goes nuts
if (i++ == 100)
return new Status(IStatus.ERROR, TextEditorPlugin.PLUGIN_ID, IStatus.OK, NLSUtility.format(QuickDiffMessages.quickdiff_error_getting_document_content, new Object[] { left.getClass(), right.getClass() }), null);
synchronized (DocumentLineDiffer.this) {
if (isCanceled(monitor))
return Status.CANCEL_STATUS;
fStoredEvents.clear();
}
// access documents non synchronized:
// get an exclusive copy of the actual document
actual = createCopy(right);
synchronized (DocumentLineDiffer.this) {
if (isCanceled(monitor))
return Status.CANCEL_STATUS;
if (fStoredEvents.size() == 0 && actual != null)
break;
}
} while (true);
}
IHashFunction hash = new DJBHashFunction();
DocumentEquivalenceClass leftEquivalent = new DocumentEquivalenceClass(reference, hash);
fLeftEquivalent = leftEquivalent;
IRangeComparator ref = new DocEquivalenceComparator(leftEquivalent, null);
DocumentEquivalenceClass rightEquivalent = new DocumentEquivalenceClass(actual, hash);
fRightEquivalent = rightEquivalent;
IRangeComparator act = new DocEquivalenceComparator(rightEquivalent, null);
ArrayList<QuickDiffRangeDifference> diffs = asQuickDiffRangeDifference(RangeDifferencer.findRanges(fRangeDiffFactory, monitor, ref, act));
// re-inject stored events to get up to date.
synchronized (DocumentLineDiffer.this) {
if (isCanceled(monitor))
return Status.CANCEL_STATUS;
// set the new differences so we can operate on them
fDifferences = diffs;
}
// re-inject events accumulated in the meantime.
try {
do {
DocumentEvent event;
synchronized (DocumentLineDiffer.this) {
if (isCanceled(monitor))
return Status.CANCEL_STATUS;
if (fStoredEvents.isEmpty()) {
// we are back in sync with the live documents
fInitializationJob = null;
fState = SYNCHRONIZED;
fLastDifference = null;
// replace the private documents with the actual
leftEquivalent.setDocument(left);
rightEquivalent.setDocument(right);
break;
}
event = fStoredEvents.remove(0);
}
// access documents non synchronized:
IDocument copy = null;
if (event.fDocument == right)
copy = actual;
else if (event.fDocument == left)
copy = reference;
else
Assert.isTrue(false);
// copy the event to inject it into our diff copies
// don't modify the original event! See https://bugs.eclipse.org/bugs/show_bug.cgi?id=134227
event = new DocumentEvent(copy, event.fOffset, event.fLength, event.fText);
handleAboutToBeChanged(event);
// inject the event into our private copy
actual.replace(event.fOffset, event.fLength, event.fText);
handleChanged(event);
} while (true);
} catch (BadLocationException e) {
left.removeDocumentListener(DocumentLineDiffer.this);
clearModel();
initialize();
return Status.CANCEL_STATUS;
}
fireModelChanged();
return Status.OK_STATUS;
}
private boolean isCanceled(IProgressMonitor monitor) {
return fInitializationJob != this || monitor != null && monitor.isCanceled();
}
private void clearModel() {
synchronized (DocumentLineDiffer.this) {
fLeftDocument = null;
fLeftEquivalent = null;
fInitializationJob = null;
fStoredEvents.clear();
fLastDifference = null;
fDifferences.clear();
}
}
/**
* Creates a copy of <code>document</code> and catches any
* exceptions that may occur if the document is modified concurrently.
* Only call this method in a synchronized block if the document is
* an ISynchronizable and has been locked, as document.get() is called
* and may result in a deadlock otherwise.
*
* @param document the document to create a copy of
* @return a copy of the document, or <code>null</code> if an exception was thrown
*/
private IDocument createCopy(IDocument document) {
Assert.isNotNull(document);
// this fixes https://bugs.eclipse.org/bugs/show_bug.cgi?id=56091
try {
return createUnprotectedCopy(document);
} catch (NullPointerException e) {
} catch (ArrayStoreException e) {
} catch (IndexOutOfBoundsException e) {
} catch (ConcurrentModificationException e) {
} catch (NegativeArraySizeException e) {
}
return null;
}
private IDocument createUnprotectedCopy(IDocument document) {
return new Document(document.get());
}
};
fInitializationJob.setSystem(true);
fInitializationJob.setPriority(Job.DECORATE);
fInitializationJob.setProperty(IProgressConstants.NO_IMMEDIATE_ERROR_PROMPT_PROPERTY, Boolean.TRUE);
fInitializationJob.schedule(INITIALIZE_DELAY);
}
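The CME-relevant piece above is createCopy: document.get() on an unsynchronized document can throw any of several unchecked exceptions when a writer races the read, so the copy is wrapped and null signals "try again". A minimal sketch of that protect-and-retry shape, assuming a hypothetical Source interface in place of IDocument (the original detects interleaved edits via its stored-event queue rather than this simple null check):

import java.util.ConcurrentModificationException;

public class ProtectedCopySketch {

    // Hypothetical stand-in for IDocument; get() may throw unchecked
    // exceptions if the backing text is mutated concurrently.
    interface Source {
        String get();
    }

    /** Returns a snapshot, or null if the source was mutated mid-copy. */
    static String createCopy(Source source) {
        try {
            return source.get();
        } catch (NullPointerException | ArrayStoreException | IndexOutOfBoundsException
                | ConcurrentModificationException | NegativeArraySizeException e) {
            // Any of these can surface from an unsynchronized reader racing a writer.
            return null;
        }
    }

    /** Retry until a copy succeeds without interleaved modification, up to a cap. */
    static String copyWithRetry(Source source, int maxAttempts) {
        for (int i = 0; i < maxAttempts; i++) {
            String copy = createCopy(source);
            if (copy != null)
                return copy;
        }
        throw new IllegalStateException("source kept changing; gave up after " + maxAttempts + " attempts");
    }
}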
Use of java.util.ConcurrentModificationException in project mobile-sdk-android by meniga: the class MenigaHttpLogger, method intercept.
@Override
public Response intercept(Chain chain) throws IOException {
Request request = chain.request();
if (logLevel == LogLevel.NONE) {
return chain.proceed(request);
}
boolean logBody = logType == LogType.BODY_ONLY || logType == LogType.BODY_AND_HEADERS;
boolean logHeaders = logType == LogType.HEADERS_ONLY || logType == LogType.BODY_AND_HEADERS;
RequestBody requestBody = request.body();
boolean hasRequestBody = requestBody != null;
String reqUrl = request.url().toString();
String requestStartMessage = request.method() + REQUEST_START + reqUrl;
if (!logHeaders && hasRequestBody) {
requestStartMessage += " (" + requestBody.contentLength() + "-byte body)";
}
log(requestStartMessage);
if (logHeaders) {
if (hasRequestBody) {
// Request body headers are only present when installed as a network interceptor. Force
// them to be included (when available) so their values are known.
if (requestBody.contentType() != null) {
log(getHeaderString("Content-Type", requestBody.contentType().toString()));
}
if (requestBody.contentLength() != -1) {
log(getHeaderString("Content-Length", Long.toString(requestBody.contentLength())));
}
}
Headers headers = request.headers();
for (int i = 0, count = headers.size(); i < count; i++) {
String name = headers.name(i);
// Skip headers from the request body as they are explicitly logged above.
if (!"Content-Type".equalsIgnoreCase(name) && !"Content-Length".equalsIgnoreCase(name)) {
log(getHeaderString(name, headers.value(i)));
}
}
}
if (!logBody) {
log(REQUEST_END + request.method());
} else if (!hasRequestBody) {
log(BODY);
log(NO_BODY);
} else if (bodyEncoded(request.headers())) {
log(REQUEST_END + request.method() + " (encoded body omitted)");
} else {
Buffer buffer = new Buffer();
try {
requestBody.writeTo(buffer);
} catch (ConcurrentModificationException ex) {
log("Error logging body - got ConcurrentModificationException");
}
Charset charset = UTF8;
MediaType contentType = requestBody.contentType();
if (contentType != null) {
charset = contentType.charset(UTF8);
}
log(BODY);
if (isPlaintext(buffer)) {
log(buffer.readString(charset));
log(REQUEST_END + request.method() + " (" + requestBody.contentLength() + "-byte body)");
} else {
log("<" + requestBody.contentLength() + "-BYTE BINARY BODY OMITTED");
log(REQUEST_END + request.method());
}
}
long startNs = System.nanoTime();
Response response;
try {
response = chain.proceed(request);
} catch (Exception e) {
log("<---- HTTP FAILED: " + e);
throw e;
}
long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs);
ResponseBody responseBody = response.body();
long contentLength = responseBody.contentLength();
String bodySize = contentLength != -1 ? contentLength + "-byte" : "unknown-length";
log(RESPONSE_START + reqUrl);
log("(" + tookMs + "ms" + (!logHeaders ? ", " + bodySize + " body" : "") + ")");
if (logHeaders) {
Headers headers = response.headers();
for (int i = 0, count = headers.size(); i < count; i++) {
log(getHeaderString(headers.name(i), headers.value(i)));
}
}
if (!logBody) {
log(RESPONSE_END);
} else if (!HttpHeaders.hasBody(response)) {
log(BODY);
log(NO_BODY);
log(RESPONSE_END);
} else if (bodyEncoded(response.headers())) {
log(BODY);
log("<ENCODED BODY OMITTED>");
log(RESPONSE_END);
} else {
BufferedSource source = responseBody.source();
// Buffer the entire body.
source.request(Long.MAX_VALUE);
Buffer buffer = source.buffer();
Charset charset = UTF8;
MediaType contentType = responseBody.contentType();
if (contentType != null) {
try {
charset = contentType.charset(UTF8);
} catch (UnsupportedCharsetException e) {
log(BODY);
log("<ERROR DECODING BODY; CHARSET IS LIKELY MALFORMED.>");
log(RESPONSE_END);
return response;
}
}
if (!isPlaintext(buffer)) {
log(BODY);
log("<BINARY " + buffer.size() + "-BYTE BODY OMITTED>");
log(RESPONSE_END);
return response;
}
if (contentLength != 0) {
log(BODY + " (" + buffer.size() + "-byte body)");
log(buffer.clone().readString(charset));
}
log(RESPONSE_END);
}
return response;
}
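The interceptor above guards requestBody.writeTo(buffer) because a body backed by a live collection (a multipart form, say) can throw ConcurrentModificationException if another thread mutates it while it serializes, and the logger prefers to degrade rather than fail the request. A sketch of the same hazard and response, using a plain List as an assumed stand-in for such a body rather than a real okhttp RequestBody:

import java.util.ArrayList;
import java.util.ConcurrentModificationException;
import java.util.List;

public class BodySnapshotSketch {

    /** Joins the parts into one string; iterating a live list can fail fast. */
    static String serialize(List<String> parts) {
        StringBuilder sb = new StringBuilder();
        for (String part : parts) // fail-fast iterator: throws if parts is mutated mid-loop
            sb.append(part);
        return sb.toString();
    }

    /** Defensive variant for logging: degrade instead of propagating. */
    static String serializeForLogging(List<String> parts) {
        try {
            // Snapshot first so a concurrent writer can no longer race the main loop.
            return serialize(new ArrayList<>(parts));
        } catch (ConcurrentModificationException e) {
            // The copy constructor itself iterates, so even this can race; give up gracefully.
            return "<body omitted: concurrent modification while logging>";
        }
    }
}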
Use of java.util.ConcurrentModificationException in project tomee by apache: the class ClassLoaderUtil, method clearSunJarFileFactoryCacheImpl.
/**
* Due to several different implementation changes in various JDK releases the code here is not as
* straightforward as reflecting debug items in your current runtime. There have even been breaking changes
* between 1.6 runtime builds, let alone 1.5.
* <p/>
* If you discover a new issue here please be careful to ensure the existing functionality is 'extended' and not
* just replaced to match your runtime observations.
* <p/>
* If you want to look at the mess that leads up to this then follow the source code changes made to
* the class sun.net.www.protocol.jar.JarFileFactory over several years.
*
* @param jarLocation String
* @param attempt int
*/
@SuppressWarnings({ "unchecked" })
private static synchronized void clearSunJarFileFactoryCacheImpl(final String jarLocation, final int attempt) {
if (skipClearSunJarFile.get()) {
return;
}
logger.debug("Clearing Sun JarFileFactory cache for directory " + jarLocation);
try {
final Class jarFileFactory = Class.forName("sun.net.www.protocol.jar.JarFileFactory");
// Do not generify these maps as their contents are NOT stable across runtimes.
final Field fileCacheField = jarFileFactory.getDeclaredField("fileCache");
fileCacheField.setAccessible(true);
final Map fileCache = (Map) fileCacheField.get(null);
final Map fileCacheCopy = new HashMap(fileCache);
final Field urlCacheField = jarFileFactory.getDeclaredField("urlCache");
urlCacheField.setAccessible(true);
final Map urlCache = (Map) urlCacheField.get(null);
final Map urlCacheCopy = new HashMap(urlCache);
// The only stable item we have here is the JarFile/ZipFile in this map
Iterator iterator = urlCacheCopy.entrySet().iterator();
final List urlCacheRemoveKeys = new ArrayList();
while (iterator.hasNext()) {
final Map.Entry entry = (Map.Entry) iterator.next();
final Object key = entry.getKey();
if (key instanceof ZipFile) {
final ZipFile zf = (ZipFile) key;
// getName returns File.getPath()
final File file = new File(zf.getName());
if (isParent(jarLocation, file)) {
// Flag for removal
urlCacheRemoveKeys.add(key);
}
} else {
logger.warning("Unexpected key type: " + key);
}
}
iterator = fileCacheCopy.entrySet().iterator();
final List fileCacheRemoveKeys = new ArrayList();
while (iterator.hasNext()) {
final Map.Entry entry = (Map.Entry) iterator.next();
final Object value = entry.getValue();
if (urlCacheRemoveKeys.contains(value)) {
fileCacheRemoveKeys.add(entry.getKey());
}
}
// Use these unstable values as the keys for the fileCache values.
iterator = fileCacheRemoveKeys.iterator();
while (iterator.hasNext()) {
final Object next = iterator.next();
try {
final Object remove = fileCache.remove(next);
if (null != remove) {
logger.debug("Removed item from fileCache: " + remove);
}
} catch (final Throwable e) {
logger.warning("Failed to remove item from fileCache: " + next);
}
}
iterator = urlCacheRemoveKeys.iterator();
while (iterator.hasNext()) {
final Object next = iterator.next();
try {
final Object remove = urlCache.remove(next);
try {
((ZipFile) next).close();
} catch (final Throwable e) {
// Ignore
}
if (null != remove) {
logger.debug("Removed item from urlCache: " + remove);
}
} catch (final Throwable e) {
logger.warning("Failed to remove item from urlCache: " + next);
}
}
} catch (final ConcurrentModificationException e) {
if (attempt > 0) {
clearSunJarFileFactoryCacheImpl(jarLocation, attempt - 1);
} else {
logger.error("Unable to clear Sun JarFileFactory cache after 5 attempts", e);
}
} catch (final ClassNotFoundException | NoSuchFieldException e) {
// not a sun vm
} catch (final RuntimeException re) {
if ("java.lang.reflect.InaccessibleObjectException".equals(re.getClass().getName())) {
skipClearSunJarFile.compareAndSet(false, true);
return;
}
throw re;
} catch (final Throwable e) {
if (Boolean.getBoolean("openejb.java9.hack")) {
// reflection fails cause internals are not exported, close() is called and should be fine
return;
}
logger.error("Unable to clear Sun JarFileFactory cache", e);
}
}
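The structure above is copy-then-mutate with a bounded retry: iterate HashMap copies of the JarFileFactory caches, remove from the live maps by key, and if a ConcurrentModificationException still escapes (copying a map iterates it too), recurse with a decremented attempt counter. A stripped-down sketch of that shape; the clearCache entry point and the initial attempt count of 5 (matching the "after 5 attempts" log message) are assumptions:

import java.util.ConcurrentModificationException;
import java.util.HashMap;
import java.util.Map;

public class RetryOnCmeSketch {

    // Hypothetical public entry point seeding the retry budget.
    static void clearCache(Map<Object, Object> cache, String jarLocation) {
        clearCacheImpl(cache, jarLocation, 5);
    }

    private static void clearCacheImpl(Map<Object, Object> cache, String jarLocation, int attempt) {
        try {
            // Work on a copy: iterating the live map races other threads.
            Map<Object, Object> copy = new HashMap<>(cache);
            for (Object key : copy.keySet()) {
                if (key.toString().startsWith(jarLocation))
                    cache.remove(key); // safe: we iterate the copy, mutate the original
            }
        } catch (ConcurrentModificationException e) {
            // Even taking the copy can race a writer; back off and retry a bounded number of times.
            if (attempt > 0)
                clearCacheImpl(cache, jarLocation, attempt - 1);
        }
    }
}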
Use of java.util.ConcurrentModificationException in project tomee by apache: the class ManyToManyComplexPkTests, method testIteratorConcurrentModification.
public void testIteratorConcurrentModification() throws Exception {
resetDB();
beginTransaction();
final Set games;
try {
final PlatformLocal platform = findPlatform(new Integer(1));
final GameLocal game = findGame(new Integer(11));
games = platform.getGames();
assertFalse(games.isEmpty());
assertEquals(2, games.size());
final Iterator iterator = games.iterator();
games.remove(game);
assertEquals(1, games.size());
try {
iterator.next();
fail("expected iterator.next() to throw an ConcurrentModificationException");
} catch (final ConcurrentModificationException expected) {
}
} finally {
completeTransaction();
}
}
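The test pins down the fail-fast contract that all the code above defends against: structurally modifying a collection after obtaining an iterator invalidates that iterator, and the next call on it throws. The same contract demonstrated with plain java.util collections, as a self-contained sketch:

import java.util.ArrayList;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.List;

public class FailFastDemo {
    public static void main(String[] args) {
        List<String> games = new ArrayList<>(List.of("chess", "go"));
        Iterator<String> iterator = games.iterator();
        games.remove("go"); // structural modification after the iterator was created
        try {
            iterator.next(); // the iterator detects the stale modification count and fails fast
            throw new AssertionError("expected iterator.next() to throw");
        } catch (ConcurrentModificationException expected) {
            System.out.println("iterator failed fast, as the test above asserts");
        }
    }
}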