Use of org.eclipse.xtext.resource.impl.DefaultResourceDescriptionDelta in project xtext-eclipse by eclipse.
The class AbstractBuilderState, method doClean:
protected Collection<IResourceDescription.Delta> doClean(Set<URI> toBeRemoved, IProgressMonitor monitor) throws OperationCanceledException {
    SubMonitor subMonitor = SubMonitor.convert(monitor, Messages.AbstractBuilderState_2, toBeRemoved.size() / MONITOR_CHUNK_SIZE_CLEAN + 1);
    subMonitor.subTask(Messages.AbstractBuilderState_2);
    Set<IResourceDescription.Delta> result = newLinkedHashSet();
    int i = 0;
    for (URI toDelete : toBeRemoved) {
        if (monitor.isCanceled()) {
            throw new OperationCanceledException();
        }
        IResourceDescription resourceDescription = getResourceDescription(toDelete);
        if (resourceDescription != null) {
            result.add(new DefaultResourceDescriptionDelta(resourceDescription, null));
        }
        i++;
        if (i % MONITOR_CHUNK_SIZE_CLEAN == 0)
            subMonitor.worked(1);
    }
    return result;
}
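The (old, new) pair handed to the DefaultResourceDescriptionDelta constructor encodes the kind of change: doClean always passes null as the new description to signal a deletion. The following is a minimal sketch, not part of Xtext, of how a consumer typically classifies such deltas (class and method names are ours):

import org.eclipse.xtext.resource.IResourceDescription;

// Illustrative helper: interprets the (old, new) pair of a delta.
public class DeltaKindExample {
    public static String classify(IResourceDescription.Delta delta) {
        if (delta.getOld() == null) {
            return "added: " + delta.getUri();   // no previous description existed
        }
        if (delta.getNew() == null) {
            return "deleted: " + delta.getUri(); // the shape produced by doClean above
        }
        return "changed: " + delta.getUri();     // both sides present
    }
}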
Use of org.eclipse.xtext.resource.impl.DefaultResourceDescriptionDelta in project xtext-eclipse by eclipse.
The class ClusteringBuilderState, method writeNewResourceDescriptions:
/**
* Create new resource descriptions for a set of resources given by their URIs.
*
* @param buildData
* The underlying data for the write operation.
* @param oldState
* The old index
* @param newState
* The new index
* @param monitor
* The progress monitor used for user feedback
*/
protected void writeNewResourceDescriptions(BuildData buildData, IResourceDescriptions oldState, CurrentDescriptions newState, final IProgressMonitor monitor) {
int index = 0;
ResourceSet resourceSet = buildData.getResourceSet();
Set<URI> toBeUpdated = buildData.getToBeUpdated();
final int n = toBeUpdated.size();
// TODO: NLS
final SubMonitor subMonitor = SubMonitor.convert(monitor, "Write new resource descriptions", n / MONITOR_WRITE_CHUNK + 1);
IProject currentProject = getBuiltProject(buildData);
LoadOperation loadOperation = null;
try {
compilerPhases.setIndexing(resourceSet, true);
loadOperation = globalIndexResourceLoader.create(resourceSet, currentProject);
loadOperation.load(toBeUpdated);
while (loadOperation.hasNext()) {
if (subMonitor.isCanceled()) {
loadOperation.cancel();
throw new OperationCanceledException();
}
if (!clusteringPolicy.continueProcessing(resourceSet, null, index)) {
clearResourceSet(resourceSet);
}
URI uri = null;
Resource resource = null;
try {
LoadResult loadResult = loadOperation.next();
uri = loadResult.getUri();
resource = addResource(loadResult.getResource(), resourceSet);
if (index % MONITOR_WRITE_CHUNK == 0) {
subMonitor.subTask("Writing new resource descriptions chunk " + (index / MONITOR_WRITE_CHUNK + 1) + " of " + (n / MONITOR_WRITE_CHUNK + 1));
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Writing new resource description " + uri);
}
final IResourceDescription.Manager manager = getResourceDescriptionManager(uri);
if (manager != null) {
// We don't care here about links, we really just want the exported objects so that we can link in the
// next phase.
final IResourceDescription description = manager.getResourceDescription(resource);
final IResourceDescription copiedDescription = new CopiedResourceDescription(description);
// We also don't care what kind of Delta we get here; it's just a temporary transport vehicle. That interface
// could do with some clean-up, too, because all we actually want to do is register the new resource
// description, not the delta.
newState.register(new DefaultResourceDescriptionDelta(oldState.getResourceDescription(uri), copiedDescription));
buildData.queueURI(uri);
}
} catch (final RuntimeException ex) {
if (ex instanceof LoadOperationException) {
uri = ((LoadOperationException) ex).getUri();
}
if (uri == null) {
LOGGER.error("Error loading resource", ex); //$NON-NLS-1$
} else {
if (resourceSet.getURIConverter().exists(uri, Collections.emptyMap())) {
LOGGER.error("Error loading resource from: " + uri.toString(), ex); //$NON-NLS-1$
}
if (resource != null) {
resourceSet.getResources().remove(resource);
}
final IResourceDescription oldDescription = oldState.getResourceDescription(uri);
if (oldDescription != null) {
newState.register(new DefaultResourceDescriptionDelta(oldDescription, null));
}
}
// If we couldn't load it, there's no use trying again: do not add it to the queue
}
index++;
if (index % MONITOR_WRITE_CHUNK == 0)
subMonitor.worked(1);
}
} finally {
compilerPhases.setIndexing(resourceSet, false);
if (loadOperation != null)
loadOperation.cancel();
}
}
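The pattern above, snapshotting the description with CopiedResourceDescription and then wrapping it in a delta that serves only as a transport vehicle for newState.register(), can be condensed into a small helper. The following is a sketch of that pattern only; the class and method names are ours, not Xtext's:

import org.eclipse.xtext.resource.IResourceDescription;
import org.eclipse.xtext.resource.impl.CopiedResourceDescription;
import org.eclipse.xtext.resource.impl.DefaultResourceDescriptionDelta;

// Sketch: builds the "transport" delta used during the indexing phase.
public class IndexingDeltaExample {
    public static IResourceDescription.Delta snapshotDelta(IResourceDescription previous, IResourceDescription current) {
        // Copy the exported objects so the description no longer depends on the live Resource.
        IResourceDescription copied = new CopiedResourceDescription(current);
        // The delta merely carries the copied description into the new index; previous may be null.
        return new DefaultResourceDescriptionDelta(previous, copied);
    }
}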
Use of org.eclipse.xtext.resource.impl.DefaultResourceDescriptionDelta in project xtext-eclipse by eclipse.
The class ClusteringBuilderState, method doUpdate:
/**
* Actually do the build.
*
* @param buildData
* the data that should be considered for the update
* @param newData
* the new resource descriptions as they are to be persisted (the new index after the build).
* Initially contains the old resource descriptions.
* @param monitor
* The progress monitor
* @return A list of deltas describing all changes made by the build.
*/
@Override
protected Collection<Delta> doUpdate(BuildData buildData, ResourceDescriptionsData newData, IProgressMonitor monitor) {
final SubMonitor progress = SubMonitor.convert(monitor, 100);
// Step 1: Clean the set of deleted URIs. If any of them are also added, they're not deleted.
final Set<URI> toBeDeleted = buildData.getAndRemoveToBeDeleted();
// Step 2: Create a new state (old state minus the deleted resources). This, by virtue of the flag
// NAMED_BUILDER_SCOPE in the resource set's load options
// and a Guice binding, is the index that is used during the build; i.e., linking during the build will
// use this. Once the build is completed, the persistable index is reset to the contents of newState by
// virtue of the newMap, which is maintained in sync with this.
ResourceSet resourceSet = buildData.getResourceSet();
final CurrentDescriptions newState = new CurrentDescriptions(resourceSet, newData, buildData);
buildData.getSourceLevelURICache().cacheAsSourceURIs(toBeDeleted);
installSourceLevelURIs(buildData);
// Step 3: Create a queue; write new temporary resource descriptions for the added or updated resources so that we can link
// subsequently; put all the added or updated resources into the queue.
writeNewResourceDescriptions(buildData, this, newState, progress.newChild(20));
if (progress.isCanceled()) {
throw new OperationCanceledException();
}
// Step 4: Create a URI set of resources not yet in the delta. This is used for queuing; whenever a resource is
// queued for processing, its URI is removed from this set. queueAffectedResources will consider only resources
// in this set as potential candidates.
for (final URI uri : toBeDeleted) {
newData.removeDescription(uri);
}
final Set<URI> allRemainingURIs = Sets.newLinkedHashSet(newData.getAllURIs());
allRemainingURIs.removeAll(buildData.getToBeUpdated());
for (URI remainingURI : buildData.getAllRemainingURIs()) {
allRemainingURIs.remove(remainingURI);
}
// TODO: consider removing any entry from upstream projects and independent projects
// from the set of remaining uris (template method or service?)
// this should reduce the number of to-be-checked descriptions significantly
// for common setups (large number of reasonable sized projects)
// Our return value. It contains all the deltas resulting from this build.
final Set<Delta> allDeltas = Sets.newHashSet();
// Step 5: Put all resources depending on a deleted resource into the queue. Also register the deltas in allDeltas.
if (!toBeDeleted.isEmpty()) {
for (final URI uri : toBeDeleted) {
final IResourceDescription oldDescription = this.getResourceDescription(uri);
if (oldDescription != null) {
allDeltas.add(new DefaultResourceDescriptionDelta(oldDescription, null));
}
}
}
// Add all pending deltas to all deltas (may be scheduled java deltas)
Collection<Delta> pendingDeltas = buildData.getAndRemovePendingDeltas();
allDeltas.addAll(pendingDeltas);
queueAffectedResources(allRemainingURIs, this, newState, allDeltas, allDeltas, buildData, progress.newChild(1));
installSourceLevelURIs(buildData);
IProject currentProject = getBuiltProject(buildData);
LoadOperation loadOperation = null;
try {
Queue<URI> queue = buildData.getURIQueue();
loadOperation = crossLinkingResourceLoader.create(resourceSet, currentProject);
loadOperation.load(queue);
// Step 6: Iteratively go through the queue. For each resource, create a new resource description and queue all depending
// resources that are not yet in the delta. Validate resources. Do this in chunks.
final SubMonitor subProgress = progress.newChild(80);
CancelIndicator cancelMonitor = new MonitorBasedCancelIndicator(progress);
int index = 0;
while (!queue.isEmpty()) {
// heuristic: only 2/3 of ticks will be consumed; rest kept for affected resources
if (index % MONITOR_DO_UPDATE_CHUNK == 0) {
subProgress.setWorkRemaining(((queue.size() / MONITOR_DO_UPDATE_CHUNK) + 1) * 3);
}
int clusterIndex = 0;
final List<Delta> changedDeltas = Lists.newArrayList();
while (!queue.isEmpty()) {
if (subProgress.isCanceled()) {
loadOperation.cancel();
throw new OperationCanceledException();
}
if (!clusteringPolicy.continueProcessing(resourceSet, null, clusterIndex)) {
break;
}
URI changedURI = null;
URI actualResourceURI = null;
Resource resource = null;
Delta newDelta = null;
try {
// Load the resource and create a new resource description
LoadResult loadResult = loadOperation.next();
changedURI = loadResult.getUri();
actualResourceURI = loadResult.getResource().getURI();
resource = addResource(loadResult.getResource(), resourceSet);
if (index % MONITOR_DO_UPDATE_CHUNK == 0) {
subProgress.subTask("Updating resource descriptions chunk " + (index / MONITOR_DO_UPDATE_CHUNK + 1) + " of " + ((index + queue.size()) / MONITOR_DO_UPDATE_CHUNK + 1));
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Update resource description " + actualResourceURI);
}
queue.remove(changedURI);
if (toBeDeleted.contains(changedURI)) {
break;
}
buildLogger.log("indexing " + changedURI);
final IResourceDescription.Manager manager = getResourceDescriptionManager(actualResourceURI);
if (manager != null) {
// Resolve links here!
try {
EcoreUtil2.resolveLazyCrossReferences(resource, cancelMonitor);
final IResourceDescription description = manager.getResourceDescription(resource);
final IResourceDescription copiedDescription = BuilderStateUtil.create(description);
newDelta = manager.createDelta(this.getResourceDescription(actualResourceURI), copiedDescription);
} catch (OperationCanceledException e) {
loadOperation.cancel();
throw e;
} catch (WrappedException e) {
throw e;
} catch (RuntimeException e) {
LOGGER.error("Error resolving cross references on resource '" + actualResourceURI + "'", e);
throw new LoadOperationException(actualResourceURI, e);
}
}
} catch (final WrappedException ex) {
if (ex instanceof LoadOperationException) {
changedURI = ((LoadOperationException) ex).getUri();
}
Throwable cause = ex.getCause();
boolean wasResourceNotFound = false;
if (cause instanceof CoreException) {
if (IResourceStatus.RESOURCE_NOT_FOUND == ((CoreException) cause).getStatus().getCode()) {
wasResourceNotFound = true;
}
}
if (changedURI == null) {
LOGGER.error("Error loading resource", ex); //$NON-NLS-1$
} else {
queue.remove(changedURI);
if (toBeDeleted.contains(changedURI))
break;
if (!wasResourceNotFound)
LOGGER.error("Error loading resource from: " + changedURI.toString(), ex); //$NON-NLS-1$
if (resource != null) {
resourceSet.getResources().remove(resource);
}
final IResourceDescription oldDescription = this.getResourceDescription(changedURI);
final IResourceDescription newDesc = newState.getResourceDescription(changedURI);
ResourceDescriptionImpl indexReadyDescription = newDesc != null ? BuilderStateUtil.create(newDesc) : null;
if ((oldDescription != null || indexReadyDescription != null) && oldDescription != indexReadyDescription) {
newDelta = new DefaultResourceDescriptionDelta(oldDescription, indexReadyDescription);
}
}
}
if (newDelta != null) {
allDeltas.add(newDelta);
clusterIndex++;
if (newDelta.haveEObjectDescriptionsChanged())
changedDeltas.add(newDelta);
// Make the new resource description known and update the map.
newState.register(newDelta);
// Validate now.
if (!buildData.isIndexingOnly()) {
try {
updateMarkers(newDelta, resourceSet, subProgress);
} catch (OperationCanceledException e) {
loadOperation.cancel();
throw e;
} catch (Exception e) {
LOGGER.error("Error validating " + newDelta.getUri(), e);
}
}
}
index++;
if (index % MONITOR_DO_UPDATE_CHUNK == 0) {
subProgress.worked(1);
}
}
loadOperation.cancel();
queueAffectedResources(allRemainingURIs, this, newState, changedDeltas, allDeltas, buildData, subProgress.newChild(1));
installSourceLevelURIs(buildData);
if (queue.size() > 0) {
loadOperation = crossLinkingResourceLoader.create(resourceSet, currentProject);
loadOperation.load(queue);
}
// Release memory
if (!queue.isEmpty() && !clusteringPolicy.continueProcessing(resourceSet, null, clusterIndex))
clearResourceSet(resourceSet);
}
} finally {
if (loadOperation != null)
loadOperation.cancel();
if (!progress.isCanceled())
progress.done();
}
return allDeltas;
}
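doUpdate keeps two collections: allDeltas (every delta, which becomes the return value) and changedDeltas (only deltas whose exported objects actually changed, which drive queueAffectedResources). Below is a small sketch of that filter, assuming only the IResourceDescription.Delta interface; the helper names are ours:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.eclipse.xtext.resource.IResourceDescription;

// Sketch: only structurally changed deltas are worth propagating to dependent resources.
public class ChangedDeltaFilterExample {
    public static List<IResourceDescription.Delta> structurallyChanged(Collection<IResourceDescription.Delta> allDeltas) {
        List<IResourceDescription.Delta> changed = new ArrayList<>();
        for (IResourceDescription.Delta delta : allDeltas) {
            if (delta.haveEObjectDescriptionsChanged()) {
                changed.add(delta); // exported objects differ between old and new description
            }
        }
        return changed;
    }
}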
Use of org.eclipse.xtext.resource.impl.DefaultResourceDescriptionDelta in project xtext-core by eclipse.
The class Indexer, method getDeltasForDeletedResources:
protected List<IResourceDescription.Delta> getDeltasForDeletedResources(final BuildRequest request, final ResourceDescriptionsData oldIndex, @Extension final BuildContext context) {
    final ArrayList<IResourceDescription.Delta> deltas = CollectionLiterals.<IResourceDescription.Delta>newArrayList();
    final Function1<URI, Boolean> _function = (URI it) -> {
        IResourceServiceProvider _resourceServiceProvider = context.getResourceServiceProvider(it);
        return Boolean.valueOf((_resourceServiceProvider != null));
    };
    final Consumer<URI> _function_1 = (URI it) -> {
        this._operationCanceledManager.checkCanceled(context.getCancelIndicator());
        IResourceDescription _resourceDescription = null;
        if (oldIndex != null) {
            _resourceDescription = oldIndex.getResourceDescription(it);
        }
        final IResourceDescription oldDescription = _resourceDescription;
        if ((oldDescription != null)) {
            final DefaultResourceDescriptionDelta delta = new DefaultResourceDescriptionDelta(oldDescription, null);
            deltas.add(delta);
        }
    };
    IterableExtensions.<URI>filter(request.getDeletedFiles(), _function).forEach(_function_1);
    return deltas;
}
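The method above appears to be Xtend-generated Java, which obscures a fairly simple loop. A hand-written sketch of the same logic might look roughly like this (illustrative only: the resource-service-provider filter and the cancellation check are omitted, and the parameter types are simplified to what the loop actually needs):

import java.util.ArrayList;
import java.util.List;
import org.eclipse.emf.common.util.URI;
import org.eclipse.xtext.resource.IResourceDescription;
import org.eclipse.xtext.resource.impl.DefaultResourceDescriptionDelta;
import org.eclipse.xtext.resource.impl.ResourceDescriptionsData;

// Sketch: one deletion delta (new side == null) per deleted URI still present in the old index.
public class DeletionDeltasExample {
    public static List<IResourceDescription.Delta> deletionDeltas(Iterable<URI> deletedFiles, ResourceDescriptionsData oldIndex) {
        List<IResourceDescription.Delta> deltas = new ArrayList<>();
        for (URI uri : deletedFiles) {
            IResourceDescription oldDescription = oldIndex != null ? oldIndex.getResourceDescription(uri) : null;
            if (oldDescription != null) {
                deltas.add(new DefaultResourceDescriptionDelta(oldDescription, null));
            }
        }
        return deltas;
    }
}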
Use of org.eclipse.xtext.resource.impl.DefaultResourceDescriptionDelta in project n4js by eclipse.
The class N4JSGenerateImmediatelyBuilderState, method queueAffectedResources:
/**
* Overriding this method to make sure that resources of all affected URIs are fully re-loaded if needed, instead of
* only loading the TModule from the corresponding resource description.
* <p>
* This is required in case the URIs in an affected resource contain indices of a changed resource; just loading the
* TModule from the user data won't update these indices. For details see the example provided in IDEBUG-347.
* <p>
* NOTE: this should be removed once the URI scheme has been changed to use names instead of indices.
*/
@Override
protected void queueAffectedResources(Set<URI> allRemainingURIs, IResourceDescriptions oldState, CurrentDescriptions newState, Collection<Delta> changedDeltas, Collection<Delta> allDeltas, BuildData buildData, final IProgressMonitor monitor) {
// don't want to copy the super-class method, so use this helper to get the set of affected URIs:
final Set<URI> affectedURIs = new HashSet<>(allRemainingURIs);
super.queueAffectedResources(allRemainingURIs, oldState, newState, changedDeltas, allDeltas, buildData, monitor);
// affected URIs have been removed from allRemainingURIs by super class
affectedURIs.removeAll(allRemainingURIs);
for (URI currAffURI : affectedURIs) {
if (!N4MF_MANIFEST.equals(currAffURI.lastSegment())) {
/*-
* This logic is required to get rid of stale serialized TModule information in the index for resources
* whose cross-reference URIs work with an index-based approach. Consider the example below:
*
* -------Module A------
*1 //class XYZ { }
*2 function foo() { }
*3 export public class A { }
*
* -------Module B------
*1 import { A } from "A"
*2 import { C } from "C"
*3
*4 var arrCC : Array<A>;
*5 var t2 : C = new C();
*6 t2.m(arrCC);
*
* -------Module C------
*1 import { A } from "A"
*2
*3 export public class C {
*4 m(param : Array<A>) { }
*5 }
*
*
* Commenting out line 1 in module A will trigger a rebuild of A and of the related modules B and C, in this order.
* When loading module B, module C has to be resolved as it imports it, quickly jump to module C and load
* class A from module A, class A used to have index 1 (in the serialized TModule in the Xtext index) as
* it was the second top level element, but that is not true any more, because 'foo' was just commented out,
* so index 1 in module A is not class A any more but 'foo'. With this, line 6 in module B will fail,
* because it will think that the method 'm' accepts an array of 'foo' and not A any more.
*
* The following code will be executed after A was processed and B and C are the "affectedURIs". With this
* code, we make sure that the cached TModule of C (in the user data of C's resource description) won't be
* used while processing B during proxy resolution.
*/
IResourceDescription resDesc = this.getResourceDescription(currAffURI);
ResourceDescriptionWithoutModuleUserData rdwmud = new ResourceDescriptionWithoutModuleUserData(resDesc);
newState.register(new DefaultResourceDescriptionDelta(resDesc, rdwmud));
}
}
}