Use of org.eclipse.xtext.builder.resourceloader.IResourceLoader.LoadOperation in the project xtext-eclipse by Eclipse: the class ClusteringBuilderState, method writeNewResourceDescriptions.
/**
 * Creates new resource descriptions for a set of resources given by their URIs.
 *
 * The resources are loaded through the global-index resource loader while the resource set is in
 * "indexing" mode; only the exported objects are recorded here so that linking can happen in a
 * later phase. Resources are processed in clusters governed by the clustering policy to bound
 * memory usage, and successfully indexed URIs are queued on the build data for that later phase.
 *
 * @param buildData
 * The underlying data for the write operation; indexed URIs are queued on it.
 * @param oldState
 * The old index, used to compute deltas against the previous descriptions.
 * @param newState
 * The new index, into which the freshly computed descriptions are registered.
 * @param monitor
 * The progress monitor used for user feedback; cancellation is honored between resources.
 * @throws OperationCanceledException
 * if the monitor is canceled while resources are being processed.
 */
protected void writeNewResourceDescriptions(BuildData buildData, IResourceDescriptions oldState, CurrentDescriptions newState, final IProgressMonitor monitor) {
int index = 0;
ResourceSet resourceSet = buildData.getResourceSet();
Set<URI> toBeUpdated = buildData.getToBeUpdated();
final int n = toBeUpdated.size();
// TODO: NLS
// Progress is reported once per chunk of MONITOR_WRITE_CHUNK resources.
final SubMonitor subMonitor = SubMonitor.convert(monitor, "Write new resource descriptions", n / MONITOR_WRITE_CHUNK + 1);
IProject currentProject = getBuiltProject(buildData);
LoadOperation loadOperation = null;
try {
// Mark the resource set as "indexing" so downstream components know full linking is not required yet.
compilerPhases.setIndexing(resourceSet, true);
loadOperation = globalIndexResourceLoader.create(resourceSet, currentProject);
loadOperation.load(toBeUpdated);
while (loadOperation.hasNext()) {
if (subMonitor.isCanceled()) {
loadOperation.cancel();
throw new OperationCanceledException();
}
// When the clustering policy says the current cluster is full, release memory by
// clearing the resource set before loading further resources.
if (!clusteringPolicy.continueProcessing(resourceSet, null, index)) {
clearResourceSet(resourceSet);
}
URI uri = null;
Resource resource = null;
try {
LoadResult loadResult = loadOperation.next();
uri = loadResult.getUri();
resource = addResource(loadResult.getResource(), resourceSet);
if (index % MONITOR_WRITE_CHUNK == 0) {
subMonitor.subTask("Writing new resource descriptions chunk " + (index / MONITOR_WRITE_CHUNK + 1) + " of " + (n / MONITOR_WRITE_CHUNK + 1));
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Writing new resource description " + uri);
}
final IResourceDescription.Manager manager = getResourceDescriptionManager(uri);
if (manager != null) {
// We don't care here about links, we really just want the exported objects so that we can link in the
// next phase.
final IResourceDescription description = manager.getResourceDescription(resource);
// Copy the description so it stays valid after the resource is evicted from the resource set.
final IResourceDescription copiedDescription = new CopiedResourceDescription(description);
// We also don't care what kind of Delta we get here; it's just a temporary transport vehicle. That interface
// could do with some clean-up, too, because all we actually want to do is register the new resource
// description, not the delta.
newState.register(new DefaultResourceDescriptionDelta(oldState.getResourceDescription(uri), copiedDescription));
// Queue the URI for the subsequent linking/validation phase.
buildData.queueURI(uri);
}
} catch (final RuntimeException ex) {
// A LoadOperationException carries the URI of the resource that failed to load.
if (ex instanceof LoadOperationException) {
uri = ((LoadOperationException) ex).getUri();
}
if (uri == null) {
// $NON-NLS-1$
LOGGER.error("Error loading resource", ex);
} else {
// Only log an error if the resource actually exists; a vanished resource is not worth reporting.
if (resourceSet.getURIConverter().exists(uri, Collections.emptyMap())) {
// $NON-NLS-1$
LOGGER.error("Error loading resource from: " + uri.toString(), ex);
}
// Drop the partially loaded resource so it does not pollute the resource set.
if (resource != null) {
resourceSet.getResources().remove(resource);
}
// Register a deletion delta so the stale description is removed from the new index.
final IResourceDescription oldDescription = oldState.getResourceDescription(uri);
if (oldDescription != null) {
newState.register(new DefaultResourceDescriptionDelta(oldDescription, null));
}
}
// If we couldn't load it, there's no use trying again: do not add it to the queue
}
index++;
if (index % MONITOR_WRITE_CHUNK == 0)
subMonitor.worked(1);
}
} finally {
// Always leave indexing mode and release the load operation, even on cancellation or failure.
compilerPhases.setIndexing(resourceSet, false);
if (loadOperation != null)
loadOperation.cancel();
}
}
Use of org.eclipse.xtext.builder.resourceloader.IResourceLoader.LoadOperation in the project xtext-eclipse by Eclipse: the class ClusteringBuilderState, method doUpdate.
/**
 * Actually do the build: index the changed resources, then iteratively load, link and validate
 * them — and any resources affected by the resulting deltas — in memory-bounded clusters.
 *
 * @param buildData
 * the data that should be considered for the update
 * @param newData
 * the new resource descriptions as they are to be persisted (the new index after the build).
 * Initially contains the old resource descriptions.
 * @param monitor
 * The progress monitor
 * @return A list of deltas describing all changes made by the build.
 * @throws OperationCanceledException
 * if the monitor is canceled during the build.
 */
@Override
protected Collection<Delta> doUpdate(BuildData buildData, ResourceDescriptionsData newData, IProgressMonitor monitor) {
final SubMonitor progress = SubMonitor.convert(monitor, 100);
// Step 1: Clean the set of deleted URIs. If any of them are also added, they're not deleted.
final Set<URI> toBeDeleted = buildData.getAndRemoveToBeDeleted();
// Step 2: Create a new state (old state minus the deleted resources). This, by virtue of the flag
// NAMED_BUILDER_SCOPE in the resource set's load options
// and a Guice binding, is the index that is used during the build; i.e., linking during the build will
// use this. Once the build is completed, the persistable index is reset to the contents of newState by
// virtue of the newMap, which is maintained in synch with this.
ResourceSet resourceSet = buildData.getResourceSet();
final CurrentDescriptions newState = new CurrentDescriptions(resourceSet, newData, buildData);
buildData.getSourceLevelURICache().cacheAsSourceURIs(toBeDeleted);
installSourceLevelURIs(buildData);
// Step 3: Create a queue; write new temporary resource descriptions for the added or updated resources so that we can link
// subsequently; put all the added or updated resources into the queue.
writeNewResourceDescriptions(buildData, this, newState, progress.newChild(20));
if (progress.isCanceled()) {
throw new OperationCanceledException();
}
// Step 4: Remove the deleted resources from the new index; all URIs that then remain
// in this set as potential candidates.
for (final URI uri : toBeDeleted) {
newData.removeDescription(uri);
}
// Compute the set of URIs that were neither updated nor otherwise handled; these are the
// candidates for being "affected" by the deltas produced below.
final Set<URI> allRemainingURIs = Sets.newLinkedHashSet(newData.getAllURIs());
allRemainingURIs.removeAll(buildData.getToBeUpdated());
for (URI remainingURI : buildData.getAllRemainingURIs()) {
allRemainingURIs.remove(remainingURI);
}
// TODO: consider to remove any entry from upstream projects and independent projects
// from the set of remaining uris (template method or service?)
// this should reduce the number of to-be-checked descriptions significantly
// for common setups (large number of reasonable sized projects)
// Our return value. It contains all the deltas resulting from this build.
final Set<Delta> allDeltas = Sets.newHashSet();
// Step 5: Put all resources depending on a deleted resource into the queue. Also register the deltas in allDeltas.
if (!toBeDeleted.isEmpty()) {
for (final URI uri : toBeDeleted) {
final IResourceDescription oldDescription = this.getResourceDescription(uri);
if (oldDescription != null) {
allDeltas.add(new DefaultResourceDescriptionDelta(oldDescription, null));
}
}
}
// Add all pending deltas to all deltas (may be scheduled java deltas)
Collection<Delta> pendingDeltas = buildData.getAndRemovePendingDeltas();
allDeltas.addAll(pendingDeltas);
queueAffectedResources(allRemainingURIs, this, newState, allDeltas, allDeltas, buildData, progress.newChild(1));
installSourceLevelURIs(buildData);
IProject currentProject = getBuiltProject(buildData);
LoadOperation loadOperation = null;
try {
Queue<URI> queue = buildData.getURIQueue();
loadOperation = crossLinkingResourceLoader.create(resourceSet, currentProject);
loadOperation.load(queue);
// Step 6: Iteratively go through the queue. For each resource, create a new resource description and queue all depending
// resources that are not yet in the delta. Validate resources. Do this in chunks.
final SubMonitor subProgress = progress.newChild(80);
CancelIndicator cancelMonitor = new MonitorBasedCancelIndicator(progress);
int index = 0;
// Outer loop: one iteration per cluster; inner loop: one iteration per resource.
while (!queue.isEmpty()) {
// heuristic: only 2/3 of ticks will be consumed; rest kept for affected resources
if (index % MONITOR_DO_UPDATE_CHUNK == 0) {
subProgress.setWorkRemaining(((queue.size() / MONITOR_DO_UPDATE_CHUNK) + 1) * 3);
}
int clusterIndex = 0;
final List<Delta> changedDeltas = Lists.newArrayList();
while (!queue.isEmpty()) {
if (subProgress.isCanceled()) {
loadOperation.cancel();
throw new OperationCanceledException();
}
// Cluster boundary reached: leave the inner loop so the resource set can be cleared.
if (!clusteringPolicy.continueProcessing(resourceSet, null, clusterIndex)) {
break;
}
URI changedURI = null;
URI actualResourceURI = null;
Resource resource = null;
Delta newDelta = null;
try {
// Load the resource and create a new resource description
LoadResult loadResult = loadOperation.next();
changedURI = loadResult.getUri();
actualResourceURI = loadResult.getResource().getURI();
resource = addResource(loadResult.getResource(), resourceSet);
if (index % MONITOR_DO_UPDATE_CHUNK == 0) {
subProgress.subTask("Updating resource descriptions chunk " + (index / MONITOR_DO_UPDATE_CHUNK + 1) + " of " + ((index + queue.size()) / MONITOR_DO_UPDATE_CHUNK + 1));
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Update resource description " + actualResourceURI);
}
queue.remove(changedURI);
// The resource was scheduled for deletion in the meantime; stop processing this cluster.
if (toBeDeleted.contains(changedURI)) {
break;
}
buildLogger.log("indexing " + changedURI);
final IResourceDescription.Manager manager = getResourceDescriptionManager(actualResourceURI);
if (manager != null) {
// Resolve links here!
try {
EcoreUtil2.resolveLazyCrossReferences(resource, cancelMonitor);
final IResourceDescription description = manager.getResourceDescription(resource);
final IResourceDescription copiedDescription = BuilderStateUtil.create(description);
newDelta = manager.createDelta(this.getResourceDescription(actualResourceURI), copiedDescription);
} catch (OperationCanceledException e) {
loadOperation.cancel();
throw e;
} catch (WrappedException e) {
throw e;
} catch (RuntimeException e) {
// Wrap so the outer catch can associate the failure with the resource's URI.
LOGGER.error("Error resolving cross references on resource '" + actualResourceURI + "'", e);
throw new LoadOperationException(actualResourceURI, e);
}
}
} catch (final WrappedException ex) {
// A LoadOperationException carries the URI of the resource that failed to load.
if (ex instanceof LoadOperationException) {
changedURI = ((LoadOperationException) ex).getUri();
}
// A missing underlying file is expected churn during builds; don't log it as an error.
Throwable cause = ex.getCause();
boolean wasResourceNotFound = false;
if (cause instanceof CoreException) {
if (IResourceStatus.RESOURCE_NOT_FOUND == ((CoreException) cause).getStatus().getCode()) {
wasResourceNotFound = true;
}
}
if (changedURI == null) {
// $NON-NLS-1$
LOGGER.error("Error loading resource", ex);
} else {
queue.remove(changedURI);
if (toBeDeleted.contains(changedURI))
break;
if (!wasResourceNotFound)
// $NON-NLS-1$
LOGGER.error("Error loading resource from: " + changedURI.toString(), ex);
// Drop the partially loaded resource so it does not pollute the resource set.
if (resource != null) {
resourceSet.getResources().remove(resource);
}
// Build a best-effort delta from the old index and whatever the new index already holds.
final IResourceDescription oldDescription = this.getResourceDescription(changedURI);
final IResourceDescription newDesc = newState.getResourceDescription(changedURI);
ResourceDescriptionImpl indexReadyDescription = newDesc != null ? BuilderStateUtil.create(newDesc) : null;
if ((oldDescription != null || indexReadyDescription != null) && oldDescription != indexReadyDescription) {
newDelta = new DefaultResourceDescriptionDelta(oldDescription, indexReadyDescription);
}
}
}
if (newDelta != null) {
allDeltas.add(newDelta);
clusterIndex++;
if (newDelta.haveEObjectDescriptionsChanged())
changedDeltas.add(newDelta);
// Make the new resource description known and update the map.
newState.register(newDelta);
// Validate now.
if (!buildData.isIndexingOnly()) {
try {
updateMarkers(newDelta, resourceSet, subProgress);
} catch (OperationCanceledException e) {
loadOperation.cancel();
throw e;
} catch (Exception e) {
// Validation failures must not abort the build; log and continue.
LOGGER.error("Error validating " + newDelta.getUri(), e);
}
}
}
index++;
if (index % MONITOR_DO_UPDATE_CHUNK == 0) {
subProgress.worked(1);
}
}
// End of cluster: cancel the current load and queue resources affected by this cluster's deltas.
loadOperation.cancel();
queueAffectedResources(allRemainingURIs, this, newState, changedDeltas, allDeltas, buildData, subProgress.newChild(1));
installSourceLevelURIs(buildData);
// Restart loading for whatever is (still or newly) queued.
if (queue.size() > 0) {
loadOperation = crossLinkingResourceLoader.create(resourceSet, currentProject);
loadOperation.load(queue);
}
// Release memory
if (!queue.isEmpty() && !clusteringPolicy.continueProcessing(resourceSet, null, clusterIndex))
clearResourceSet(resourceSet);
}
} finally {
// Always release the load operation; only report completion if we were not canceled.
if (loadOperation != null)
loadOperation.cancel();
if (!progress.isCanceled())
progress.done();
}
return allDeltas;
}
Aggregations