Use of eu.esdihumboldt.hale.common.core.io.impl.SubtaskProgressIndicator in project hale by halestudio.
The class ProjectValidator, method execute.
/**
* @see eu.esdihumboldt.hale.common.core.io.impl.AbstractIOProvider#execute(eu.esdihumboldt.hale.common.core.io.ProgressIndicator,
* eu.esdihumboldt.hale.common.core.io.report.IOReporter)
*/
@Override
protected IOReport execute(ProgressIndicator progress, IOReporter reporter) throws IOProviderConfigurationException, IOException {
ValidatorConfigurationService service = getServiceProvider().getService(ValidatorConfigurationService.class);
if (service == null) {
reporter.setSuccess(false);
throw new RuntimeException("Unable to find validator configurations");
}
Collection<IOProviderDescriptor> validators = new ArrayList<>();
validators.addAll(HaleIO.getProviderFactories(ConfigurableInstanceValidator.class));
List<ValidatorConfiguration> configurations = service.getConfigurations();
progress.begin("Performing project validation", configurations.size());
reporter.setSuccess(true);
SubtaskProgressIndicator subProgress = new SubtaskProgressIndicator(progress);
int i = 0;
for (ValidatorConfiguration configuration : configurations) {
for (IOProviderDescriptor validatorFactory : HaleIO.filterFactoriesByConfigurationType(validators, configuration.getContentType())) {
try {
// Assert that the validator can validate the exported
// content type, skip otherwise
boolean compatible = validatorFactory.getSupportedTypes().stream().anyMatch(type -> getContentType().isKindOf(type));
if (!compatible) {
reporter.info(new IOMessageImpl(MessageFormat.format("Validator \"{0}\" skipped: cannot validate exported content type \"{1}\"", validatorFactory.getIdentifier(), getContentType().getId()), null));
continue;
}
ConfigurableInstanceValidator validator = (ConfigurableInstanceValidator) validatorFactory.createExtensionObject();
subProgress.begin(MessageFormat.format("Executing project validator ({0}/{1})", ++i, configurations.size()), ProgressIndicator.UNKNOWN);
validator.setSchemas(getSchemas());
validator.setSource(getSource());
validator.setContentType(getContentType());
validator.setServiceProvider(getServiceProvider());
validator.configure(configuration);
validator.validate();
IOReport result = validator.execute(null);
if (result != null) {
reporter.importMessages(result);
if (!result.isSuccess()) {
reporter.setSuccess(false);
}
}
} catch (Exception e) {
reporter.error(new IOMessageImpl("Error running project validator", e));
reporter.setSuccess(false);
}
subProgress.end();
progress.advance(1);
}
}
progress.end();
return reporter;
}
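Not from the hale sources above, a minimal sketch of the subtask pattern used in ProjectValidator.execute: the parent indicator counts one unit per item while a SubtaskProgressIndicator reports the currently running item. Only the ProgressIndicator/SubtaskProgressIndicator calls shown in this section are assumed; runTask is a hypothetical stand-in for the per-item work.

import java.text.MessageFormat;
import java.util.List;

import eu.esdihumboldt.hale.common.core.io.ProgressIndicator;
import eu.esdihumboldt.hale.common.core.io.impl.SubtaskProgressIndicator;

public class SubtaskLoopSketch {

    public void runAll(ProgressIndicator progress, List<String> tasks) {
        // one unit of parent progress per task
        progress.begin("Running tasks", tasks.size());
        SubtaskProgressIndicator sub = new SubtaskProgressIndicator(progress);
        try {
            int i = 0;
            for (String task : tasks) {
                // the subtask itself has an unknown amount of internal work
                sub.begin(MessageFormat.format("Task ({0}/{1})", ++i, tasks.size()),
                        ProgressIndicator.UNKNOWN);
                try {
                    runTask(task); // hypothetical per-item work
                } finally {
                    sub.end();
                }
                progress.advance(1);
            }
        } finally {
            progress.end();
        }
    }

    private void runTask(String task) {
        // placeholder for the actual work
    }
}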
Use of eu.esdihumboldt.hale.common.core.io.impl.SubtaskProgressIndicator in project hale by halestudio.
The class PartitioningWFSWriter, method execute.
@Override
protected IOReport execute(final ProgressIndicator progress, final IOReporter reporter) throws IOProviderConfigurationException, IOException {
progress.begin("Upload to WFS-T", IProgressMonitor.UNKNOWN);
try {
progress.setCurrentTask("Partitioning data");
// create the partitioner
InstanceCollectionPartitioner partitioner = StreamGmlWriter.getPartitioner(this, reporter);
// partition the graph
int threshold = getParameter(PARAM_INSTANCES_THRESHOLD).as(Integer.class, DEFAULT_INSTANCES_THRESHOLD);
try (ResourceIterator<InstanceCollection> parts = partitioner.partition(getInstances(), threshold, reporter)) {
if (partitioner.requiresImmediateConsumption()) {
// handle all parts right here, one after another
int partCount = 0;
boolean failed = false;
if (parts.hasNext()) {
while (parts.hasNext() && !progress.isCanceled()) {
partCount++;
SubtaskProgressIndicator partitionProgress = new SubtaskProgressIndicator(progress);
partitionProgress.begin("Assembling part " + partCount, ProgressIndicator.UNKNOWN);
InstanceCollection part = parts.next();
partitionProgress.end();
progress.setCurrentTask("Upload part " + partCount + ((part.hasSize()) ? (" (" + part.size() + " instances)") : ("")));
IOReport report = uploadInstances(part, reporter, new SubtaskProgressIndicator(progress));
if (!report.isSuccess()) {
failed = true;
reporter.error("Upload of part {0} - {1}", partCount, report.getSummary());
} else {
reporter.info("Upload of part {0} - {1}", partCount, report.getSummary());
}
}
reporter.setSuccess(!failed && reporter.getErrors().isEmpty());
if (!reporter.isSuccess()) {
reporter.setSummary("Errors during upload to WFS-T, please see the report.");
} else {
reporter.setSummary("Successfully uploaded data via WFS-T");
}
} else {
reporter.setSuccess(false);
reporter.setSummary("Partitioning yielded no instances to upload");
}
} else {
// can start requests with a separate thread (potentially
// multiple threads, but tests with WFSes show that this usually is
// too much to handle for the service)
int partCount = 0;
final AtomicBoolean failed = new AtomicBoolean();
if (parts.hasNext()) {
ExecutorService requestThread = Executors.newSingleThreadExecutor();
while (parts.hasNext() && !progress.isCanceled()) {
partCount++;
// only used for first partitioning
SubtaskProgressIndicator partitionProgress = new SubtaskProgressIndicator(progress);
if (partCount == 1)
partitionProgress.begin("Assembling part " + partCount, ProgressIndicator.UNKNOWN);
// not safe
final InstanceCollection part = parts.next();
if (partCount == 1)
partitionProgress.end();
progress.setCurrentTask("Upload part " + partCount + ((part.hasSize()) ? (" (" + part.size() + " instances)") : ("")));
final int currentPart = partCount;
requestThread.submit(new Runnable() {
@Override
public void run() {
try {
IOReport report = uploadInstances(part, reporter, new SubtaskProgressIndicator(progress));
if (!report.isSuccess()) {
failed.set(true);
reporter.error(new IOMessageImpl("Upload of part " + currentPart + " - " + report.getSummary(), null));
} else {
reporter.info(new IOMessageImpl("Upload of part " + currentPart + " - " + report.getSummary(), null));
}
} catch (Exception e) {
failed.set(true);
reporter.error(new IOMessageImpl("Upload of part " + currentPart + " failed", e));
}
}
});
}
// wait for the requests to complete
requestThread.shutdown();
if (!requestThread.awaitTermination(24, TimeUnit.HOURS)) {
reporter.error(new IOMessageImpl("Timeout reached waiting for completion of WFS requests", null));
}
reporter.setSuccess(!failed.get() && reporter.getErrors().isEmpty());
if (!reporter.isSuccess()) {
reporter.setSummary("Errors during upload to WFS-T, please see the report.");
} else {
reporter.setSummary("Successfully uploaded data via WFS-T");
}
} else {
reporter.setSuccess(false);
reporter.setSummary("Partitioning yielded no instances to upload");
}
}
}
} catch (Exception e) {
reporter.error(new IOMessageImpl("Error during attempt to upload to WFS-T", e));
reporter.setSuccess(false);
} finally {
progress.end();
}
return reporter;
}
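Not from the hale sources, a hedged sketch of the serialized upload pattern above: a single-thread executor keeps the requests strictly sequential while the producing loop keeps partitioning, and awaitTermination bounds the wait, mirroring the 24-hour timeout in the example. uploadPart is a hypothetical stand-in for uploadInstances.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class SerialUploadSketch {

    public boolean uploadAll(List<byte[]> parts) throws InterruptedException {
        final AtomicBoolean failed = new AtomicBoolean();
        // a single worker thread keeps the requests strictly sequential
        ExecutorService requestThread = Executors.newSingleThreadExecutor();
        for (final byte[] part : parts) {
            requestThread.submit(() -> {
                try {
                    uploadPart(part); // hypothetical network call
                } catch (Exception e) {
                    failed.set(true);
                }
            });
        }
        // stop accepting new work, then wait for the queued uploads
        requestThread.shutdown();
        if (!requestThread.awaitTermination(24, TimeUnit.HOURS)) {
            failed.set(true); // timed out before all uploads finished
        }
        return !failed.get();
    }

    private void uploadPart(byte[] part) {
        // placeholder for the actual WFS-T request
    }
}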
Use of eu.esdihumboldt.hale.common.core.io.impl.SubtaskProgressIndicator in project hale by halestudio.
The class ConceptualSchemaTransformer, method transform.
/**
* @see TransformationService#transform(Alignment, InstanceCollection,
* InstanceSink, ServiceProvider, ProgressIndicator)
*/
@Override
public TransformationReport transform(Alignment alignment, InstanceCollection source, InstanceSink target, ServiceProvider serviceProvider, ProgressIndicator progressIndicator) {
TransformationReporter reporter = new DefaultTransformationReporter("Instance transformation", true);
TransformationContext context = new TransformationContext(serviceProvider, alignment);
TransformationFunctionService functions = serviceProvider.getService(TransformationFunctionService.class);
final SubtaskProgressIndicator sub = new SubtaskProgressIndicator(progressIndicator) {
@Override
protected String getCombinedTaskName(String taskName, String subtaskName) {
return taskName + " (" + subtaskName + ")";
}
};
progressIndicator = sub;
target = new CountingInstanceSink(target) {
private long lastUpdate = 0;
@Override
protected void countChanged(int count) {
long now = System.currentTimeMillis();
if (now - lastUpdate > 100) {
// only update every 100 milliseconds
lastUpdate = now;
sub.subTask(count + " transformed instances");
}
}
};
progressIndicator.begin("Transformation", ProgressIndicator.UNKNOWN);
try {
EngineManager engines = new EngineManager();
PropertyTransformer transformer = new TreePropertyTransformer(alignment, reporter, target, engines, context);
Collection<? extends Cell> typeCells = alignment.getActiveTypeCells();
// sort type cells by priority
typeCells = sortTypeCells(typeCells);
for (Cell typeCell : typeCells) {
if (progressIndicator.isCanceled()) {
break;
}
List<TypeTransformationFactory> transformations = functions.getTypeTransformations(typeCell.getTransformationIdentifier());
if (transformations == null || transformations.isEmpty()) {
reporter.error(new TransformationMessageImpl(typeCell, MessageFormat.format("No transformation for function {0} found. Skipped type transformation.", typeCell.getTransformationIdentifier()), null));
} else {
// TODO select based on e.g. preferred transformation
// engine?
TypeTransformationFactory transformation = transformations.iterator().next();
doTypeTransformation(transformation, typeCell, source, target, alignment, engines, transformer, context, reporter, progressIndicator);
}
}
progressIndicator.setCurrentTask("Wait for property transformer to complete");
// wait for the property transformer to complete
// cancel property transformations if process was canceled - this
// may leave transformed instances in inconsistent state
transformer.join(progressIndicator.isCanceled());
engines.dispose();
reporter.setSuccess(true);
return reporter;
} finally {
progressIndicator.end();
}
}
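Not from the hale sources, a minimal sketch of the two ideas the transformer combines: overriding getCombinedTaskName so the subtask renders as "Task (subtask)", and throttling subTask updates to at most one per 100 ms. Only the SubtaskProgressIndicator API shown above is assumed.

import eu.esdihumboldt.hale.common.core.io.ProgressIndicator;
import eu.esdihumboldt.hale.common.core.io.impl.SubtaskProgressIndicator;

public class ThrottledSubtaskSketch {

    private long lastUpdate = 0;

    public SubtaskProgressIndicator wrap(ProgressIndicator parent) {
        return new SubtaskProgressIndicator(parent) {
            @Override
            protected String getCombinedTaskName(String taskName, String subtaskName) {
                // e.g. "Transformation (1234 transformed instances)"
                return taskName + " (" + subtaskName + ")";
            }
        };
    }

    public void onCountChanged(SubtaskProgressIndicator sub, int count) {
        long now = System.currentTimeMillis();
        if (now - lastUpdate > 100) { // drop updates arriving within 100 ms
            lastUpdate = now;
            sub.subTask(count + " transformed instances");
        }
    }
}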
Use of eu.esdihumboldt.hale.common.core.io.impl.SubtaskProgressIndicator in project hale by halestudio.
The class StreamGmlWriter, method partitionByExtent.
private void partitionByExtent(ProgressIndicator progress, IOReporter reporter) throws IOException {
int maxNodes = getParameter(PARAM_PARTITION_BY_EXTENT_MAX_NODES).as(Integer.class, 1000);
String mode = getParameter(PARAM_PARTITION_BY_EXTENT_MODE).as(String.class, PARTITION_BY_EXTENT_MODE_DATASET);
final SubtaskProgressIndicator qtProgress = new SubtaskProgressIndicator(progress) {
@Override
protected String getCombinedTaskName(String taskName, String subtaskName) {
return taskName + " (" + subtaskName + ")";
}
};
// Map for instances that either contain no or multiple geometries
Map<String, InstanceReference> unhandledInstances = new HashMap<>();
QuadtreeBuilder<Point, InstanceReference> builder = new QuadtreeBuilder<>();
try (ResourceIterator<Instance> it = getInstances().iterator()) {
qtProgress.begin("Collecting geometries", getInstances().size());
final XMLInspector gadget = new XMLInspector();
int i = 0;
while (it.hasNext()) {
Instance inst = it.next();
InstanceReference instRef = getInstances().getReference(inst);
InstanceTraverser traverser = new DepthFirstInstanceTraverser();
GeometryFinder finder = new GeometryFinder(getTargetCRS());
traverser.traverse(inst, finder);
List<GeometryProperty<?>> geoms = finder.getGeometries();
if (geoms.isEmpty() || geoms.size() > 1) {
unhandledInstances.put(gadget.getIdentity(inst), instRef);
} else {
GeometryProperty<?> geomProperty = geoms.get(0);
Geometry geom = geomProperty.getGeometry();
Point centroid;
switch(mode) {
case PARTITION_BY_EXTENT_MODE_WORLD:
CoordinateReferenceSystem sourceCrs = geomProperty.getCRSDefinition().getCRS();
CodeDefinition wgs84 = new CodeDefinition("EPSG:4326");
try {
MathTransform toWgs84 = CRS.findMathTransform(sourceCrs, wgs84.getCRS());
Geometry geomWgs84 = JTS.transform(geom, toWgs84);
centroid = geomWgs84.getCentroid();
} catch (FactoryException | MismatchedDimensionException | TransformException e) {
log.error("Unable to transform geometry to WGS 84", e);
throw new IllegalStateException(e.getMessage(), e);
}
break;
case PARTITION_BY_EXTENT_MODE_DATASET:
// fall through to default
default:
centroid = geom.getCentroid();
}
builder.add(centroid, new IdentifiableInstanceReference(instRef, gadget.getIdentity(inst)));
}
qtProgress.advance(1);
if (++i % 100 == 0) {
qtProgress.setCurrentTask(MessageFormat.format("{0} instances processed", i));
}
}
qtProgress.setCurrentTask("Building quadtree");
FixedBoundaryQuadtree<InstanceReference> qt;
switch(mode) {
case PARTITION_BY_EXTENT_MODE_DATASET:
qt = builder.build(maxNodes);
break;
case PARTITION_BY_EXTENT_MODE_WORLD:
Envelope world = new Envelope(-180, 180, -90, 90);
qt = builder.build(maxNodes, world);
break;
default:
log.error(MessageFormat.format("Unrecognized extent partitioning mode \"{0}\", using dataset boundaries", mode));
qt = builder.build(maxNodes);
}
qtProgress.setCurrentTask("Performing spatial partitioning");
final Map<String, String> idToKeyMapping = new HashMap<>();
final Map<String, Collection<InstanceReference>> keyToRefsMapping = new HashMap<>();
// Instances without geometry or with multiple geometries
keyToRefsMapping.put(ExtentPartsHandler.KEY_NO_GEOMETRY, unhandledInstances.values());
unhandledInstances.keySet().stream().forEach(id -> idToKeyMapping.put(id, ExtentPartsHandler.KEY_NO_GEOMETRY));
buildMappings(qt, idToKeyMapping, keyToRefsMapping);
// Partition source instances based on quadtree tiles
Iterator<InstanceCollection> collIt = new Iterator<InstanceCollection>() {
private final Queue<String> keySet = new LinkedList<>(keyToRefsMapping.keySet());
@Override
public boolean hasNext() {
return !keySet.isEmpty();
}
@Override
public InstanceCollection next() {
String key = keySet.poll();
Collection<InstanceReference> refs = keyToRefsMapping.get(key);
InstanceCollection instColl = new DefaultInstanceCollection(refs.stream().map(ref -> getInstances().getInstance(IdentifiableInstanceReference.getRootReference(ref))).collect(Collectors.toList()));
return new ExtentPartsHandler.TreeKeyDecorator(instColl, key);
}
};
final Map<String, URI> keyToTargetMapping = new HashMap<>();
keyToRefsMapping.keySet().stream().forEach(k -> keyToTargetMapping.put(k, new File(ExtentPartsHandler.getTargetFilename(k, getTarget().getLocation())).toURI()));
final ExtentPartsHandler handler = new ExtentPartsHandler(keyToTargetMapping, idToKeyMapping);
qtProgress.end();
try {
writeParts(collIt, handler, progress, reporter);
} catch (XMLStreamException e) {
throw new IOException(e.getMessage(), e);
}
}
}
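Not from the hale sources, a hedged sketch of the periodic progress reporting used while collecting geometries: advance the indicator per item and refresh the task label every 100 items so the UI is not flooded with updates. handle is a hypothetical stand-in for the per-instance geometry work.

import java.text.MessageFormat;
import java.util.Iterator;

import eu.esdihumboldt.hale.common.core.io.ProgressIndicator;

public class PeriodicProgressSketch {

    public <T> void process(ProgressIndicator progress, Iterator<T> items, int total) {
        progress.begin("Collecting geometries", total);
        try {
            int i = 0;
            while (items.hasNext()) {
                handle(items.next()); // hypothetical per-instance work
                progress.advance(1);
                if (++i % 100 == 0) {
                    // occasional status refresh is cheap
                    progress.setCurrentTask(MessageFormat.format("{0} instances processed", i));
                }
            }
        } finally {
            progress.end();
        }
    }

    private <T> void handle(T item) {
        // placeholder for geometry extraction / quadtree insertion
    }
}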
Use of eu.esdihumboldt.hale.common.core.io.impl.SubtaskProgressIndicator in project hale by halestudio.
The class StreamGmlWriter, method write.
/**
* Write the given instances to an {@link XMLStreamWriter}.<br>
* <br>
* Use {@link #createWriter(OutputStream, IOReporter)} to create a properly
* configured writer for this method.
*
* @param instances the instance collection
* @param writer the writer to write the instances to
* @param reporter the reporter
* @param progress the progress
* @see #createWriter(OutputStream, IOReporter)
*/
protected void write(InstanceCollection instances, PrefixAwareStreamWriter writer, ProgressIndicator progress, IOReporter reporter) {
this.writer = writer;
try {
final SubtaskProgressIndicator sub = new SubtaskProgressIndicator(progress) {
@Override
protected String getCombinedTaskName(String taskName, String subtaskName) {
return taskName + " (" + subtaskName + ")";
}
};
progress = sub;
progress.begin(getTaskName(), instances.size());
XmlElement container = findDefaultContainter(targetIndex, reporter);
TypeDefinition containerDefinition = container == null ? null : container.getType();
QName containerName = container == null ? null : container.getName();
if (containerDefinition == null) {
XmlElement containerElement = getConfiguredContainerElement(this, getXMLIndex());
if (containerElement != null) {
containerDefinition = containerElement.getType();
containerName = containerElement.getName();
container = containerElement;
} else {
// this is the last option, so we can throw a specific error
throw new IllegalStateException("Configured container element not found");
}
}
if (containerDefinition == null || containerName == null) {
throw new IllegalStateException("No root element/container found");
}
/*
* Add schema for container to validation schemas, if the namespace
* differs from the main namespace or additional schemas.
*
* Needed for validation based on schemaLocation attribute.
*/
if (container != null && !containerName.getNamespaceURI().equals(targetIndex.getNamespace()) && !additionalSchemas.containsKey(containerName.getNamespaceURI())) {
try {
final URI containerSchemaLoc = stripFragment(container.getLocation());
if (containerSchemaLoc != null) {
addValidationSchema(containerName.getNamespaceURI(), new Locatable() {
@Override
public URI getLocation() {
return containerSchemaLoc;
}
}, null);
}
} catch (Exception e) {
reporter.error("Could not determine location of container definition", e);
}
}
// additional schema namespace prefixes
for (Entry<String, String> schemaNs : additionalSchemaPrefixes.entrySet()) {
GmlWriterUtil.addNamespace(writer, schemaNs.getKey(), schemaNs.getValue());
}
writer.writeStartDocument();
if (documentWrapper != null) {
documentWrapper.startWrap(writer, reporter);
}
GmlWriterUtil.writeStartElement(writer, containerName);
// generate mandatory id attribute (for feature collection)
String containerId = getParameter(PARAM_CONTAINER_ID).as(String.class);
GmlWriterUtil.writeID(writer, containerDefinition, null, false, containerId);
// write schema locations
StringBuffer locations = new StringBuffer();
String noNamespaceLocation = null;
if (targetIndex.getNamespace() != null && !targetIndex.getNamespace().isEmpty()) {
locations.append(targetIndex.getNamespace());
locations.append(" "); //$NON-NLS-1$
locations.append(targetIndex.getLocation().toString());
} else {
noNamespaceLocation = targetIndex.getLocation().toString();
}
for (Entry<String, Locatable> schema : additionalSchemas.entrySet()) {
if (schema.getKey() != null && !schema.getKey().isEmpty()) {
if (locations.length() > 0) {
locations.append(" "); //$NON-NLS-1$
}
locations.append(schema.getKey());
locations.append(" "); //$NON-NLS-1$
locations.append(schema.getValue().getLocation().toString());
} else {
noNamespaceLocation = schema.getValue().getLocation().toString();
}
}
if (locations.length() > 0) {
writer.writeAttribute(SCHEMA_INSTANCE_NS, "schemaLocation", locations.toString()); //$NON-NLS-1$
}
if (noNamespaceLocation != null) {
writer.writeAttribute(SCHEMA_INSTANCE_NS, "noNamespaceSchemaLocation", noNamespaceLocation); //$NON-NLS-1$
}
writeAdditionalElements(writer, containerDefinition, reporter);
// write the instances
ResourceIterator<Instance> itInstance = instances.iterator();
try {
Map<TypeDefinition, DefinitionPath> paths = new HashMap<TypeDefinition, DefinitionPath>();
long lastUpdate = 0;
int count = 0;
Descent lastDescent = null;
while (itInstance.hasNext() && !progress.isCanceled()) {
Instance instance = itInstance.next();
TypeDefinition type = instance.getDefinition();
/*
* Skip all objects that are not features when writing to a
* GML feature collection.
*/
boolean skip = useFeatureCollection && !GmlWriterUtil.isFeatureType(type);
if (skip) {
progress.advance(1);
continue;
}
// get stored definition path for the type
DefinitionPath defPath;
if (paths.containsKey(type)) {
// get the stored path, may be null
defPath = paths.get(type);
} else {
// determine a valid definition path in the container
defPath = findMemberAttribute(containerDefinition, containerName, type);
// store path (may be null)
paths.put(type, defPath);
}
if (defPath != null) {
// write the feature
lastDescent = Descent.descend(writer, defPath, lastDescent, false);
writeMember(instance, type, reporter);
} else {
reporter.warn(new IOMessageImpl(MessageFormat.format("No compatible member attribute for type {0} found in root element {1}, one instance was skipped", type.getDisplayName(), containerName.getLocalPart()), null));
}
progress.advance(1);
count++;
long now = System.currentTimeMillis();
// only update every 100 milliseconds
if (now - lastUpdate > 100 || !itInstance.hasNext()) {
lastUpdate = now;
sub.subTask(String.valueOf(count) + " instances");
}
}
if (lastDescent != null) {
lastDescent.close();
}
} finally {
itInstance.close();
}
// FeatureCollection
writer.writeEndElement();
if (documentWrapper != null) {
documentWrapper.endWrap(writer, reporter);
}
writer.writeEndDocument();
writer.close();
reporter.setSuccess(reporter.getErrors().isEmpty());
} catch (Exception e) {
reporter.error(new IOMessageImpl(e.getLocalizedMessage(), e));
reporter.setSuccess(false);
} finally {
progress.end();
}
}
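Not from the hale sources, a small self-contained sketch of the xsi:schemaLocation assembly above: namespace/location pairs are joined with single spaces, and entries without a namespace are left to noNamespaceSchemaLocation instead. The namespace and location values in main are illustrative only.

import java.util.LinkedHashMap;
import java.util.Map;

public class SchemaLocationSketch {

    public String buildSchemaLocation(Map<String, String> namespaceToLocation) {
        StringBuilder locations = new StringBuilder();
        for (Map.Entry<String, String> entry : namespaceToLocation.entrySet()) {
            if (entry.getKey() == null || entry.getKey().isEmpty()) {
                continue; // belongs in noNamespaceSchemaLocation instead
            }
            if (locations.length() > 0) {
                locations.append(" "); // pairs are space-separated
            }
            // pair format: "<namespace> <location>"
            locations.append(entry.getKey()).append(" ").append(entry.getValue());
        }
        return locations.toString();
    }

    public static void main(String[] args) {
        Map<String, String> m = new LinkedHashMap<>();
        m.put("http://www.opengis.net/gml", "http://schemas.opengis.net/gml/3.1.1/base/gml.xsd");
        System.out.println(new SchemaLocationSketch().buildSchemaLocation(m));
    }
}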