Use of eu.esdihumboldt.hale.common.core.io.report.IOReport in project hale by halestudio — class SchematronInstanceValidatorTest, method testValidate.
/**
 * Validates an XML document against a Schematron schema and checks that the
 * validation report has the expected outcome.
 *
 * @param schematronResource class-relative path to the Schematron rules resource
 * @param xmlResource class-relative path to the XML document to validate
 * @param expectSuccess whether the validation report is expected to be successful
 * @throws Exception if setting up or running the validator fails
 */
private void testValidate(String schematronResource, String xmlResource, boolean expectSuccess) throws Exception {
    final SchematronInstanceValidator schematronValidator = new SchematronInstanceValidator();
    // document to validate and rules to validate it against
    schematronValidator.setSource(new ResourceInputSupplier(getClass(), xmlResource));
    schematronValidator.setSchematronLocation(getClass().getResource(schematronResource).toURI());
    final IOReport validationReport = schematronValidator.execute(null);
    assertEquals("Unexpected report result", expectSuccess, validationReport.isSuccess());
}
Use of eu.esdihumboldt.hale.common.core.io.report.IOReport in project hale by halestudio — class ProjectValidator, method execute.
/**
 * Runs every project-configured instance validator against the exported data.
 * <p>
 * For each stored {@link ValidatorConfiguration}, all compatible
 * {@code ConfigurableInstanceValidator} factories are instantiated, configured
 * and executed. A failing validator does not abort the run; its error is
 * recorded in the reporter and the remaining validators still execute.
 *
 * @see eu.esdihumboldt.hale.common.core.io.impl.AbstractIOProvider#execute(eu.esdihumboldt.hale.common.core.io.ProgressIndicator,
 *      eu.esdihumboldt.hale.common.core.io.report.IOReporter)
 */
@Override
protected IOReport execute(ProgressIndicator progress, IOReporter reporter) throws IOProviderConfigurationException, IOException {
    ValidatorConfigurationService service = getServiceProvider().getService(ValidatorConfigurationService.class);
    if (service == null) {
        // without the configuration service there is nothing we can validate
        reporter.setSuccess(false);
        throw new RuntimeException("Unable to find validator configurations");
    }

    // all registered validator factories that can be driven by a stored
    // configuration (copy constructor instead of create-then-addAll)
    Collection<IOProviderDescriptor> validators = new ArrayList<>(
            HaleIO.getProviderFactories(ConfigurableInstanceValidator.class));

    List<ValidatorConfiguration> configurations = service.getConfigurations();
    progress.begin("Performing project validation", configurations.size());
    // assume success until a validator reports otherwise
    reporter.setSuccess(true);

    SubtaskProgressIndicator subProgress = new SubtaskProgressIndicator(progress);
    int i = 0;
    for (ValidatorConfiguration configuration : configurations) {
        for (IOProviderDescriptor validatorFactory : HaleIO.filterFactoriesByConfigurationType(validators, configuration.getContentType())) {
            try {
                // Assert that the validator can validate the exported
                // content type, skip otherwise
                boolean compatible = validatorFactory.getSupportedTypes().stream()
                        .anyMatch(type -> getContentType().isKindOf(type));
                if (!compatible) {
                    reporter.info(new IOMessageImpl(MessageFormat.format(
                            "Validator \"{0}\" skipped: cannot validate exported content type \"{1}\"",
                            validatorFactory.getIdentifier(), getContentType().getId()), null));
                    continue;
                }

                ConfigurableInstanceValidator validator = (ConfigurableInstanceValidator) validatorFactory.createExtensionObject();
                subProgress.begin(MessageFormat.format("Executing project validator ({0}/{1})", ++i, configurations.size()), ProgressIndicator.UNKNOWN);

                // hand the validator the same source/schemas/content type
                // as the export it is checking
                validator.setSchemas(getSchemas());
                validator.setSource(getSource());
                validator.setContentType(getContentType());
                validator.setServiceProvider(getServiceProvider());
                validator.configure(configuration);

                // check the provider configuration before executing it
                validator.validate();
                IOReport result = validator.execute(null);
                if (result != null) {
                    reporter.importMessages(result);
                    if (!result.isSuccess()) {
                        reporter.setSuccess(false);
                    }
                }
            } catch (Exception e) {
                // one failing validator must not prevent the others from running
                reporter.error(new IOMessageImpl("Error running project validator", e));
                reporter.setSuccess(false);
            }
            subProgress.end();
            progress.advance(1);
        }
    }
    progress.end();
    return reporter;
}
Use of eu.esdihumboldt.hale.common.core.io.report.IOReport in project hale by halestudio — class PartitioningWFSWriter, method execute.
/**
 * Partitions the instance collection and uploads each part to a WFS-T
 * (transactional WFS) endpoint.
 * <p>
 * If the partitioner requires immediate consumption, parts are uploaded
 * sequentially on the calling thread. Otherwise uploads are handed to a
 * single-threaded executor so the next part can be assembled while the
 * previous one is still being transferred (a single thread is used
 * deliberately; see the inline comment below).
 *
 * @param progress the progress indicator
 * @param reporter the reporter that collects messages and the overall result
 * @return the reporter, with success flag and summary set
 * @throws IOProviderConfigurationException if the provider is not configured properly
 * @throws IOException if an I/O error occurs
 */
@Override
protected IOReport execute(final ProgressIndicator progress, final IOReporter reporter) throws IOProviderConfigurationException, IOException {
    progress.begin("Upload to WFS-T", IProgressMonitor.UNKNOWN);
    try {
        progress.setCurrentTask("Partitioning data");
        // create the partitioner
        InstanceCollectionPartitioner partitioner = StreamGmlWriter.getPartitioner(this, reporter);
        // partition the graph
        int threshold = getParameter(PARAM_INSTANCES_THRESHOLD).as(Integer.class, DEFAULT_INSTANCES_THRESHOLD);
        try (ResourceIterator<InstanceCollection> parts = partitioner.partition(getInstances(), threshold, reporter)) {
            if (partitioner.requiresImmediateConsumption()) {
                // handle all parts right here, one after another
                int partCount = 0;
                boolean failed = false;
                if (parts.hasNext()) {
                    while (parts.hasNext() && !progress.isCanceled()) {
                        partCount++;
                        // assembling a part may take a while -> own subtask
                        SubtaskProgressIndicator partitionProgress = new SubtaskProgressIndicator(progress);
                        partitionProgress.begin("Assembling part " + partCount, ProgressIndicator.UNKNOWN);
                        InstanceCollection part = parts.next();
                        partitionProgress.end();
                        progress.setCurrentTask("Upload part " + partCount + ((part.hasSize()) ? (" (" + part.size() + " instances)") : ("")));
                        IOReport report = uploadInstances(part, reporter, new SubtaskProgressIndicator(progress));
                        if (!report.isSuccess()) {
                            failed = true;
                            reporter.error("Upload of part {0} - {1}", partCount, report.getSummary());
                        } else {
                            reporter.info("Upload of part {0} - {1}", partCount, report.getSummary());
                        }
                    }
                    // any error collected so far counts as an overall failure
                    reporter.setSuccess(!failed && reporter.getErrors().isEmpty());
                    if (!reporter.isSuccess()) {
                        reporter.setSummary("Errors during upload to WFS-T, please see the report.");
                    } else {
                        reporter.setSummary("Successfully uploaded data via WFS-T");
                    }
                } else {
                    reporter.setSuccess(false);
                    reporter.setSummary("Partitioning yielded no instances to upload");
                }
            } else {
                // can start requests with separate thread (potentially
                // threads, but tests with WFSes show that this usually is
                // too much to handle for the service)
                int partCount = 0;
                final AtomicBoolean failed = new AtomicBoolean();
                if (parts.hasNext()) {
                    ExecutorService requestThread = Executors.newSingleThreadExecutor();
                    while (parts.hasNext() && !progress.isCanceled()) {
                        partCount++;
                        // subtask indicator only used for the first partitioning
                        // NOTE(review): the original inline comments here were
                        // garbled ("not" / "safe"); presumably the indicator is
                        // not thread safe, hence it is only used before the
                        // first background upload starts -- confirm upstream
                        SubtaskProgressIndicator partitionProgress = new SubtaskProgressIndicator(progress);
                        if (partCount == 1)
                            partitionProgress.begin("Assembling part " + partCount, ProgressIndicator.UNKNOWN);
                        final InstanceCollection part = parts.next();
                        if (partCount == 1)
                            partitionProgress.end();
                        progress.setCurrentTask("Upload part " + partCount + ((part.hasSize()) ? (" (" + part.size() + " instances)") : ("")));
                        final int currentPart = partCount;
                        // submit the upload so partitioning can continue meanwhile
                        requestThread.submit(new Runnable() {

                            @Override
                            public void run() {
                                try {
                                    IOReport report = uploadInstances(part, reporter, new SubtaskProgressIndicator(progress));
                                    if (!report.isSuccess()) {
                                        failed.set(true);
                                        reporter.error(new IOMessageImpl("Upload of part " + currentPart + " - " + report.getSummary(), null));
                                    } else {
                                        reporter.info(new IOMessageImpl("Upload of part " + currentPart + " - " + report.getSummary(), null));
                                    }
                                } catch (Exception e) {
                                    failed.set(true);
                                    reporter.error(new IOMessageImpl("Upload of part " + currentPart + " failed", e));
                                }
                            }
                        });
                    }

                    // wait for requests completion
                    requestThread.shutdown();
                    if (!requestThread.awaitTermination(24, TimeUnit.HOURS)) {
                        reporter.error(new IOMessageImpl("Timeout reached waiting for completion of WFS requests", null));
                    }
                    reporter.setSuccess(!failed.get() && reporter.getErrors().isEmpty());
                    if (!reporter.isSuccess()) {
                        reporter.setSummary("Errors during upload to WFS-T, please see the report.");
                    } else {
                        reporter.setSummary("Successfully uploaded data via WFS-T");
                    }
                } else {
                    reporter.setSuccess(false);
                    reporter.setSummary("Partitioning yielded no instances to upload");
                }
            }
        }
    } catch (Exception e) {
        reporter.error(new IOMessageImpl("Error during attempt to upload to WFS-T", e));
        reporter.setSuccess(false);
    } finally {
        // always close the top-level progress, even on failure
        progress.end();
    }
    return reporter;
}
Use of eu.esdihumboldt.hale.common.core.io.report.IOReport in project hale by halestudio — class XLSInstanceIOTest, method test.
/**
 * Exports the instances created by
 * {@link XLSInstanceWriterTestExamples#createInstanceCollection} into a
 * temporary XLS file by executing {@link XLSInstanceWriter#execute}.
 * Afterwards, the schema is read by {@link XLSSchemaReader} and the
 * instances are loaded by {@link XLSInstanceReader}. Each of the imported
 * instances is compared with the original instances. In addition, a
 * different set of instances is checked to NOT be contained in the
 * imported instances.
 */
@Test
public void test() {
    // set instances to xls instance writer
    XLSInstanceWriter writer = new XLSInstanceWriter();
    InstanceCollection instances = XLSInstanceWriterTestExamples.createInstanceCollection();
    IContentType contentType = HalePlatform.getContentTypeManager().getContentType("eu.esdihumboldt.hale.io.xls.xls");
    writer.setParameter(InstanceTableIOConstants.SOLVE_NESTED_PROPERTIES, Value.of(false));
    File tempDir = Files.createTempDir();
    File tempFile = new File(tempDir, "data.xls");
    writer.setInstances(instances);
    try {
        try {
            // write instances to a temporary XLS file
            writer.setTarget(new FileIOSupplier(tempFile));
            writer.setContentType(contentType);
            IOReport report = writer.execute(null);
            assertTrue(report.isSuccess());
        } catch (IOProviderConfigurationException | IOException e) {
            fail("Execution of xls instance writer failed.");
        }

        // read the schema from the temporary XLS file
        XLSSchemaReader schemaReader = new XLSSchemaReader();
        schemaReader.setContentType(contentType);
        schemaReader.setSource(new FileIOSupplier(tempFile));
        schemaReader.setParameter(CommonSchemaConstants.PARAM_TYPENAME, Value.of("ItemType"));
        schemaReader.setParameter(InstanceTableIOConstants.SOLVE_NESTED_PROPERTIES, Value.of(false));
        schemaReader.setParameter(InstanceTableIOConstants.SHEET_INDEX, Value.of(0));
        try {
            IOReport report = schemaReader.execute(null);
            assertTrue(report.isSuccess());
        } catch (IOProviderConfigurationException | IOException e1) {
            fail("Execution of schema reader failed.");
        }
        Schema schema = schemaReader.getSchema();

        // read the instances from the temporary XLS file
        XLSInstanceReader reader = new XLSInstanceReader();
        reader.setSourceSchema(schema);
        reader.setParameter(CommonSchemaConstants.PARAM_SKIP_FIRST_LINE, Value.of(true));
        reader.setParameter(CommonSchemaConstants.PARAM_TYPENAME, Value.of("ItemType"));
        reader.setParameter(InstanceTableIOConstants.SOLVE_NESTED_PROPERTIES, Value.of(false));
        // read sheet with index 0 since there is only one sheet
        reader.setParameter(InstanceTableIOConstants.SHEET_INDEX, Value.of(0));
        reader.setContentType(contentType);
        reader.setSource(new FileIOSupplier(tempFile));
        try {
            IOReport report = reader.execute(null);
            assertTrue(report.isSuccess());
        } catch (IOProviderConfigurationException | IOException e) {
            fail("Execution of xls instance reader failed.");
        }

        // compare size of instance collection
        InstanceCollection inst = reader.getInstances();
        assertEquals(4, inst.size());

        // check if instance collection contains current instance
        Iterator<Instance> instanceIt = inst.iterator();
        while (instanceIt.hasNext()) {
            Instance instance = instanceIt.next();
            assertTrue(contains(instances.iterator(), instance));
        }

        // the other instances must NOT be contained in the imported instances
        InstanceCollection falseInstances = XLSInstanceWriterTestExamples.createFalseTestInstanceCollection();
        instanceIt = inst.iterator();
        while (instanceIt.hasNext()) {
            Instance instance = instanceIt.next();
            assertFalse(contains(falseInstances.iterator(), instance));
        }
    } finally {
        // delete file and temporary directory even if an assertion failed,
        // so repeated runs do not accumulate temp files
        tempFile.delete();
        tempDir.delete();
    }
}
Use of eu.esdihumboldt.hale.common.core.io.report.IOReport in project hale by halestudio — class XLSReaderTest, method readXLSSchema.
/**
 * Reads a schema from an XLS file resource.
 *
 * @param sourceLocation class-relative resource path of the XLS file
 * @param sheetIndex index of the sheet to read the schema from
 * @param typeName name to give the created type
 * @param paramPropertyType property type parameter passed to the schema reader
 * @return the schema read from the file
 * @throws Exception if reading the schema fails
 */
private Schema readXLSSchema(String sourceLocation, int sheetIndex, String typeName, String paramPropertyType) throws Exception {
    XLSSchemaReader schemaReader = new XLSSchemaReader();
    schemaReader.setSource(new DefaultInputSupplier(getClass().getResource(sourceLocation).toURI()));
    schemaReader.setParameter(InstanceTableIOConstants.SHEET_INDEX, Value.of(sheetIndex));
    schemaReader.setParameter(CommonSchemaConstants.PARAM_TYPENAME, Value.of(typeName));
    schemaReader.setParameter(AbstractTableSchemaReader.PARAM_PROPERTYTYPE, Value.of(paramPropertyType));
    IOReport report = schemaReader.execute(null);
    // fixed typo in the assertion message ("successfull" -> "successful")
    assertTrue("Schema import was not successful.", report.isSuccess());
    return schemaReader.getSchema();
}
Aggregations