Use of de.fhg.igd.slf4jplus.ATransaction in project hale by halestudio.
The class ExportJob, method run().
/**
 * @see org.eclipse.core.runtime.jobs.Job#run(org.eclipse.core.runtime.IProgressMonitor)
 */
@Override
protected IStatus run(IProgressMonitor monitor) {
    IOReporter defaultReporter = writer.createReporter();
    defaultReporter.setSuccess(false);
    IOReport report = defaultReporter;
    try {
        ATransaction trans = log.begin(defaultReporter.getTaskName());
        try {
            IOReport result = writer.execute(new ProgressMonitorIndicator(monitor));
            if (result != null) {
                report = result;
            } else {
                defaultReporter.setSuccess(true);
            }
        } catch (Throwable e) {
            defaultReporter.error(new IOMessageImpl(e.getLocalizedMessage(), e));
        } finally {
            trans.end();
        }
    } catch (Throwable e) {
        defaultReporter.error(new IOMessageImpl(e.getLocalizedMessage(), e));
    }

    if (monitor.isCanceled()) {
        reset();
        return Status.CANCEL_STATUS;
    }

    // add report to report service
    reportHandler.publishReport(report);

    // show message to user
    if (report.isSuccess()) {
        // no message, we rely on the report being shown/processed;
        // let the advisor handle the results
        advisor.handleResults(writer);
        reset();
        return Status.OK_STATUS;
    } else {
        reset();
        log.error(report.getSummary());
        return new Status(Status.ERROR, "eu.esdihumboldt.hale.common.headless",
                report.getSummary(), null);
    }
}
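All of the snippets on this page share one idiom: begin an ATransaction so that the log messages emitted while the task runs are grouped under a common task name, and close it in a finally block so it ends even when the task throws. A minimal sketch of just that bracket, assuming the ALoggerFactory entry point used throughout hale:

import de.fhg.igd.slf4jplus.ALogger;
import de.fhg.igd.slf4jplus.ALoggerFactory;
import de.fhg.igd.slf4jplus.ATransaction;

public class TransactionSketch {

    private static final ALogger log = ALoggerFactory.getLogger(TransactionSketch.class);

    public void doTask() {
        // group all log messages emitted during the task under one transaction
        ATransaction trans = log.begin("Task name for grouping log messages");
        try {
            // ... the actual work goes here ...
        } finally {
            // always close the transaction, even if the work throws
            trans.end();
        }
    }
}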
Use of de.fhg.igd.slf4jplus.ATransaction in project hale by halestudio.
The class ValidationJob, method run().
/**
 * @see org.eclipse.core.runtime.jobs.Job#run(org.eclipse.core.runtime.IProgressMonitor)
 */
@Override
protected IStatus run(IProgressMonitor monitor) {
    boolean successful = true;
    for (InstanceValidator validator : this.validators) {
        IOReporter defaultReporter = validator.createReporter();
        defaultReporter.setSuccess(false);
        IOReport report = defaultReporter;
        try {
            ATransaction trans = log.begin(defaultReporter.getTaskName());
            try {
                if (writer != null) {
                    // set validation schemas (may have been determined only
                    // during writer execution)
                    List<? extends Locatable> schemas = writer.getValidationSchemas();
                    validator.setSchemas(schemas.toArray(new Locatable[schemas.size()]));
                }
                validator.setServiceProvider(serviceProvider);

                IOReport result = validator.execute(new ProgressMonitorIndicator(monitor));
                if (result != null) {
                    report = result;
                } else {
                    defaultReporter.setSuccess(true);
                }
            } catch (Throwable e) {
                defaultReporter.error(new IOMessageImpl(e.getLocalizedMessage(), e));
            } finally {
                trans.end();
            }
        } catch (Throwable e) {
            defaultReporter.error(new IOMessageImpl(e.getLocalizedMessage(), e));
        }

        if (monitor.isCanceled()) {
            reset();
            return Status.CANCEL_STATUS;
        }

        // add report to report service
        reportHandler.publishReport(report);

        // show message to user
        if (report.isSuccess()) {
            // info message
            log.info(report.getSummary());
        } else {
            // error message
            log.error(report.getSummary());
            successful = false;
        }
    }

    reset();

    if (successful) {
        log.userInfo("All validations completed successfully.");
        return Status.OK_STATUS;
    } else {
        log.userError("There were validation failures. Please check the reports for details.");
        return ERROR_STATUS;
    }
}
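ExportJob and ValidationJob drive their providers identically: the default reporter starts out marked as failed and is only flipped to success if execute(...) completes without supplying a report of its own. A hypothetical helper condensing that shared idiom (runWithReport is not hale API; IOProvider, IOReporter, IOMessageImpl and ProgressMonitorIndicator are used as in the snippets above):

IOReport runWithReport(IOProvider provider, IProgressMonitor monitor) {
    IOReporter defaultReporter = provider.createReporter();
    defaultReporter.setSuccess(false); // pessimistic default
    IOReport report = defaultReporter;
    ATransaction trans = log.begin(defaultReporter.getTaskName());
    try {
        IOReport result = provider.execute(new ProgressMonitorIndicator(monitor));
        if (result != null) {
            report = result; // the provider supplied its own report
        } else {
            defaultReporter.setSuccess(true); // no report, but normal completion
        }
    } catch (Throwable e) {
        // record the failure; success stays false
        defaultReporter.error(new IOMessageImpl(e.getLocalizedMessage(), e));
    } finally {
        trans.end();
    }
    return report;
}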
Use of de.fhg.igd.slf4jplus.ATransaction in project hale by halestudio.
The class StoreInstancesJob, method run().
/**
 * @see Job#run(IProgressMonitor)
 */
@Override
public IStatus run(IProgressMonitor monitor) {
    boolean exactProgress = instances.hasSize();
    monitor.beginTask("Store instances in database",
            exactProgress ? instances.size() : IProgressMonitor.UNKNOWN);

    AtomicInteger count = new AtomicInteger(0);
    TObjectIntHashMap<QName> typeCount = new TObjectIntHashMap<>();

    if (report != null) {
        // set the correct start time
        report.setStartTime(new Date());
    }

    // get database connection
    DatabaseReference<ODatabaseDocumentTx> ref = database.openWrite();
    ODatabaseDocumentTx db = ref.getDatabase();

    ATransaction trans = log.begin("Store instances in database");
    try {
        // use intent
        db.declareIntent(new OIntentMassiveInsert());

        // find all InstanceProcessors to feed them the stored instances
        final List<InstanceProcessor> processors;
        if (doProcessing) {
            final InstanceProcessingExtension ext = new InstanceProcessingExtension(serviceProvider);
            processors = ext.getInstanceProcessors();
        } else {
            processors = Collections.emptyList();
        }

        BrowseOrientInstanceCollection browser = new BrowseOrientInstanceCollection(database, null,
                DataSet.SOURCE);

        final InstanceIndexService indexService;
        if (doProcessing) {
            indexService = serviceProvider.getService(InstanceIndexService.class);
        } else {
            indexService = null;
        }

        // TODO decouple next() and save()?
        SimpleLogContext.withLog(report, () -> {
            if (report != null && instances instanceof LogAware) {
                ((LogAware) instances).setLog(report);
            }

            ResourceIterator<Instance> it = instances.iterator();
            int size = instances.size();
            long lastUpdate = 0; // time of the last progress update
            try {
                while (it.hasNext() && !monitor.isCanceled()) {
                    if (report != null && instances instanceof LogAware) {
                        ((LogAware) instances).setLog(report);
                    }

                    Instance instance = it.next();

                    // further processing before storing
                    processInstance(instance);

                    // get/create OInstance
                    OInstance conv = (instance instanceof OInstance) ? (OInstance) instance
                            : new OInstance(instance);
                    conv.setInserted(true);

                    // update the instance to store, e.g. generating metadata
                    updateInstance(conv);

                    ODatabaseRecordThreadLocal.INSTANCE.set(db);

                    // configure the document and save it
                    ODocument doc = conv.configureDocument(db);
                    doc.save();

                    /*
                     * Create an InstanceReference for the saved instance and
                     * feed it to all known InstanceProcessors. The decoration
                     * with ResolvableInstanceReference allows the
                     * InstanceProcessors to resolve the instances if required.
                     */
                    OrientInstanceReference oRef = new OrientInstanceReference(doc.getIdentity(),
                            conv.getDataSet(), conv.getDefinition());
                    IdentifiableInstanceReference idRef = new IdentifiableInstanceReference(oRef,
                            doc.getIdentity());
                    ResolvableInstanceReference resolvableRef = new ResolvableInstanceReference(idRef,
                            browser);
                    processors.forEach(p -> p.process(instance, resolvableRef));

                    if (indexService != null) {
                        indexService.add(instance, resolvableRef);
                    }

                    count.incrementAndGet();

                    TypeDefinition type = instance.getDefinition();
                    if (type != null) {
                        typeCount.adjustOrPutValue(type.getName(), 1, 1);
                    }

                    if (exactProgress) {
                        monitor.worked(1);
                    }

                    long now = System.currentTimeMillis();
                    if (now - lastUpdate > 100) {
                        // only update the sub task every 100 milliseconds
                        monitor.subTask(MessageFormat.format("{0}{1} instances processed",
                                String.valueOf(count.get()),
                                size != InstanceCollection.UNKNOWN_SIZE ? "/" + String.valueOf(size)
                                        : ""));
                        lastUpdate = now;
                    }
                }
            } finally {
                it.close();
                if (report != null && instances instanceof LogAware) {
                    ((LogAware) instances).setLog(null);
                }
            }
        });

        db.declareIntent(null);
    } catch (RuntimeException e) {
        if (report != null) {
            reportTypeCount(report, typeCount);
            report.error(new MessageImpl("Error storing instances in database", e));
            report.setSuccess(false);
            reportHandler.publishReport(report);
        }
        throw e;
    } finally {
        ref.dispose();
        trans.end();

        /*
         * Reset instances to prevent a memory leak. Eclipse seems to hold a
         * reference to the job internally (in JobInfo and/or
         * ProgressMonitorFocusJobDialog), which keeps the instance collection
         * from being garbage collected. This is especially bad if an in-memory
         * instance collection is used, e.g. a DefaultInstanceCollection as
         * used when loading a Shapefile.
         */
        instances = null;
    }

    try {
        onComplete();
    } catch (RuntimeException e) {
        String message = "Error while post processing stored instances";
        if (report != null) {
            report.error(new MessageImpl(message, e));
        } else {
            log.error(message, e);
        }
    }

    String message = MessageFormat.format("Stored {0} instances in the database.", count);

    if (monitor.isCanceled()) {
        String warn = "Loading instances was canceled, incomplete data set in the database.";
        if (report != null) {
            report.warn(new MessageImpl(warn, null));
        } else {
            log.warn(warn);
        }
    }

    if (report != null) {
        reportTypeCount(report, typeCount);
        report.setSuccess(true);
        report.setSummary(message);
        reportHandler.publishReport(report);
    } else {
        log.info(message);
    }

    monitor.done();

    return new Status(monitor.isCanceled() ? IStatus.CANCEL : IStatus.OK,
            "eu.esdihumboldt.hale.common.instance.orient", message);
}
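Beyond the transaction handling, the loop shows a throttled progress idiom: the subtask label is refreshed at most every 100 milliseconds so that UI updates do not dominate the per-instance work, and the timestamp must live outside the loop so the throttle has memory. A minimal sketch of just that idiom, with hasWork()/doWork() as hypothetical stand-ins for the iterator logic:

long lastUpdate = 0; // declared outside the loop so the throttle has memory
while (hasWork()) {
    doWork();
    long now = System.currentTimeMillis();
    if (now - lastUpdate > 100) {
        // refresh the label at most once per 100 ms
        monitor.subTask(count.get() + " instances processed");
        lastUpdate = now;
    }
}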
Use of de.fhg.igd.slf4jplus.ATransaction in project hale by halestudio.
The class HeadlessIO, method executeProvider().
/**
 * Execute the given I/O provider with the given I/O advisor.
 *
 * @param provider the I/O provider
 * @param advisor the I/O advisor
 * @param progress the progress indicator, may be <code>null</code>
 * @param reportHandler the report handler, may be <code>null</code>
 * @throws IOException if executing the provider fails
 */
@SuppressWarnings("unchecked")
public static void executeProvider(final IOProvider provider,
        @SuppressWarnings("rawtypes") final IOAdvisor advisor, ProgressIndicator progress,
        ReportHandler reportHandler) throws IOException {
    IOReporter reporter = provider.createReporter();
    ATransaction trans = log.begin(reporter.getTaskName());
    try {
        // use advisor to configure provider
        advisor.prepareProvider(provider);
        advisor.updateConfiguration(provider);

        // execute
        IOReport report = provider.execute(progress);
        if (reportHandler != null) {
            reportHandler.publishReport(report);
        }

        // handle results
        if (report.isSuccess()) {
            advisor.handleResults(provider);
        } else {
            throw new IOException("Executing I/O provider not successful: " + report.getSummary());
        }
    } catch (Exception e) {
        throw new IOException("Error executing an I/O provider.", e);
    } finally {
        trans.end();
    }
}
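Since both progress and reportHandler accept null, the smallest call site is a single call in a try block. A hypothetical usage sketch (exportWith is not hale API; an unsuccessful report surfaces as an IOException, just like an execution error):

void exportWith(IOProvider provider, @SuppressWarnings("rawtypes") IOAdvisor advisor) {
    try {
        // progress indicator and report handler are optional, so pass null
        HeadlessIO.executeProvider(provider, advisor, null, null);
    } catch (IOException e) {
        // thrown for execution errors and for unsuccessful reports alike
        log.error("Executing the I/O provider failed", e);
    }
}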
Use of de.fhg.igd.slf4jplus.ATransaction in project hale by halestudio.
The class ExamplesContent, method getMappingContent().
/**
 * Get the mapping documentation content for an example project.
 *
 * @param projectId the project ID
 * @return the mapping documentation content stream or <code>null</code>
 */
private InputStream getMappingContent(String projectId) {
    if (!mappingDocExportInitialized) {
        mappingDocExport = HaleIO.createIOProvider(AlignmentWriter.class, null, ID_MAPPING_EXPORT);
        if (mappingDocExport == null) {
            log.error("Could not create mapping documentation exporter.");
        }
        mappingDocExportInitialized = true;
    }
    if (mappingDocExport == null) {
        // no mapping documentation export possible
        return null;
    }

    if (tempMappingDir == null) {
        tempMappingDir = Files.createTempDir();
        tempMappingDir.deleteOnExit();
    }

    // the file of the mapping documentation
    File mappingDoc = new File(tempMappingDir, projectId + ".html");
    if (!mappingDoc.exists()) {
        ATransaction trans = log.begin("Generate example mapping documentation");
        try {
            // create the mapping documentation
            ExampleProject exampleProject = ExampleProjectExtension.getInstance().get(projectId);
            final Project project = (Project) exampleProject.getInfo();

            // determine the alignment location - contained in the project
            // file, not a resource
            URI alignmentLoc = exampleProject.getAlignmentLocation();
            if (alignmentLoc == null) {
                // no alignment present
                return null;
            }

            // store configurations per action ID
            Multimap<String, IOConfiguration> confs = HashMultimap.create();
            for (IOConfiguration conf : project.getResources()) {
                confs.put(conf.getActionId(), conf);
            }

            // load source schemas
            LoadSchemaAdvisor source = new LoadSchemaAdvisor(SchemaSpaceID.SOURCE);
            for (IOConfiguration conf : confs.get(SchemaIO.ACTION_LOAD_SOURCE_SCHEMA)) {
                source.setConfiguration(conf);
                executeProvider(source, conf.getProviderId(), null);
            }

            // load target schemas
            LoadSchemaAdvisor target = new LoadSchemaAdvisor(SchemaSpaceID.TARGET);
            for (IOConfiguration conf : confs.get(SchemaIO.ACTION_LOAD_TARGET_SCHEMA)) {
                target.setConfiguration(conf);
                executeProvider(target, conf.getProviderId(), null);
            }

            // load the alignment - manual loading needed, as we can't rely
            // on the environment alignment advisor
            DefaultInputSupplier alignmentIn = new DefaultInputSupplier(alignmentLoc);
            AlignmentReader reader = HaleIO.findIOProvider(AlignmentReader.class, alignmentIn,
                    alignmentLoc.getPath());
            LoadAlignmentAdvisor alignmentAdvisor = new LoadAlignmentAdvisor(null,
                    source.getSchemaSpace(), target.getSchemaSpace(), exampleProject.getUpdater());
            reader.setSource(alignmentIn);
            executeProvider(alignmentAdvisor, null, reader);
            Alignment alignment = alignmentAdvisor.getAlignment();

            if (alignment != null) {
                // save the alignment documentation
                synchronized (mappingDocExport) {
                    // only a single instance
                    mappingDocExport.setAlignment(alignment);
                    mappingDocExport.setTarget(new FileIOSupplier(mappingDoc));
                    if (mappingDocExport instanceof ProjectInfoAware) {
                        ProjectInfo smallInfo = new ProjectInfo() {

                            @Override
                            public String getName() {
                                return project.getName();
                            }

                            @Override
                            public Date getModified() {
                                return null;
                            }

                            @Override
                            public Version getHaleVersion() {
                                return null;
                            }

                            @Override
                            public String getDescription() {
                                return project.getDescription();
                            }

                            @Override
                            public Date getCreated() {
                                return null;
                            }

                            @Override
                            public String getAuthor() {
                                return project.getAuthor();
                            }
                        };
                        ((ProjectInfoAware) mappingDocExport).setProjectInfo(smallInfo);
                    }
                    mappingDocExport.execute(null);
                }
                mappingDoc.deleteOnExit();
            }
        } catch (Throwable e) {
            log.error("Error generating mapping documentation for example project", e);
            return null;
        } finally {
            trans.end();
        }
    }

    if (mappingDoc.exists()) {
        try {
            return new FileInputStream(mappingDoc);
        } catch (FileNotFoundException e) {
            return null;
        }
    } else {
        return null;
    }
}
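Stripped of the hale specifics, getMappingContent is a lazy per-project file cache: the documentation is generated at most once per project ID into a shared temp directory and reused on later requests. A reduced sketch of that skeleton (generate(...) is a hypothetical stand-in for the export; Files.createTempDir() is the Guava call used above):

private File tempDir;

private synchronized File cachedDoc(String projectId) throws IOException {
    if (tempDir == null) {
        tempDir = com.google.common.io.Files.createTempDir();
        tempDir.deleteOnExit(); // best-effort cleanup on JVM exit
    }
    File doc = new File(tempDir, projectId + ".html");
    if (!doc.exists()) {
        generate(doc); // hypothetical: produce the documentation file
        doc.deleteOnExit();
    }
    return doc;
}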