Use of eu.esdihumboldt.hale.common.instance.orient.OInstance in project hale by halestudio.
From the class OrientInstanceSink, the method putInstance:
/**
* Add an instance to the database and return a reference to it.
*
* @param instance the instance to add
* @return the instance reference
*/
public synchronized InstanceReference putInstance(Instance instance) {
if (ref == null) {
ref = database.openWrite();
}
ODatabaseDocumentTx db = ref.getDatabase();
// further processing before saving
processInstance(instance);
// get/create OInstance
OInstance conv = (instance instanceof OInstance) ? (OInstance) instance : new OInstance(instance);
conv.setInserted(true);
ODatabaseRecordThreadLocal.INSTANCE.set(db);
// configure the document
ODocument doc = conv.configureDocument(db);
// and save it
doc.save();
return new OrientInstanceReference(doc.getIdentity(), instance.getDataSet(), instance.getDefinition());
}
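As a rough usage sketch (not code from the project; the sink and the source collection are assumed to exist already), a caller could push all instances of a collection through putInstance and keep the returned references for later resolution:

// sketch: "sink" is an OrientInstanceSink, "instances" a hale InstanceCollection
List<InstanceReference> references = new ArrayList<>();
ResourceIterator<Instance> it = instances.iterator();
try {
    while (it.hasNext()) {
        // each call stores one instance and yields a reference into the database
        references.add(sink.putInstance(it.next()));
    }
} finally {
    it.close();
}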
Use of eu.esdihumboldt.hale.common.instance.orient.OInstance in project hale by halestudio.
From the class Transformation, the method transform:
/**
* Transform the given instances, according to the given alignment.
*
* @param sources the collection of source instances
* @param targetSink the target sink
* @param exportJob the export job
* @param validationJob the validation job, may be <code>null</code>
* @param alignment the alignment, may not be changed outside this method
* @param sourceSchema the source schema
* @param reportHandler the report handler
* @param serviceProvider the service provider in the transformation context
* @param processId the identifier for the transformation process, may be
* <code>null</code> if grouping the jobs to a job family is not
* necessary
* @param settings the transformation settings
* @return the future representing the successful completion of the
* transformation (note that a successful completion doesn't
* necessarily mean there weren't any internal transformation errors)
*/
public static ListenableFuture<Boolean> transform(InstanceCollection sources, final TransformationSink targetSink, final ExportJob exportJob, final ValidationJob validationJob, final Alignment alignment, SchemaSpace sourceSchema, final ReportHandler reportHandler, final ServiceProvider serviceProvider, final Object processId, TransformationSettings settings) {
final SettableFuture<Boolean> result = SettableFuture.create();
final InstanceCollection sourceToUse;
// Check whether to create a temporary database or not.
// Currently do not create a temporary DB if there are only Retypes/Creates.
final LocalOrientDB db;
boolean useTempDatabase = settings.useTemporaryDatabase().orElseGet(() -> {
boolean useDb = false;
for (Cell cell : alignment.getActiveTypeCells()) {
if (!isStreamingTypeTransformation(cell.getTransformationIdentifier())) {
useDb = true;
break;
}
}
return useDb;
});
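// note (sketch, not project code): the heuristic above is equivalent to asking
// whether any active type cell uses a non-streaming transformation, e.g.
// boolean needsDb = alignment.getActiveTypeCells().stream()
//     .anyMatch(cell -> !isStreamingTypeTransformation(cell.getTransformationIdentifier()));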
// Create temporary database if necessary.
if (useTempDatabase) {
// create db
File tmpDir = Files.createTempDir();
db = new LocalOrientDB(tmpDir);
tmpDir.deleteOnExit();
// get instance collection
// sourceToUse = new BrowseOrientInstanceCollection(db, sourceSchema, DataSet.SOURCE);
// only yield instances that were actually inserted
// this is also done in OrientInstanceService
// TODO make configurable?
sourceToUse = FilteredInstanceCollection.applyFilter(new BrowseOrientInstanceCollection(db, sourceSchema, DataSet.SOURCE), new Filter() {
@Override
public boolean match(Instance instance) {
Instance inst = (instance instanceof InstanceDecorator) ? InstanceDecorator.getRoot(instance) : instance;
if (inst instanceof OInstance) {
return ((OInstance) inst).isInserted();
}
return true;
}
});
} else {
sourceToUse = new StatsCountInstanceCollection(sources, reportHandler);
db = null;
}
// create transformation job
final AbstractTransformationJob transformJob = new AbstractTransformationJob("Transformation") {
/**
* @see org.eclipse.core.runtime.jobs.Job#run(org.eclipse.core.runtime.IProgressMonitor)
*/
@Override
protected IStatus run(IProgressMonitor monitor) {
TransformationService transformationService = HalePlatform.getService(TransformationService.class);
TransformationReport report = transformationService.transform(alignment, sourceToUse, targetSink, serviceProvider, new ProgressMonitorIndicator(monitor));
try {
// publish report
reportHandler.publishReport(report);
if (report.isSuccess()) {
return Status.OK_STATUS;
} else {
return ERROR_STATUS;
}
} finally {
// handle cancellation explicitly here, as an unnoticed cancellation may lead to the transformation report being lost
if (monitor.isCanceled()) {
targetSink.done(true);
return Status.CANCEL_STATUS;
} else {
targetSink.done(false);
}
}
}
};
// set process IDs to group jobs in a job family
if (processId != null) {
transformJob.setProcessId(processId);
exportJob.setProcessId(processId);
if (validationJob != null) {
validationJob.setProcessId(processId);
}
}
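// with the process id set, all three jobs share one job family; a caller could
// therefore wait for the whole group via the Eclipse job manager, e.g. (sketch,
// not part of this method):
// Job.getJobManager().join(AbstractTransformationJob.createFamily(processId),
//     new NullProgressMonitor());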
exportJob.setUser(true);
// the jobs should cancel each other
transformJob.addJobChangeListener(new JobChangeAdapter() {
@Override
public void done(IJobChangeEvent event) {
if (!event.getResult().isOK()) {
// log transformation job error (because it otherwise gets lost)
String msg = "Error during transformation";
if (event.getResult().getMessage() != null) {
msg = msg + ": " + event.getResult().getMessage();
}
log.error(msg, event.getResult().getException());
// failing transformation is done by cancelling the export
exportJob.cancel();
}
if (db != null) {
db.delete();
}
}
});
// after export is done, validation should run
exportJob.addJobChangeListener(new JobChangeAdapter() {
@Override
public void done(IJobChangeEvent event) {
if (!event.getResult().isOK()) {
transformJob.cancel();
// failure
failure(result, event);
} else {
if (validationJob == null) {
// success
result.set(true);
} else {
// schedule the validation job
validationJob.schedule();
}
}
}
});
// validation ends the process
if (validationJob != null) {
validationJob.addJobChangeListener(new JobChangeAdapter() {
@Override
public void done(IJobChangeEvent event) {
if (!event.getResult().isOK()) {
// failure
failure(result, event);
} else {
// success
result.set(true);
}
}
});
}
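// the failure(...) helper used by the listeners above is not part of this
// excerpt; a hypothetical minimal version would simply record the unsuccessful
// outcome on the future, roughly:
// private static void failure(SettableFuture<Boolean> result, IJobChangeEvent event) {
//     Throwable error = event.getResult().getException();
//     if (error != null) {
//         result.setException(error);
//     } else {
//         result.set(false);
//     }
// }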
if (useTempDatabase) {
// Initialize instance index with alignment
InstanceIndexService indexService = serviceProvider.getService(InstanceIndexService.class);
indexService.addPropertyMappings(alignment.getActiveTypeCells(), serviceProvider);
// run store instance job first...
Job storeJob = new StoreInstancesJob("Load source instances into temporary database", db, sources, serviceProvider, reportHandler, true) {
@Override
protected void onComplete() {
// onComplete is also called if monitor is cancelled...
}
@Override
public boolean belongsTo(Object family) {
if (processId == null) {
return super.belongsTo(family);
}
return AbstractTransformationJob.createFamily(processId).equals(family);
}
};
// and schedule jobs on successful completion
storeJob.addJobChangeListener(new JobChangeAdapter() {
@Override
public void done(IJobChangeEvent event) {
if (event.getResult().isOK()) {
exportJob.schedule();
transformJob.schedule();
} else {
failure(result, event);
}
}
});
storeJob.schedule();
} else {
// otherwise feed InstanceProcessors directly from the
// InstanceCollection...
// TODO Implement differently, not w/ PseudoInstanceReference which
// will cause memory problems
// final InstanceProcessingExtension ext = new InstanceProcessingExtension(
// serviceProvider);
// final List<InstanceProcessor> processors = ext.getInstanceProcessors();
//
// ResourceIterator<Instance> it = sourceToUse.iterator();
// try {
// while (it.hasNext()) {
// Instance instance = it.next();
//
// ResolvableInstanceReference resolvableRef = new ResolvableInstanceReference(
// new PseudoInstanceReference(instance), sourceToUse);
// processors.forEach(p -> p.process(instance, resolvableRef));
//
// }
// } finally {
// it.close();
// }
// ...and schedule jobs
exportJob.schedule();
transformJob.schedule();
}
return result;
}
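A caller typically reacts to the returned future instead of blocking on it. A minimal sketch, assuming a Guava version that provides MoreExecutors.directExecutor() (variable names are placeholders):

ListenableFuture<Boolean> done = Transformation.transform(sources, targetSink, exportJob,
        validationJob, alignment, sourceSchema, reportHandler, serviceProvider,
        processId, settings);
Futures.addCallback(done, new FutureCallback<Boolean>() {
    @Override
    public void onSuccess(Boolean success) {
        // the job chain completed; per the javadoc above this does not rule out
        // internal transformation errors reported via the report handler
    }
    @Override
    public void onFailure(Throwable t) {
        // a job in the chain failed or was cancelled
    }
}, MoreExecutors.directExecutor());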