Use of org.apache.uima.analysis_engine.AnalysisEngineDescription in project dkpro-lab by dkpro.
From the class UimaAsExecutionEngine, method initializeService:
protected void initializeService() throws Exception {
    // Create Asynchronous Engine API
    uimaAsEngine = new BaseUIMAAsynchronousEngine_impl();
    // Save the AED to a file because UIMA-AS cannot have an AED directly embedded in its
    // descriptor
    AnalysisEngineDescription topDescriptor = configuration.getAnalysisEngineDescription(ctx);
    ResourceMetaData topMetaData = topDescriptor.getMetaData();
    File topDescriptorFile = File.createTempFile(getClass().getSimpleName(), ".xml");
    topDescriptorFile.deleteOnExit();
    try (OutputStream os = new FileOutputStream(topDescriptorFile)) {
        topDescriptor.toXML(os);
    }
    // Create service descriptor
    ServiceContext context = new ServiceContextImpl(topMetaData.getName(),
            topMetaData.getDescription(), topDescriptorFile.getAbsolutePath(), endpoint,
            brokerUrl);
    UimaASPrimitiveDeploymentDescriptor dd = DeploymentDescriptorFactory
            .createPrimitiveDeploymentDescriptor(context);
    // Store the service descriptor to a temporary file as well
    File deploymentDescriptionFile = File.createTempFile(getClass().getSimpleName(), ".xml");
    deploymentDescriptionFile.deleteOnExit();
    dd.save(deploymentDescriptionFile);
    Map<String, Object> serviceCtx = new HashMap<String, Object>();
    serviceCtx.put(UimaAsynchronousEngine.DD2SpringXsltFilePath,
            getUrlAsFile(getClass().getResource("/uima-as/dd2spring.xsl"), true).getAbsolutePath());
    serviceCtx.put(UimaAsynchronousEngine.SaxonClasspath,
            getClass().getResource("/uima-as/saxon8.jar").toString());
    serviceId = uimaAsEngine.deploy(deploymentDescriptionFile.getAbsolutePath(), serviceCtx);
    ctx.message("Deployed experiment as UIMA-AS service: [" + serviceId + "]");
}
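A service deployed this way is driven through the same asynchronous client API. The following client-side sketch is not part of dkpro-lab; the broker URI and endpoint name are assumptions and must match the values passed to ServiceContextImpl above.

import java.util.HashMap;
import java.util.Map;

import org.apache.uima.aae.client.UimaAsynchronousEngine;
import org.apache.uima.adapter.jms.client.BaseUIMAAsynchronousEngine_impl;
import org.apache.uima.cas.CAS;

public class UimaAsClientSketch {
    public static void main(String[] args) throws Exception {
        // Assumed connection settings: they must match the brokerUrl and endpoint
        // used when the service was deployed
        Map<String, Object> appCtx = new HashMap<String, Object>();
        appCtx.put(UimaAsynchronousEngine.ServerUri, "tcp://localhost:61616");
        appCtx.put(UimaAsynchronousEngine.ENDPOINT, "myEndpoint");
        appCtx.put(UimaAsynchronousEngine.CasPoolSize, 2);

        UimaAsynchronousEngine client = new BaseUIMAAsynchronousEngine_impl();
        client.initialize(appCtx);
        try {
            CAS cas = client.getCAS();
            cas.setDocumentText("Some text to process.");
            // Blocks until the remote service returns the processed CAS
            client.sendAndReceiveCAS(cas);
        } finally {
            client.stop();
        }
    }
}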
Use of org.apache.uima.analysis_engine.AnalysisEngineDescription in project dkpro-tc by dkpro.
From the class MetaInfoTask, method getAnalysisEngineDescription:
@Override
public AnalysisEngineDescription getAnalysisEngineDescription(TaskContext aContext)
        throws ResourceInitializationException, IOException {
    featureExtractorNames = new HashSet<>();
    // check for error conditions
    if (featureExtractors == null) {
        throw new ResourceInitializationException(new TextClassificationException(
                "No feature extractors have been added to the experiment."));
    }
    List<AnalysisEngineDescription> metaCollectors = new ArrayList<>();
    if (recordContext) {
        AnalysisEngineDescription aed = injectContextMetaCollector(aContext);
        if (aed == null) {
            throw new NullPointerException(
                    "Initializing a ContextMetaCollector returned an AnalysisEngineDescription which was [NULL]");
        }
        metaCollectors.add(aed);
    }
    try {
        // Configure the meta collectors for each feature extractor individually
        for (TcFeature feClosure : featureExtractors) {
            ExternalResourceDescription feDesc = feClosure.getActualValue();
            Class<?> feClass = getClass(feDesc);
            // Skip feature extractors that are not dependent on meta collectors
            if (!MetaDependent.class.isAssignableFrom(feClass)) {
                continue;
            }
            MetaDependent feInstance = (MetaDependent) feClass.newInstance();
            Map<String, Object> parameterSettings = ConfigurationParameterFactory
                    .getParameterSettings(feDesc.getResourceSpecifier());
            validateUniqueFeatureExtractorNames(parameterSettings);
            // Tell the meta collectors where to store their data
            for (MetaCollectorConfiguration conf : feInstance
                    .getMetaCollectorClasses(parameterSettings)) {
                configureStorageLocations(aContext, conf.descriptor, (String) feClosure.getId(),
                        conf.collectorOverrides, AccessMode.READWRITE);
                metaCollectors.add(conf.descriptor);
            }
        }
    } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
        throw new ResourceInitializationException(e);
    }
    // make sure that the meta key import can be resolved (even when no meta features have been
    // extracted, as in the regression demo)
    aContext.getFolder(META_KEY, AccessMode.READONLY);
    AggregateBuilder builder = new AggregateBuilder();
    for (AnalysisEngineDescription metaCollector : metaCollectors) {
        if (operativeViews != null) {
            for (String viewName : operativeViews) {
                builder.add(metaCollector, CAS.NAME_DEFAULT_SOFA, viewName);
            }
        } else {
            builder.add(metaCollector);
        }
    }
    return builder.createAggregateDescription();
}
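The builder.add(metaCollector, CAS.NAME_DEFAULT_SOFA, viewName) calls map the default sofa seen by each meta collector onto one of the operative views. Below is a minimal standalone sketch of this uimaFIT sofa-mapping pattern; MyAnnotator is a hypothetical stand-in for any single-view annotator, not a dkpro-tc class.

import static org.apache.uima.fit.factory.AnalysisEngineFactory.createEngineDescription;

import org.apache.uima.analysis_engine.AnalysisEngineDescription;
import org.apache.uima.cas.CAS;
import org.apache.uima.fit.factory.AggregateBuilder;
import org.apache.uima.resource.ResourceInitializationException;

public class SofaMappingSketch {
    public static AnalysisEngineDescription build() throws ResourceInitializationException {
        // MyAnnotator is a hypothetical delegate written against the default view
        AnalysisEngineDescription delegate = createEngineDescription(MyAnnotator.class);
        AggregateBuilder builder = new AggregateBuilder();
        // Run the delegate once per view: inside the delegate, the default sofa
        // is transparently mapped to the named view of the outer CAS
        builder.add(delegate, CAS.NAME_DEFAULT_SOFA, "viewA");
        builder.add(delegate, CAS.NAME_DEFAULT_SOFA, "viewB");
        return builder.createAggregateDescription();
    }
}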
Use of org.apache.uima.analysis_engine.AnalysisEngineDescription in project dkpro-tc by dkpro.
From the class InitTaskDeep, method getAnalysisEngineDescription:
// what should actually be done in this task
@Override
public AnalysisEngineDescription getAnalysisEngineDescription(TaskContext aContext)
        throws ResourceInitializationException, IOException {
    String output = isTesting ? OUTPUT_KEY_TEST : OUTPUT_KEY_TRAIN;
    File folder = aContext.getFolder(output, AccessMode.READWRITE);
    AnalysisEngineDescription xmiWriter = createEngineDescription(BinaryCasWriter.class,
            BinaryCasWriter.PARAM_TARGET_LOCATION, folder.getPath(),
            BinaryCasWriter.PARAM_FORMAT, "6+");
    // Special connector that checks whether there are no instances and, if so, outputs a
    // meaningful error message; it should be added before preprocessing
    AnalysisEngineDescription emptyProblemChecker = createEngineDescription(PreprocessConnector.class);
    if (operativeViews != null) {
        AggregateBuilder builder = new AggregateBuilder();
        for (String viewName : operativeViews) {
            builder.add(createEngineDescription(preprocessing), CAS.NAME_DEFAULT_SOFA, viewName);
        }
        preprocessing = builder.createAggregateDescription();
    }
    AggregateBuilder builder = new AggregateBuilder();
    if (dropVocabWithoutEmbedding) {
        builder.add(createEngineDescription(FilterVocabularyByEmbeddingAnnotator.class,
                FilterVocabularyByEmbeddingAnnotator.PARAM_EMBEDDING, embedding));
    }
    builder.add(createEngineDescription(AssignIdConnector.class));
    builder.add(emptyProblemChecker);
    builder.add(preprocessing);
    builder.add(xmiWriter);
    return builder.createAggregateDescription();
}
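The xmiWriter above serializes each CAS in dkpro-core's compressed binary format "6+", which embeds the type system, so the data can later be deserialized without supplying one externally. A hedged sketch of how a downstream step might read the output back with dkpro-core's BinaryCasReader; the "*.bin" pattern assumes the writer's default file extension.

import static org.apache.uima.fit.factory.CollectionReaderFactory.createReaderDescription;

import org.apache.uima.collection.CollectionReaderDescription;

import de.tudarmstadt.ukp.dkpro.core.io.bincas.BinaryCasReader;

// "folder" is the same directory handed to BinaryCasWriter above
CollectionReaderDescription reader = createReaderDescription(BinaryCasReader.class,
        BinaryCasReader.PARAM_SOURCE_LOCATION, folder.getPath(),
        BinaryCasReader.PARAM_PATTERNS, "*.bin");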
Use of org.apache.uima.analysis_engine.AnalysisEngineDescription in project dkpro-tc by dkpro.
From the class ExtractFeaturesConnectorTest, method extractFeaturesConnectorRegressionTest:
@Test
public void extractFeaturesConnectorRegressionTest() throws Exception {
    File outputPath = folder.newFolder();
    // we do not need parameters here, but in case we do :)
    Object[] parameters = new Object[] { NoopFeatureExtractor.PARAM_UNIQUE_EXTRACTOR_NAME, "123",
            UnitContextMetaCollector.PARAM_CONTEXT_FOLDER, Constants.ID_CONTEXT_KEY };
    ExternalResourceDescription featureExtractor = ExternalResourceFactory
            .createExternalResourceDescription(NoopFeatureExtractor.class, parameters);
    List<ExternalResourceDescription> fes = new ArrayList<>();
    fes.add(featureExtractor);
    CollectionReaderDescription reader = CollectionReaderFactory.createReaderDescription(
            TestReaderRegression.class, TestReaderRegression.PARAM_SOURCE_LOCATION,
            "src/test/resources/data/*.txt");
    AnalysisEngineDescription segmenter = AnalysisEngineFactory
            .createEngineDescription(BreakIteratorSegmenter.class);
    AnalysisEngineDescription doc = AnalysisEngineFactory.createEngineDescription(
            DocumentModeAnnotator.class, DocumentModeAnnotator.PARAM_FEATURE_MODE,
            Constants.FM_DOCUMENT);
    AnalysisEngineDescription featExtractorConnector = TaskUtils.getFeatureExtractorConnector(
            outputPath.getAbsolutePath(), JsonDataWriter.class.getName(),
            Constants.LM_REGRESSION, Constants.FM_DOCUMENT, false, false, false, false,
            Collections.emptyList(), fes, new String[] {});
    SimplePipeline.runPipeline(reader, segmenter, doc, featExtractorConnector);
    Gson gson = new Gson();
    List<String> lines = FileUtils.readLines(
            new File(outputPath, JsonDataWriter.JSON_FILE_NAME), "utf-8");
    List<Instance> instances = new ArrayList<>();
    for (String l : lines) {
        instances.add(gson.fromJson(l, Instance.class));
    }
    assertEquals(2, instances.size());
    assertEquals(1, getUniqueOutcomes(instances));
    assertEquals("0.45", instances.get(0).getOutcome());
    System.out.println(FileUtils.readFileToString(
            new File(outputPath, JsonDataWriter.JSON_FILE_NAME), "utf-8"));
}
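The helper getUniqueOutcomes(...) is not shown on this page. A plausible reconstruction, an assumption based only on how the assertions use it, counts the distinct outcome values across all instances:

// Hypothetical reconstruction of the unshown helper: collect each instance's
// outcome and count the distinct values
private static int getUniqueOutcomes(List<Instance> instances) {
    Set<String> outcomes = new HashSet<>();
    for (Instance instance : instances) {
        outcomes.add(instance.getOutcome());
    }
    return outcomes.size();
}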
Use of org.apache.uima.analysis_engine.AnalysisEngineDescription in project dkpro-tc by dkpro.
From the class LuceneMetaCollectionBasedFeatureTestBase, method runFeatureExtractor:
protected void runFeatureExtractor(File luceneFolder, AnalysisEngineDescription featureExtractor)
        throws Exception {
    CollectionReaderDescription reader = getFeatureReader();
    AnalysisEngineDescription segmenter = AnalysisEngineFactory
            .createEngineDescription(BreakIteratorSegmenter.class);
    SimplePipeline.runPipeline(reader, segmenter, featureExtractor);
}
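A concrete test subclass would supply getFeatureReader() and then drive this helper roughly as below. This sketch is an assumption: the extractor class and its parameter name are hypothetical stand-ins, not actual dkpro-tc identifiers, and it presumes a JUnit TemporaryFolder rule named folder as in the test above.

@Test
public void runExtractorSketch() throws Exception {
    File luceneFolder = folder.newFolder();
    // MyLuceneBackedExtractor and PARAM_LUCENE_DIR are hypothetical names
    AnalysisEngineDescription featureExtractor = AnalysisEngineFactory.createEngineDescription(
            MyLuceneBackedExtractor.class,
            MyLuceneBackedExtractor.PARAM_LUCENE_DIR, luceneFolder.getAbsolutePath());
    runFeatureExtractor(luceneFolder, featureExtractor);
}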