Use of org.corpus_tools.salt.common.SDocument in project ANNIS by korpling — class CorefVisualizer, method writeOutput.
/**
 * Writes the HTML output of the CorefVisualizer for a single result.
 *
 * Builds an HTML document (stylesheet/script includes in the head, the
 * rendered texts in the body) from the Salt document of the given input and
 * serializes it to the writer.
 *
 * @param input visualizer input holding the Salt document, mappings, fonts
 *              and resource paths
 * @param w writer to write the generated HTML document with
 */
@Override
public void writeOutput(VisualizerInput input, Writer w) {
  // root html element
  Html html = new Html();
  Head head = new Head();
  Body body = new Body();
  html.removeXmlns();
  html.appendChild(head);
  html.appendChild(body);
  try {
    // collect fonts for the body style: the configured instance font (if any)
    // first, "serif" as fallback
    LinkedList<String> fonts = new LinkedList<String>();
    if (input.getFont() != null) {
      Link linkFont = new Link();
      linkFont.setHref(input.getFont().getUrl());
      head.appendChild(linkFont);
      fonts.add(input.getFont().getName());
    }
    fonts.add("serif");

    // jQuery / jQuery UI includes plus the visualizer's own CSS and JS
    Link linkJQueryUI = new Link();
    linkJQueryUI.setHref(input.getResourcePath("coref/jquery-ui-1.11.4.custom/jquery-ui.min.css"));
    linkJQueryUI.setRel("stylesheet");
    linkJQueryUI.setType("text/css");
    head.appendChild(linkJQueryUI);

    Link linkJQueryUIStructure = new Link();
    linkJQueryUIStructure.setHref(input.getResourcePath("coref/jquery-ui-1.11.4.custom/jquery-ui.structure.min.css"));
    linkJQueryUIStructure.setRel("stylesheet");
    linkJQueryUIStructure.setType("text/css");
    head.appendChild(linkJQueryUIStructure);

    Script scriptJquery = new Script("text/javascript");
    scriptJquery.setSrc(input.getResourcePath("coref/jquery.js"));
    head.appendChild(scriptJquery);

    Script scriptUI = new Script("text/javascript");
    scriptUI.setSrc(input.getResourcePath("coref/jquery-ui-1.11.4.custom/jquery-ui.min.js"));
    head.appendChild(scriptUI);

    Link linkCoref = new Link();
    linkCoref.setHref(input.getResourcePath("coref/coref.css"));
    linkCoref.setRel("stylesheet");
    linkCoref.setType("text/css");
    head.appendChild(linkCoref);

    Script scriptCoref = new Script("text/javascript");
    scriptCoref.setSrc(input.getResourcePath("coref/CorefVisualizer.js"));
    head.appendChild(scriptCoref);

    body.setStyle("font-family: '" + StringUtils.join(fonts, "', '") + "';");

    // reset the per-invocation state used while collecting coreference info
    globalIndex = 0;
    tokensOfNode = new HashMap<String, List<String>>();
    referentList = new LinkedList<TReferent>();
    komponent = new LinkedList<TComponent>();
    referentOfToken = new HashMap<String, HashMap<Long, Integer>>();
    componentOfToken = new HashMap<String, List<Long>>();
    componenttype = new LinkedList<TComponenttype>();

    SDocument saltDoc = input.getDocument();
    SDocumentGraph saltGraph = saltDoc.getDocumentGraph();
    if (saltGraph == null) {
      body.setText("An Error occured: Could not get Graph of Result (Graph == null).");
      // BUGFIX: previously this returned before anything was serialized, so
      // the error message never reached the client. Emit the document first.
      writeHtmlDocument(html, w);
      return;
    }
    List<SRelation<SNode, SNode>> edgeList = saltGraph.getRelations();
    if (edgeList == null) {
      // no relations at all: emit the (still empty) document instead of
      // producing no output whatsoever
      writeHtmlDocument(html, w);
      return;
    }

    // group all matching pointing relations into components (coreference chains)
    for (SRelation rawRel : edgeList) {
      if (includeEdge(rawRel, input.getNamespace())) {
        SPointingRelation rel = (SPointingRelation) rawRel;
        String relType = componentNameForRelation(rel);
        visitedNodes = new LinkedList<String>();
        // is there already a component of this type containing the source node?
        boolean gotIt = false;
        int componentnr;
        for (componentnr = 0; componentnr < componenttype.size(); componentnr++) {
          TComponenttype candidate = componenttype.get(componentnr);
          if (candidate != null && candidate.type != null && candidate.nodeList != null
              && candidate.type.equals(relType)
              && candidate.nodeList.contains(rel.getSource().getId())) {
            gotIt = true;
            break;
          }
        }
        TComponent currentComponent;
        TComponenttype currentComponenttype;
        if (gotIt) {
          currentComponent = komponent.get(componentnr);
          currentComponenttype = componenttype.get(componentnr);
        } else {
          currentComponenttype = new TComponenttype();
          currentComponenttype.type = relType;
          componenttype.add(currentComponenttype);
          componentnr = komponent.size();
          currentComponent = new TComponent();
          currentComponent.type = relType;
          currentComponent.tokenList = new LinkedList<String>();
          komponent.add(currentComponent);
          // NOTE(review): assumes TComponenttype initializes nodeList itself
          // (tokenList above needed explicit initialization) — otherwise this
          // throws a NullPointerException; confirm against TComponenttype.
          currentComponenttype.nodeList.add(rel.getSource().getId());
        }
        TReferent ref = new TReferent();
        ref.annotations = new HashSet<SerializableAnnotation>();
        for (SAnnotation anno : rel.getAnnotations()) {
          ref.annotations.add(new SerializableAnnotation(anno));
        }
        ref.component = componentnr;
        referentList.add(ref);

        List<String> currentTokens = getAllTokens(rel.getSource(), componentNameForRelation(rel),
            currentComponenttype, componentnr, input.getNamespace());
        // register target (flag 0) and source (flag 1) as referents of this relation
        setReferent(rel.getTarget(), globalIndex, 0);
        setReferent(rel.getSource(), globalIndex, 1);

        for (String s : currentTokens) {
          if (!currentComponent.tokenList.contains(s)) {
            currentComponent.tokenList.add(s);
          }
        }
        globalIndex++;
      }
    }

    colorlist = new HashMap<Integer, Integer>();

    // A list containing all the generated HTML elements, one list entry
    // for each text.
    List<List<Node>> nodesPerText = new LinkedList<List<Node>>();

    // write output for each text separately
    List<STextualDS> texts = saltGraph.getTextualDSs();
    if (texts != null && !texts.isEmpty()) {
      for (STextualDS t : texts) {
        DataSourceSequence<Integer> sequence = new DataSourceSequence<>(t, 0,
            (t.getText() != null) ? t.getText().length() : 0);
        List<SToken> token = saltGraph.getSortedTokenByText(saltGraph.getTokensBySequence(sequence));
        if (token != null) {
          boolean validText = true;
          if (Boolean.parseBoolean(input.getMappings().getProperty("hide_empty", "false"))) {
            validText = false;
            // check if the text contains any matching annotations
            for (SToken tok : token) {
              /*
               * The token is only added to this map if a valid edge
               * (according to the resolver trigger) connected to
               * this token was found.
               */
              if (referentOfToken.get(tok.getId()) != null
                  && !referentOfToken.get(tok.getId()).isEmpty()) {
                validText = true;
                break;
              }
            }
          }
          if (validText) {
            List<Node> nodes = outputSingleText(token, input);
            nodesPerText.add(nodes);
          }
        }
      } // end for each STextualDS

      /*
       * Append the generated output to the body, wrap in table if necessary.
       * Multiple texts are presented as columns side by side.
       */
      Table tableTexts = new Table();
      Tr trTextRow = new Tr();
      trTextRow.setCSSClass("textRow");
      // only append wrapper table if we have multiple texts
      if (nodesPerText.size() > 1) {
        body.appendChild(tableTexts);
        tableTexts.appendChild(trTextRow);
      }
      for (List<Node> nodes : nodesPerText) {
        if (nodesPerText.size() > 1) {
          // multi-text mode: one table cell per text
          Td tdSingleText = new Td();
          trTextRow.appendChild(tdSingleText);
          tdSingleText.setCSSClass("text");
          tdSingleText.appendChild(nodes);
        } else {
          body.appendChild(nodes);
        }
      }
    } else {
      Text errorTxt = new Text("Could not find any texts for the " + input.getNamespace()
          + " node namespace (layer).");
      body.appendChild(errorTxt);
    }

    writeHtmlDocument(html, w);
  } catch (IOException ex) {
    log.error(null, ex);
  }
}

/**
 * Serializes the HTML4 transitional doctype followed by the given HTML tree.
 *
 * @param html root element to serialize
 * @param w target writer
 * @throws IOException if writing to the writer fails
 */
private void writeHtmlDocument(Html html, Writer w) throws IOException {
  // write HTML4 transitional doctype
  w.append(new Doctype(DocumentType.HTMLTransitional).write());
  // append the html tree
  w.append(html.write());
}
Use of org.corpus_tools.salt.common.SDocument in project ANNIS by korpling — class QueryDaoImpl, method exportCorpus.
/**
 * Exports a complete top-level corpus, document by document, to the given
 * output directory as SaltXML, together with a corpus-structure project file.
 *
 * @param toplevelCorpus name of the top-level corpus to export
 * @param outputDirectory directory the corpus is written into
 */
@Override
@Transactional(readOnly = true)
public void exportCorpus(String toplevelCorpus, File outputDirectory) {
  // check if the corpus really exists
  mapCorpusNameToId(toplevelCorpus);

  // build a fresh corpus structure that the exported documents are attached to
  SaltProject corpusProject = SaltFactory.createSaltProject();
  SCorpusGraph corpusGraph = SaltFactory.createSCorpusGraph();
  corpusGraph.setSaltProject(corpusProject);
  SCorpus rootCorpus = corpusGraph.createCorpus(null, toplevelCorpus);

  // add all root metadata
  for (Annotation metaAnno : listCorpusAnnotations(toplevelCorpus)) {
    rootCorpus.createMetaAnnotation(metaAnno.getNamespace(), metaAnno.getName(),
        metaAnno.getValue());
  }

  File documentRootDir = new File(outputDirectory, toplevelCorpus);
  if (!outputDirectory.exists() && !outputDirectory.mkdirs()) {
    log.warn("Could not create output directory \"{}\" for exporting the corpus",
        outputDirectory.getAbsolutePath());
  }

  List<Annotation> docs = listDocuments(toplevelCorpus);
  int docNumber = 1;
  for (Annotation docAnno : docs) {
    log.info("Loading document {} from database ({}/{})", docAnno.getName(), docNumber,
        docs.size());
    SaltProject docProject = retrieveAnnotationGraph(toplevelCorpus, docAnno.getName(), null);
    boolean hasGraphs = docProject != null && docProject.getCorpusGraphs() != null
        && !docProject.getCorpusGraphs().isEmpty();
    if (hasGraphs) {
      List<Annotation> docMetaData =
          listCorpusAnnotations(toplevelCorpus, docAnno.getName(), true);
      SCorpusGraph docCorpusGraph = docProject.getCorpusGraphs().get(0);
      // TODO: we could re-use the actual corpus structure instead of just adding a flat list of documents
      if (docCorpusGraph.getDocuments() != null) {
        for (SDocument doc : docCorpusGraph.getDocuments()) {
          log.info("Removing SFeatures from {} ({}/{})", docAnno.getName(), docNumber,
              docs.size());
          SDocumentGraph graph = doc.getDocumentGraph();
          if (graph != null) {
            stripAnnisFeatures(graph);
          }
          log.info("Saving document {} ({}/{})", doc.getName(), docNumber, docs.size());
          File docFile = new File(documentRootDir,
              doc.getName() + "." + SaltUtil.FILE_ENDING_SALT_XML);
          SaltUtil.saveDocumentGraph(graph, URI.createFileURI(docFile.getAbsolutePath()));

          // mirror the document in the exported corpus structure and copy its metadata
          SDocument docCopy = corpusGraph.createDocument(rootCorpus, doc.getName());
          log.info("Adding metadata to document {} ({}/{})", doc.getName(), docNumber,
              docs.size());
          for (Annotation metaAnno : docMetaData) {
            docCopy.createMetaAnnotation(metaAnno.getNamespace(), metaAnno.getName(),
                metaAnno.getValue());
          }
        }
      }
    }
    docNumber++;
  } // end for each document

  // save the actual SaltProject
  log.info("Saving corpus structure");
  File projectFile = new File(outputDirectory, SaltUtil.FILE_SALT_PROJECT);
  SaltXML10Writer writer = new SaltXML10Writer(projectFile);
  writer.writeSaltProject(corpusProject);
}

/**
 * Removes the ANNIS-specific relANNIS features (which require a special Java
 * class to deserialize) from all nodes and relations of the given graph.
 */
private static void stripAnnisFeatures(SDocumentGraph graph) {
  if (graph.getNodes() != null) {
    for (SNode n : graph.getNodes()) {
      n.removeLabel(AnnisConstants.ANNIS_NS, AnnisConstants.FEAT_RELANNIS_NODE);
    }
  }
  if (graph.getRelations() != null) {
    for (SRelation e : graph.getRelations()) {
      e.removeLabel(AnnisConstants.ANNIS_NS, AnnisConstants.FEAT_RELANNIS_EDGE);
    }
  }
}
Use of org.corpus_tools.salt.common.SDocument in project ANNIS by korpling — class AutoSimpleRegexQuery, method analyzingQuery.
/**
 * Analyzes the given Salt project and derives an example AQL regex query.
 *
 * Collects the spanned text of all tokens, then prefers a word that occurs
 * twice with differing capitalization of its first letter; otherwise a random
 * non-empty token is picked. The result is stored in the {@code text} and
 * {@code finalAQL} fields ({@code finalAQL} is set to the empty string when
 * no suitable token exists).
 *
 * @param saltProject the project whose documents are scanned for tokens
 */
@Override
public void analyzingQuery(SaltProject saltProject) {
  List<String> tokens = new ArrayList<>();
  for (SCorpusGraph g : saltProject.getCorpusGraphs()) {
    if (g != null) {
      for (SDocument doc : g.getDocuments()) {
        SDocumentGraph docGraph = doc.getDocumentGraph();
        // BUGFIX: guard against documents without a loaded graph (previously
        // this dereferenced docGraph unconditionally)
        if (docGraph == null) {
          continue;
        }
        List<SNode> sNodes = docGraph.getNodes();
        if (sNodes != null) {
          for (SNode n : sNodes) {
            if (n instanceof SToken) {
              tokens.add(CommonHelper.getSpannedText((SToken) n));
            }
          }
        }
      }
    }
  }

  // Try to find a word which is contained twice, once with a capitalized and
  // once with a lower-case first letter.
  text = null;
  search:
  for (int i = 0; i < tokens.size(); i++) {
    for (int j = i + 1; j < tokens.size(); j++) {
      if (tokens.get(i).equalsIgnoreCase(tokens.get(j)) && tokens.get(i).length() > 1) {
        char first = tokens.get(i).charAt(0);
        char other = tokens.get(j).charAt(0);
        if ((Character.isLowerCase(first) && Character.isUpperCase(other))
            || (Character.isLowerCase(other) && Character.isUpperCase(first))) {
          text = tokens.get(i);
          // BUGFIX: break out of BOTH loops; the previous unlabeled break only
          // left the inner loop, so later iterations could overwrite the match.
          break search;
        }
      }
    }
  }

  if (text != null) {
    finalAQL = buildCaseInsensitiveRegex(text);
  } else if (tokens.isEmpty()) {
    // BUGFIX: Random.nextInt would throw on an empty token list
    finalAQL = "";
  } else {
    // select one random token from the result
    Random random = new Random();
    int tries = 10;
    // BUGFIX: use nextInt(size) instead of nextInt(size - 1); the old bound
    // could never select the last token and threw for a single-token list.
    text = tokens.get(random.nextInt(tokens.size()));
    while ("".equals(text) && tries > 0) {
      text = tokens.get(random.nextInt(tokens.size()));
      tries--;
    }
    if (!"".equals(text) && text.length() > 1) {
      finalAQL = buildCaseInsensitiveRegex(text);
    } else {
      finalAQL = "";
    }
  }
}

/**
 * Builds an AQL regex such as {@code /[Ww]ord/} that matches the word with its
 * first letter in either case. Assumes {@code word.length() > 1}.
 */
private static String buildCaseInsensitiveRegex(String word) {
  char upperLetter = Character.toUpperCase(word.charAt(0));
  char lowerLetter = Character.toLowerCase(word.charAt(0));
  // word.substring(1) is equivalent to the previous
  // StringUtils.substring(word, -(word.length() - 1)) but far clearer
  return "/[" + upperLetter + lowerLetter + "]" + word.substring(1) + "/";
}
Use of org.corpus_tools.salt.common.SDocument in project ANNIS by korpling — class SaltAnnotateExtractor, method addMatchInformation.
/**
 * Sets additional match (global) information about the matched nodes and
 * annotations.
 *
 * This will add the {@link AnnisConstants#FEAT_MATCHEDIDS} to all {@link SDocument} elements of the
 * salt project.
 *
 * @param p The salt project to add the features to.
 * @param matchGroup A list of matches in the same order as the corpus graphs
 * of the salt project.
 */
public static void addMatchInformation(SaltProject p, MatchGroup matchGroup) {
  int matchIndex = 0;
  for (Match m : matchGroup.getMatches()) {
    // get the corresponding SDocument of the salt project
    // NOTE(review): relies on the match order matching the corpus-graph order
    // and on each corpus graph containing exactly one document (get(0));
    // an out-of-sync matchGroup would throw IndexOutOfBoundsException —
    // confirm this invariant with the callers.
    SCorpusGraph corpusGraph = p.getCorpusGraphs().get(matchIndex);
    SDocument doc = corpusGraph.getDocuments().get(0);
    setMatchedIDs(doc.getDocumentGraph(), m);
    matchIndex++;
  }
}
Use of org.corpus_tools.salt.common.SDocument in project ANNIS by korpling — class SaltBasedExporter, method convertSaltProject.
/**
 * Iterates over all matches (modelled as corpus graphs) of the project and
 * processes each single document graph: if {@code nodeCount} is non-null an
 * adjacency matrix is created via {@code createAdjacencyMatrix}, otherwise the
 * text is written via {@code outputText}. Before processing, any configured
 * virtual tokenization is removed from the document graph.
 *
 * @param p the salt project holding the matches; ignored when null
 * @param annoKeys annotation keys of interest
 * @param args exporter arguments
 * @param alignmc alignment flag passed through to {@code outputText}
 * @param offset record number of the first processed document
 * @param corpusConfigs corpus configurations, keyed by corpus name
 * @param out target writer
 * @param nodeCount when non-null, selects adjacency-matrix mode
 * @throws IOException if writing the output fails
 * @throws IllegalArgumentException on invalid arguments
 */
private void convertSaltProject(SaltProject p, List<String> annoKeys, Map<String, String> args, boolean alignmc, int offset, Map<String, CorpusConfig> corpusConfigs, Writer out, Integer nodeCount) throws IOException, IllegalArgumentException {
  int recordNumber = offset;
  if (p == null || p.getCorpusGraphs() == null) {
    return;
  }

  // Determine how virtual tokenization should be resolved, based on the
  // configuration of the first top-level corpus.
  Map<String, String> spanAnno2order = null;
  boolean virtualTokenizationFromNamespace = false;
  Set<String> corpusNames = CommonHelper.getToplevelCorpusNames(p);
  if (!corpusNames.isEmpty()) {
    CorpusConfig config = corpusConfigs.get(corpusNames.iterator().next());
    if (config != null) {
      if ("true".equalsIgnoreCase(config.getConfig("virtual_tokenization_from_namespace"))) {
        virtualTokenizationFromNamespace = true;
      } else {
        String mappingRaw = config.getConfig("virtual_tokenization_mapping");
        if (mappingRaw != null) {
          // parse "spanAnno=orderName" pairs separated by commas
          spanAnno2order = new HashMap<>();
          for (String singleMapping : Splitter.on(',').split(mappingRaw)) {
            List<String> mappingParts = Splitter.on('=').splitToList(singleMapping);
            if (mappingParts.size() >= 2) {
              spanAnno2order.put(mappingParts.get(0), mappingParts.get(1));
            }
          }
        }
      }
    }
  }

  for (SCorpusGraph corpusGraph : p.getCorpusGraphs()) {
    List<SDocument> documents = corpusGraph.getDocuments();
    if (documents == null) {
      continue;
    }
    for (SDocument doc : documents) {
      if (virtualTokenizationFromNamespace) {
        TimelineReconstructor.removeVirtualTokenizationUsingNamespace(doc.getDocumentGraph());
      } else if (spanAnno2order != null) {
        // there is a definition how to map the virtual tokenization to a real one
        TimelineReconstructor.removeVirtualTokenization(doc.getDocumentGraph(), spanAnno2order);
      }
      // adjacency-matrix mode when nodeCount is given, plain text output otherwise
      if (nodeCount != null) {
        createAdjacencyMatrix(doc.getDocumentGraph(), args, recordNumber++, nodeCount);
      } else {
        outputText(doc.getDocumentGraph(), alignmc, recordNumber++, out);
      }
    }
  }
}
Aggregations