Use of de.tudarmstadt.ukp.clarin.webanno.tsv.internal.tsv3x.model.TsvDocument in the webanno project (by webanno): class Tsv3XCasDocumentBuilder, method of.
/**
 * Builds a {@link TsvDocument} model from the given CAS, mapping the UIMA annotations onto
 * TSV sentences, tokens and (where annotation boundaries do not align with token
 * boundaries) sub-tokens, according to the column layout declared in the given schema.
 *
 * @param aSchema the TSV schema declaring which UIMA types/columns to serialize.
 * @param aJCas   the CAS containing the sentences, tokens and annotations to convert.
 * @return the populated TSV document model.
 */
public static TsvDocument of(TsvSchema aSchema, JCas aJCas) {
    // NOTE(review): format name/version are hard-coded here — confirm "3.2" matches what
    // the corresponding deserializer expects.
    TsvFormatHeader format = new TsvFormatHeader("WebAnno TSV", "3.2");
    TsvDocument doc = new TsvDocument(format, aSchema, aJCas);

    // Fill document with all the sentences and tokens
    for (Sentence uimaSentence : select(aJCas, Sentence.class)) {
        TsvSentence sentence = doc.createSentence(uimaSentence);
        for (Token uimaToken : selectCovered(Token.class, uimaSentence)) {
            sentence.createToken(uimaToken);
        }
    }

    // Scan for chains: starting from each chain head, follow the "first" feature and then
    // walk the "next" feature links until the end of the chain, collecting the elements.
    for (Type headType : aSchema.getChainHeadTypes()) {
        for (FeatureStructure chainHead : CasUtil.selectFS(aJCas.getCas(), headType)) {
            List<AnnotationFS> elements = new ArrayList<>();
            AnnotationFS link = getFeature(chainHead, CHAIN_FIRST_FEAT, AnnotationFS.class);
            while (link != null) {
                elements.add(link);
                link = getFeature(link, CHAIN_NEXT_FEAT, AnnotationFS.class);
            }
            if (!elements.isEmpty()) {
                // The chain element type is the range of the head's "first" feature.
                Type elementType = headType.getFeatureByBaseName(CHAIN_FIRST_FEAT).getRange();
                doc.createChain(headType, elementType, elements);
            }
        }
    }

    // Build indexes over the token start and end positions such that we can quickly locate
    // tokens based on their offsets.
    NavigableMap<Integer, TsvToken> tokenBeginIndex = new TreeMap<>();
    NavigableMap<Integer, TsvToken> tokenEndIndex = new TreeMap<>();
    List<TsvToken> tokens = new ArrayList<>();
    for (TsvSentence sentence : doc.getSentences()) {
        for (TsvToken token : sentence.getTokens()) {
            tokenBeginIndex.put(token.getBegin(), token);
            tokenEndIndex.put(token.getEnd(), token);
            tokens.add(token);
        }
    }

    // Map each annotation of every schema-declared type onto the corresponding units.
    for (Type type : aSchema.getUimaTypes()) {
        LayerType layerType = aSchema.getLayerType(type);
        // Only span-layer annotations receive a disambiguation ID when stacked on a unit.
        boolean addDisambiguationIdIfStacked = SPAN.equals(layerType);
        for (AnnotationFS annotation : CasUtil.select(aJCas.getCas(), type)) {
            doc.activateType(annotation.getType());
            // Get the relevant begin and end offsets for the current annotation
            int begin = annotation.getBegin();
            int end = annotation.getEnd();
            // For relations, the unit is determined by the offsets of the relation's
            // target annotation, not by the relation annotation itself.
            if (RELATION.equals(layerType)) {
                AnnotationFS targetFS = getFeature(annotation, FEAT_REL_TARGET, AnnotationFS.class);
                begin = targetFS.getBegin();
                end = targetFS.getEnd();
            }
            TsvToken beginToken = tokenBeginIndex.floorEntry(begin).getValue();
            TsvToken endToken = tokenEndIndex.ceilingEntry(end).getValue();
            // For a zero-width annotation sitting exactly on a token boundary, the floor
            // lookup on the begin index may yield the preceding token; prefer the token
            // resolved via the end index so begin and end agree — do not trust the
            // value obtained from the tokenBeginIndex.
            if (begin == end) {
                beginToken = endToken;
            }
            boolean singleToken = beginToken == endToken;
            boolean zeroWitdh = begin == end; // (sic) variable name kept as in original
            boolean multiTokenCapable = SPAN.equals(layerType) || CHAIN.equals(layerType);
            // If the annotation is exactly token-aligned, it maps onto whole tokens and no
            // sub-token units are needed in either case.
            if (beginToken.getBegin() == begin && endToken.getEnd() == end) {
                doc.mapFS2Unit(annotation, beginToken);
                beginToken.addUimaAnnotation(annotation, addDisambiguationIdIfStacked);
                if (multiTokenCapable) {
                    endToken.addUimaAnnotation(annotation, addDisambiguationIdIfStacked);
                }
            } else if (zeroWitdh) {
                // Zero-width annotation not aligned to a token boundary: attach it to a
                // sub-token at its position.
                TsvSubToken t = beginToken.createSubToken(begin, min(beginToken.getEnd(), end));
                doc.mapFS2Unit(annotation, t);
                t.addUimaAnnotation(annotation, addDisambiguationIdIfStacked);
            } else {
                // The annotation covers only part of the begin and/or end token, so
                // sub-token units are created as needed to carry the annotation.
                if (beginToken.getBegin() < begin) {
                    // Annotation starts mid-token: a leading sub-token is the ID-defining
                    // unit for the annotation.
                    TsvSubToken t = beginToken.createSubToken(begin, min(beginToken.getEnd(), end));
                    doc.mapFS2Unit(annotation, t);
                    t.addUimaAnnotation(annotation, addDisambiguationIdIfStacked);
                } else // If no sub-token is ID-defining, then the begin token is ID-defining
                {
                    beginToken.addUimaAnnotation(annotation, addDisambiguationIdIfStacked);
                    doc.mapFS2Unit(annotation, beginToken);
                }
                // If the annotation ends mid-token, create a trailing sub-token — note
                // this applies even when singleToken is true.
                if (endToken.getEnd() > end) {
                    TsvSubToken t = endToken.createSubToken(max(endToken.getBegin(), begin), end);
                    t.addUimaAnnotation(annotation, addDisambiguationIdIfStacked);
                    // For a single-token annotation, the ID-defining unit was already
                    // assigned above; only map here when spanning multiple tokens.
                    if (!singleToken) {
                        doc.mapFS2Unit(annotation, t);
                    }
                } else if (!singleToken && multiTokenCapable) {
                    endToken.addUimaAnnotation(annotation, addDisambiguationIdIfStacked);
                }
            }
            // For multi-token annotations, also mark every token strictly between the
            // begin token and the end token.
            if (multiTokenCapable && !singleToken) {
                ListIterator<TsvToken> i = tokens.listIterator(tokens.indexOf(beginToken));
                TsvToken t;
                while ((t = i.next()) != endToken) {
                    if (t != beginToken) {
                        t.addUimaAnnotation(annotation, addDisambiguationIdIfStacked);
                    }
                }
            }
            // Multi-token span annotations must get a disambiguation ID
            if (SPAN.equals(layerType) && !singleToken) {
                doc.addDisambiguationId(annotation);
            }
        }
    }

    // Scan all created units to see which columns actually contains values
    for (TsvSentence sentence : doc.getSentences()) {
        for (TsvToken token : sentence.getTokens()) {
            scanUnitForActiveColumns(token);
            scanUnitForAmbiguousSlotReferences(token);
            for (TsvSubToken subToken : token.getSubTokens()) {
                scanUnitForActiveColumns(subToken);
                scanUnitForAmbiguousSlotReferences(subToken);
            }
        }
    }

    // Activate the placeholder columns for any active types for which no other columns are
    // active.
    Set<Type> activeTypesNeedingPlaceholders = new HashSet<>(doc.getActiveTypes());
    for (TsvColumn col : doc.getActiveColumns()) {
        activeTypesNeedingPlaceholders.remove(col.uimaType);
    }
    for (TsvColumn col : doc.getSchema().getColumns()) {
        if (PLACEHOLDER.equals(col.featureType) && activeTypesNeedingPlaceholders.contains(col.uimaType)) {
            doc.activateColumn(col);
        }
    }

    return doc;
}
Use of de.tudarmstadt.ukp.clarin.webanno.tsv.internal.tsv3x.model.TsvDocument in the webanno project (by webanno): class Tsv3XDeserializer, method read.
/**
 * Reads a TSV document from the given reader into the given CAS: header and schema first,
 * then the document body, then the chains are materialized and any deferred actions run.
 *
 * @param aIn   the reader positioned at the start of the TSV data.
 * @param aJCas the CAS to populate.
 * @throws IOException if reading from the underlying stream fails.
 */
public void read(LineNumberReader aIn, JCas aJCas) throws IOException {
    deferredActions.set(new ArrayList<>());

    // The format header and schema declaration are consumed first.
    TsvFormatHeader formatHeader = readFormat(aIn);
    TsvSchema tsvSchema = readSchema(aIn, aJCas);

    // Consume the blank separator line that follows the schema declaration.
    String separator = aIn.readLine();
    assert isEmpty(separator);

    TsvDocument document = new TsvDocument(formatHeader, tsvSchema, aJCas);
    for (TsvColumn col : tsvSchema.getColumns()) {
        document.activateColumn(col);
        document.activateType(col.uimaType);
    }

    readContent(aIn, document);

    // Materialize a head feature structure for every non-empty chain and wire the chain
    // elements together via their "next" feature, in order.
    CAS cas = aJCas.getCas();
    for (TsvChain chain : document.getChains()) {
        if (chain.getElements().isEmpty()) {
            continue;
        }

        Iterator<AnnotationFS> links = chain.getElements().iterator();
        AnnotationFS firstLink = links.next();

        // Create the chain head pointing at the first element.
        FeatureStructure head = cas.createFS(chain.getHeadType());
        setFeature(head, CHAIN_FIRST_FEAT, firstLink);
        cas.addFsToIndexes(head);

        // Link each element to its successor.
        AnnotationFS previous = firstLink;
        while (links.hasNext()) {
            AnnotationFS current = links.next();
            setFeature(previous, CHAIN_NEXT_FEAT, current);
            previous = current;
        }
    }

    // Finally, execute the actions that were deferred while reading the body.
    for (Runnable deferred : deferredActions.get()) {
        deferred.run();
    }
}
Use of de.tudarmstadt.ukp.clarin.webanno.tsv.internal.tsv3x.model.TsvDocument in the webanno project (by webanno): class Tsv3XSerializerTest, method testSingleSubTokenWithValue.
@Test
public void testSingleSubTokenWithValue() throws Exception {
// Create test document
JCas cas = makeJCasOneSentence("This is a test .");
addNamedEntity(cas, 1, 3, "PER");
// Set up TSV schema
TsvSchema schema = new TsvSchema();
Type namedEntityType = cas.getCasType(NamedEntity.type);
schema.addColumn(new TsvColumn(namedEntityType, LayerType.SPAN, "value", FeatureType.PRIMITIVE));
// Convert test document content to TSV model
TsvDocument doc = Tsv3XCasDocumentBuilder.of(schema, cas);
String expectedSentence = "#Text=This is a test .\n" + "1-1\t0-4\tThis\t_\t\n" + "1-1.1\t1-3\thi\tPER\t\n" + "1-2\t5-7\tis\t_\t\n" + "1-3\t8-9\ta\t_\t\n" + "1-4\t10-14\ttest\t_\t\n" + "1-5\t15-16\t.\t_\t\n";
assertEquals(expectedSentence, doc.getSentences().get(0).toString());
}
Use of de.tudarmstadt.ukp.clarin.webanno.tsv.internal.tsv3x.model.TsvDocument in the webanno project (by webanno): class Tsv3XSerializerTest, method testSingleZeroWidthTokenWithoutValue.
@Test
public void testSingleZeroWidthTokenWithoutValue() throws Exception {
// Create test document
JCas cas = makeJCasOneSentence("This is a test .");
addNamedEntity(cas, 0, 0, null);
// Set up TSV schema
TsvSchema schema = new TsvSchema();
Type namedEntityType = cas.getCasType(NamedEntity.type);
schema.addColumn(new TsvColumn(namedEntityType, LayerType.SPAN, "value", FeatureType.PRIMITIVE));
// Convert test document content to TSV model
TsvDocument doc = Tsv3XCasDocumentBuilder.of(schema, cas);
String expectedSentence = "#Text=This is a test .\n" + "1-1\t0-4\tThis\t_\t\n" + "1-1.1\t0-0\t\t*\t\n" + "1-2\t5-7\tis\t_\t\n" + "1-3\t8-9\ta\t_\t\n" + "1-4\t10-14\ttest\t_\t\n" + "1-5\t15-16\t.\t_\t\n";
assertEquals(expectedSentence, doc.getSentences().get(0).toString());
}
Use of de.tudarmstadt.ukp.clarin.webanno.tsv.internal.tsv3x.model.TsvDocument in the webanno project (by webanno): class Tsv3XSerializerTest, method testStackedSingleTokenWithValue.
@Test
public void testStackedSingleTokenWithValue() throws Exception {
// Create test document
JCas cas = makeJCasOneSentence("This is a test .");
NamedEntity ne1 = addNamedEntity(cas, 0, 4, "PER");
NamedEntity ne2 = addNamedEntity(cas, 0, 4, "ORG");
// Set up TSV schema
TsvSchema schema = new TsvSchema();
Type namedEntityType = cas.getCasType(NamedEntity.type);
schema.addColumn(new TsvColumn(namedEntityType, LayerType.SPAN, "value", FeatureType.PRIMITIVE));
// Convert test document content to TSV model
TsvDocument doc = Tsv3XCasDocumentBuilder.of(schema, cas);
doc.getSentences().get(0).getTokens().get(0).addUimaAnnotation(ne1, true);
doc.getSentences().get(0).getTokens().get(0).addUimaAnnotation(ne2, true);
assertEquals("1-1\t0-4\tThis\tPER[1]|ORG[2]\t", doc.getSentences().get(0).getTokens().get(0).toString());
String expectedSentence = "#Text=This is a test .\n" + "1-1\t0-4\tThis\tPER[1]|ORG[2]\t\n" + "1-2\t5-7\tis\t_\t\n" + "1-3\t8-9\ta\t_\t\n" + "1-4\t10-14\ttest\t_\t\n" + "1-5\t15-16\t.\t_\t\n";
assertEquals(expectedSentence, doc.getSentences().get(0).toString());
}
Aggregations