Use of com.joliciel.talismane.tokeniser.Token in project talismane by joliciel-informatique.
The class TokenOffsetAddressFunction, method checkInternal.
@Override
public FeatureResult<TokenWrapper> checkInternal(TokenWrapper tokenWrapper, RuntimeEnvironment env) throws TalismaneException {
    TokenWrapper innerWrapper = this.getToken(tokenWrapper, env);
    if (innerWrapper == null)
        return null;
    Token token = innerWrapper.getToken();
    FeatureResult<TokenWrapper> result = null;
    Token offsetToken = null;
    FeatureResult<Integer> offsetResult = offsetFeature.check(innerWrapper, env);
    if (offsetResult != null) {
        int offset = offsetResult.getOutcome();
        if (offset == 0)
            offsetToken = token;
        else {
            int index = 0;
            if (token.isWhiteSpace()) {
                // a whitespace token has no index of its own: anchor the
                // offset on the nearest non-whitespace neighbour in the
                // direction of the offset
                if (offset > 0) {
                    if (token.getIndexWithWhiteSpace() - 1 >= 0) {
                        index = token.getTokenSequence().listWithWhiteSpace().get(token.getIndexWithWhiteSpace() - 1).getIndex();
                    } else {
                        index = -1;
                    }
                } else if (offset < 0) {
                    if (token.getIndexWithWhiteSpace() + 1 < token.getTokenSequence().listWithWhiteSpace().size()) {
                        index = token.getTokenSequence().listWithWhiteSpace().get(token.getIndexWithWhiteSpace() + 1).getIndex();
                    } else {
                        index = token.getTokenSequence().size();
                    }
                }
            } else {
                // not whitespace: use the token's own index
                index = token.getIndex();
            }
            int offsetIndex = index + offset;
            if (offsetIndex >= 0 && offsetIndex < token.getTokenSequence().size()) {
                offsetToken = token.getTokenSequence().get(offsetIndex);
            }
        }
    }
    if (offsetToken != null) {
        result = this.generateResult(offsetToken);
    }
    return result;
}
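The subtle part of the method above is the boundary handling: a whitespace token is first anchored to a neighbouring real token, and only then is the offset applied. The following standalone sketch isolates that idea using plain strings in place of Talismane tokens; the names (OffsetSketch, resolveOffset, isWhiteSpace) are illustrative only, and the -1/size() edge anchoring of the original is simplified away.

import java.util.Arrays;
import java.util.List;

public class OffsetSketch {

    static boolean isWhiteSpace(String s) {
        return s.trim().isEmpty();
    }

    // tokens: the whitespace-free token list; withWhiteSpace: all atomic tokens
    static String resolveOffset(List<String> tokens, List<String> withWhiteSpace, int pos, int offset) {
        String token = withWhiteSpace.get(pos);
        if (offset == 0)
            return token;
        int index;
        if (isWhiteSpace(token)) {
            // whitespace has no index in the token list: anchor on a neighbour,
            // so +1 means "first token after the space", -1 "first token before"
            index = offset > 0 ? tokens.indexOf(withWhiteSpace.get(pos - 1)) : tokens.indexOf(withWhiteSpace.get(pos + 1));
        } else {
            index = tokens.indexOf(token);
        }
        int offsetIndex = index + offset;
        return (offsetIndex >= 0 && offsetIndex < tokens.size()) ? tokens.get(offsetIndex) : null;
    }

    public static void main(String[] args) {
        List<String> withWs = Arrays.asList("The", " ", "cat", " ", "sleeps");
        List<String> tokens = Arrays.asList("The", "cat", "sleeps");
        System.out.println(resolveOffset(tokens, withWs, 1, 1));  // cat
        System.out.println(resolveOffset(tokens, withWs, 2, -1)); // The
    }
}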
Use of com.joliciel.talismane.tokeniser.Token in project talismane by joliciel-informatique.
The class TokeniserPatternsAndIndexesFeature, method checkInternal.
@Override
public FeatureResult<List<WeightedOutcome<String>>> checkInternal(TokenWrapper tokenWrapper, RuntimeEnvironment env) throws TalismaneException {
    Token token = tokenWrapper.getToken();
    List<WeightedOutcome<String>> resultList = new ArrayList<WeightedOutcome<String>>();
    for (TokenPatternMatch tokenMatch : token.getMatches()) {
        if (tokenMatch.getIndex() != tokenMatch.getPattern().getIndexesToTest().get(0)) {
            resultList.add(new WeightedOutcome<String>(tokenMatch.getPattern().getName() + "¤" + tokenMatch.getIndex(), 1.0));
        }
    }
    return this.generateResult(resultList);
}
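The outcome name above glues the pattern name and the match index together with "¤", a character unlikely to occur in pattern names, so the two parts remain separable downstream. A small self-contained sketch of the same naming convention; PatternMatch here is a stand-in record, not Talismane's TokenPatternMatch.

import java.util.ArrayList;
import java.util.List;

public class PatternIndexNames {

    record PatternMatch(String patternName, int index, int firstTestedIndex) {}

    static List<String> outcomeNames(List<PatternMatch> matches) {
        List<String> names = new ArrayList<>();
        for (PatternMatch m : matches) {
            // only non-primary positions produce an outcome, mirroring the
            // getIndexesToTest().get(0) test in the method above
            if (m.index() != m.firstTestedIndex())
                names.add(m.patternName() + "¤" + m.index()); // "¤" keeps name and index separable
        }
        return names;
    }

    public static void main(String[] args) {
        System.out.println(outcomeNames(List.of(
                new PatternMatch("abbreviation", 2, 0),
                new PatternMatch("abbreviation", 0, 0))));
        // prints [abbreviation¤2]
    }
}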
Use of com.joliciel.talismane.tokeniser.Token in project talismane by joliciel-informatique.
The class PatternTokeniser, method tokeniseInternal.
@Override
protected List<TokenisedAtomicTokenSequence> tokeniseInternal(TokenSequence initialSequence, Sentence sentence) throws TalismaneException, IOException {
    List<TokenisedAtomicTokenSequence> sequences;
    // Assign each separator its default value
    List<TokeniserOutcome> defaultOutcomes = this.tokeniserPatternManager.getDefaultOutcomes(initialSequence);
    List<Decision> defaultDecisions = new ArrayList<Decision>(defaultOutcomes.size());
    for (TokeniserOutcome outcome : defaultOutcomes) {
        Decision tokeniserDecision = new Decision(outcome.name());
        tokeniserDecision.addAuthority("_" + this.getClass().getSimpleName());
        tokeniserDecision.addAuthority("_" + "DefaultDecision");
        defaultDecisions.add(tokeniserDecision);
    }
    // For each test pattern, see if anything in the sentence matches it
    if (this.decisionMaker != null) {
        List<TokenPatternMatchSequence> matchingSequences = new ArrayList<TokenPatternMatchSequence>();
        Map<Token, Set<TokenPatternMatchSequence>> tokenMatchSequenceMap = new HashMap<Token, Set<TokenPatternMatchSequence>>();
        Map<TokenPatternMatchSequence, TokenPatternMatch> primaryMatchMap = new HashMap<TokenPatternMatchSequence, TokenPatternMatch>();
        Set<Token> matchedTokens = new HashSet<Token>();
        for (TokenPattern parsedPattern : this.getTokeniserPatternManager().getParsedTestPatterns()) {
            List<TokenPatternMatchSequence> matchesForThisPattern = parsedPattern.match(initialSequence);
            for (TokenPatternMatchSequence matchSequence : matchesForThisPattern) {
                if (matchSequence.getTokensToCheck().size() > 0) {
                    matchingSequences.add(matchSequence);
                    matchedTokens.addAll(matchSequence.getTokensToCheck());
                    TokenPatternMatch primaryMatch = null;
                    Token token = matchSequence.getTokensToCheck().get(0);
                    Set<TokenPatternMatchSequence> matchSequences = tokenMatchSequenceMap.get(token);
                    if (matchSequences == null) {
                        matchSequences = new TreeSet<TokenPatternMatchSequence>();
                        tokenMatchSequenceMap.put(token, matchSequences);
                    }
                    matchSequences.add(matchSequence);
                    for (TokenPatternMatch patternMatch : matchSequence.getTokenPatternMatches()) {
                        if (patternMatch.getToken().equals(token)) {
                            primaryMatch = patternMatch;
                            break;
                        }
                    }
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Found match: " + primaryMatch);
                    }
                    primaryMatchMap.put(matchSequence, primaryMatch);
                }
            }
        }
        // we want to create the n most likely token sequences
        // each sequence has to correspond to a token pattern
        Map<TokenPatternMatchSequence, List<Decision>> matchSequenceDecisionMap = new HashMap<TokenPatternMatchSequence, List<Decision>>();
        for (TokenPatternMatchSequence matchSequence : matchingSequences) {
            TokenPatternMatch match = primaryMatchMap.get(matchSequence);
            LOG.debug("next pattern match: " + match.toString());
            List<FeatureResult<?>> tokenFeatureResults = new ArrayList<FeatureResult<?>>();
            for (TokenPatternMatchFeature<?> feature : features) {
                RuntimeEnvironment env = new RuntimeEnvironment();
                FeatureResult<?> featureResult = feature.check(match, env);
                if (featureResult != null) {
                    tokenFeatureResults.add(featureResult);
                }
            }
            if (LOG.isTraceEnabled()) {
                SortedSet<String> featureResultSet = tokenFeatureResults.stream().map(f -> f.toString())
                        .collect(Collectors.toCollection(() -> new TreeSet<String>()));
                for (String featureResultString : featureResultSet) {
                    LOG.trace(featureResultString);
                }
            }
            List<Decision> decisions = this.decisionMaker.decide(tokenFeatureResults);
            for (ClassificationObserver observer : this.observers) {
                observer.onAnalyse(match.getToken(), tokenFeatureResults, decisions);
            }
            for (Decision decision : decisions) {
                decision.addAuthority("_" + this.getClass().getSimpleName());
                decision.addAuthority("_" + "Patterns");
                decision.addAuthority(match.getPattern().getName());
            }
            matchSequenceDecisionMap.put(matchSequence, decisions);
        }
        // initially create a heap with a single, empty sequence
        PriorityQueue<TokenisedAtomicTokenSequence> heap = new PriorityQueue<TokenisedAtomicTokenSequence>();
        TokenisedAtomicTokenSequence emptySequence = new TokenisedAtomicTokenSequence(sentence, 0, this.getSessionId());
        heap.add(emptySequence);
        for (int i = 0; i < initialSequence.listWithWhiteSpace().size(); i++) {
            Token token = initialSequence.listWithWhiteSpace().get(i);
            if (LOG.isTraceEnabled()) {
                LOG.trace("Token : \"" + token.getAnalyisText() + "\"");
            }
            // build a new heap for this iteration
            PriorityQueue<TokenisedAtomicTokenSequence> previousHeap = heap;
            heap = new PriorityQueue<TokenisedAtomicTokenSequence>();
            if (i == 0) {
                // first token is always "separate" from the outside world
                Decision decision = new Decision(TokeniserOutcome.SEPARATE.name());
                decision.addAuthority("_" + this.getClass().getSimpleName());
                decision.addAuthority("_" + "DefaultDecision");
                TaggedToken<TokeniserOutcome> taggedToken = new TaggedToken<>(token, decision, TokeniserOutcome.valueOf(decision.getOutcome()));
                TokenisedAtomicTokenSequence newSequence = new TokenisedAtomicTokenSequence(emptySequence);
                newSequence.add(taggedToken);
                heap.add(newSequence);
                continue;
            }
            // limit the heap breadth to K
            int maxSequences = previousHeap.size() > this.getBeamWidth() ? this.getBeamWidth() : previousHeap.size();
            for (int j = 0; j < maxSequences; j++) {
                TokenisedAtomicTokenSequence history = previousHeap.poll();
                // Find the separating & non-separating decisions
                if (history.size() > i) {
                    // token already added as part of a sequence introduced by
                    // another token
                    heap.add(history);
                } else if (tokenMatchSequenceMap.containsKey(token)) {
                    // token begins one or more match sequences; these are
                    // ordered from shortest to longest (via TreeSet)
                    List<TokenPatternMatchSequence> matchSequences = new ArrayList<TokenPatternMatchSequence>(tokenMatchSequenceMap.get(token));
                    // Since sequences P1..Pn contain each other, there are
                    // exactly matchSequences.size() + 1 consistent solutions.
                    // Assuming the default is separate:
                    // 0: all separate
                    // 1: join P1, separate rest
                    // 2: join P2, separate rest
                    // ...
                    // n: join Pn
                    // We need to add each of these to the heap, taking the
                    // product of all probabilities consistent with each
                    // solution. The probabilities for each solution are
                    // (j = join, s = separate):
                    // All separate: s1 x s2 x ... x sn
                    // P1: j1 x s2 x ... x sn
                    // P2: j1 x j2 x ... x sn
                    // ...
                    // Pn: j1 x j2 x ... x jn
                    // Any solution of the form s1 x j2 would be inconsistent,
                    // and is not considered.
                    // If Pi and Pj start and end on the exact same token, then
                    // the solution for both is
                    // Pi: j1 x ... x ji x jj x sj+1 ... x sn
                    // Pj: j1 x ... x ji x jj x sj+1 ... x sn
                    // Note that we're unlikely ever to have more than two Ps
                    // here, but we handle the general case just to be sure.
                    TokeniserOutcome defaultOutcome = TokeniserOutcome.valueOf(defaultDecisions.get(token.getIndexWithWhiteSpace()).getOutcome());
                    TokeniserOutcome otherOutcome = null;
                    if (defaultOutcome == TokeniserOutcome.SEPARATE)
                        otherOutcome = TokeniserOutcome.JOIN;
                    else
                        otherOutcome = TokeniserOutcome.SEPARATE;
                    double[] decisionProbs = new double[matchSequences.size() + 1];
                    for (int k = 0; k < decisionProbs.length; k++) {
                        decisionProbs[k] = 1;
                    }
                    // Note: k=0 is the default decision (e.g. separate all),
                    // k=1 is the first pattern; p=1 is the first pattern
                    int p = 1;
                    int prevEndIndex = -1;
                    for (TokenPatternMatchSequence matchSequence : matchSequences) {
                        int endIndex = matchSequence.getTokensToCheck().get(matchSequence.getTokensToCheck().size() - 1).getEndIndex();
                        List<Decision> decisions = matchSequenceDecisionMap.get(matchSequence);
                        for (Decision decision : decisions) {
                            for (int k = 0; k < decisionProbs.length; k++) {
                                if (decision.getOutcome().equals(defaultOutcome.name())) {
                                    // e.g. separate in most cases
                                    if (k < p && endIndex > prevEndIndex)
                                        decisionProbs[k] *= decision.getProbability();
                                    else if (k + 1 < p && endIndex <= prevEndIndex)
                                        decisionProbs[k] *= decision.getProbability();
                                } else {
                                    // e.g. join in most cases
                                    if (k >= p && endIndex > prevEndIndex)
                                        decisionProbs[k] *= decision.getProbability();
                                    else if (k + 1 >= p && endIndex <= prevEndIndex)
                                        decisionProbs[k] *= decision.getProbability();
                                }
                            }
                            // next k
                        }
                        // next decision (only 2 of these)
                        prevEndIndex = endIndex;
                        p++;
                    }
                    // transform to a probability distribution
                    double sumProbs = 0;
                    for (int k = 0; k < decisionProbs.length; k++) {
                        sumProbs += decisionProbs[k];
                    }
                    if (sumProbs > 0) {
                        for (int k = 0; k < decisionProbs.length; k++) {
                            decisionProbs[k] /= sumProbs;
                        }
                    }
                    // Apply the default decision. Since this is the default
                    // decision for all tokens in the sequence, we don't add
                    // the other tokens for now, so as to allow them to get
                    // examined one at a time, in case one of them starts its
                    // own separate match sequence.
                    Decision defaultDecision = new Decision(defaultOutcome.name(), decisionProbs[0]);
                    defaultDecision.addAuthority("_" + this.getClass().getSimpleName());
                    defaultDecision.addAuthority("_" + "Patterns");
                    for (TokenPatternMatchSequence matchSequence : matchSequences) {
                        defaultDecision.addAuthority(matchSequence.getTokenPattern().getName());
                    }
                    TaggedToken<TokeniserOutcome> defaultTaggedToken = new TaggedToken<>(token, defaultDecision, TokeniserOutcome.valueOf(defaultDecision.getOutcome()));
                    TokenisedAtomicTokenSequence defaultSequence = new TokenisedAtomicTokenSequence(history);
                    defaultSequence.add(defaultTaggedToken);
                    defaultSequence.addDecision(defaultDecision);
                    heap.add(defaultSequence);
                    // Apply one non-default decision per match sequence
                    for (int k = 0; k < matchSequences.size(); k++) {
                        TokenPatternMatchSequence matchSequence = matchSequences.get(k);
                        double prob = decisionProbs[k + 1];
                        Decision decision = new Decision(otherOutcome.name(), prob);
                        decision.addAuthority("_" + this.getClass().getSimpleName());
                        decision.addAuthority("_" + "Patterns");
                        decision.addAuthority(matchSequence.getTokenPattern().getName());
                        TaggedToken<TokeniserOutcome> taggedToken = new TaggedToken<>(token, decision, TokeniserOutcome.valueOf(decision.getOutcome()));
                        TokenisedAtomicTokenSequence newSequence = new TokenisedAtomicTokenSequence(history);
                        newSequence.add(taggedToken);
                        newSequence.addDecision(decision);
                        // add all the other tokens in this match sequence to the solution
                        for (Token tokenInSequence : matchSequence.getTokensToCheck()) {
                            if (tokenInSequence.equals(token)) {
                                continue;
                            }
                            Decision decisionInSequence = new Decision(decision.getOutcome());
                            decisionInSequence.addAuthority("_" + this.getClass().getSimpleName());
                            decisionInSequence.addAuthority("_" + "DecisionInSequence");
                            decisionInSequence.addAuthority("_" + "DecisionInSequence_non_default");
                            decisionInSequence.addAuthority("_" + "Patterns");
                            TaggedToken<TokeniserOutcome> taggedTokenInSequence = new TaggedToken<>(tokenInSequence, decisionInSequence, TokeniserOutcome.valueOf(decisionInSequence.getOutcome()));
                            newSequence.add(taggedTokenInSequence);
                        }
                        heap.add(newSequence);
                    }
                    // next match sequence
                } else {
                    // token doesn't start a match sequence, and hasn't already
                    // been added to the current sequence
                    Decision decision = defaultDecisions.get(i);
                    if (matchedTokens.contains(token)) {
                        decision = new Decision(decision.getOutcome());
                        decision.addAuthority("_" + this.getClass().getSimpleName());
                        decision.addAuthority("_" + "DecisionInSequence");
                        decision.addAuthority("_" + "DecisionInSequence_default");
                        decision.addAuthority("_" + "Patterns");
                    }
                    TaggedToken<TokeniserOutcome> taggedToken = new TaggedToken<>(token, decision, TokeniserOutcome.valueOf(decision.getOutcome()));
                    TokenisedAtomicTokenSequence newSequence = new TokenisedAtomicTokenSequence(history);
                    newSequence.add(taggedToken);
                    heap.add(newSequence);
                }
            }
            // next sequence in the old heap
        }
        // next token
        sequences = new ArrayList<TokenisedAtomicTokenSequence>();
        int k = 0;
        while (!heap.isEmpty()) {
            sequences.add(heap.poll());
            k++;
            if (k >= this.getBeamWidth())
                break;
        }
    } else {
        // no decision maker: return a single sequence built from the default decisions
        sequences = new ArrayList<TokenisedAtomicTokenSequence>();
        TokenisedAtomicTokenSequence defaultSequence = new TokenisedAtomicTokenSequence(sentence, 0, this.getSessionId());
        int i = 0;
        for (Token token : initialSequence.listWithWhiteSpace()) {
            Decision decision = defaultDecisions.get(i++);
            TaggedToken<TokeniserOutcome> taggedToken = new TaggedToken<>(token, decision, TokeniserOutcome.valueOf(decision.getOutcome()));
            defaultSequence.add(taggedToken);
        }
        sequences.add(defaultSequence);
    }
    // have decision maker?
    return sequences;
}
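Stripped of the pattern bookkeeping, the control flow above is a classic beam search: one heap per atomic-token position, each round pulling at most beamWidth best histories from the previous heap and pushing their extensions. A minimal sketch of just that skeleton, where Hypothesis is a stand-in for TokenisedAtomicTokenSequence (which in Talismane is itself ordered by score) and the fixed 0.7/0.3 scores replace the decision maker:

import java.util.List;
import java.util.PriorityQueue;

public class BeamSketch {

    record Hypothesis(String decisions, double score) {}

    static List<Hypothesis> extend(Hypothesis h, char token) {
        // each token may be separated from ('s') or joined to ('j') the previous one
        return List.of(
                new Hypothesis(h.decisions() + 's', h.score() * 0.7),
                new Hypothesis(h.decisions() + 'j', h.score() * 0.3));
    }

    static Hypothesis best(String input, int beamWidth) {
        PriorityQueue<Hypothesis> heap = new PriorityQueue<>((a, b) -> Double.compare(b.score(), a.score()));
        heap.add(new Hypothesis("", 1.0));
        for (char token : input.toCharArray()) {
            // build a new heap for this iteration, pruned to the beam width
            PriorityQueue<Hypothesis> previousHeap = heap;
            heap = new PriorityQueue<>((a, b) -> Double.compare(b.score(), a.score()));
            int max = Math.min(previousHeap.size(), beamWidth);
            for (int j = 0; j < max; j++) {
                for (Hypothesis next : extend(previousHeap.poll(), token))
                    heap.add(next);
            }
        }
        return heap.poll();
    }

    public static void main(String[] args) {
        System.out.println(best("abc", 2)); // highest-probability decision string
    }
}

The real method adds one wrinkle the sketch omits: a history that was already extended by a multi-token match sequence skips ahead, which is what the history.size() > i test guards.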
Use of com.joliciel.talismane.tokeniser.Token in project talismane by joliciel-informatique.
The class TokenPattern, method match.
/**
* Return a TokenPatternMatchSequence for each sequence of <i>n</i> tokens in
* a TokenSequence which match this pattern. Will also add any matches to
* Token.getMatches() for the matched tokens.
*/
public List<TokenPatternMatchSequence> match(TokenSequence tokenSequence) {
    List<TokenPatternMatchSequence> matchingSequences = new ArrayList<TokenPatternMatchSequence>();
    boolean matchSentenceStart = false;
    if (this.getParsedPattern().get(0).pattern().equals("\\b")) {
        matchSentenceStart = true;
    }
    boolean matchSentenceEnd = false;
    if (this.getParsedPattern().get(this.getParsedPattern().size() - 1).pattern().equals("\\b")) {
        matchSentenceEnd = true;
    }
    for (int t0 = -1; t0 < tokenSequence.listWithWhiteSpace().size(); t0++) {
        boolean haveMatch = false;
        List<Token> matchingSequence = new ArrayList<Token>();
        if (t0 >= 0) {
            // does the current token match the beginning of the pattern?
            Token token = tokenSequence.listWithWhiteSpace().get(t0);
            if (checkTokenForMatch(this.getParsedPattern().get(0), token)) {
                // potential match, let's follow it through
                haveMatch = true;
                // we match so far, add it to the temp list
                matchingSequence.add(token);
            }
        } else if (matchSentenceStart) {
            // automatically match start of sentence
            haveMatch = true;
            // add null token to the temp list
            matchingSequence.add(null);
        }
        if (haveMatch) {
            int p = 1;
            int t1 = t0 + 1;
            while (p < this.getParsedPattern().size() && t1 < tokenSequence.listWithWhiteSpace().size()) {
                Token aToken = tokenSequence.listWithWhiteSpace().get(t1);
                Pattern pattern = this.getParsedPattern().get(p);
                if (checkTokenForMatch(pattern, aToken)) {
                    // we match so far, add it to the temp list
                    matchingSequence.add(aToken);
                } else {
                    // pattern doesn't match
                    haveMatch = false;
                    break;
                }
                p++;
                t1++;
            }
            if (t1 == tokenSequence.listWithWhiteSpace().size() && p == this.getParsedPattern().size() - 1 && matchSentenceEnd) {
                // add a null token representing the sentence end
                matchingSequence.add(null);
            }
            // did we get a full match (or did we hit the end of the sentence first)?
            if (matchingSequence.size() != this.getParsedPattern().size()) {
                haveMatch = false;
            }
        }
        if (haveMatch) {
            TokenPatternMatchSequence tokenPatternMatchSequence = new TokenPatternMatchSequence(this, matchingSequence);
            matchingSequences.add(tokenPatternMatchSequence);
            for (Token aToken : matchingSequence) {
                tokenPatternMatchSequence.addMatch(aToken);
            }
        }
    }
    if (LOG.isTraceEnabled()) {
        if (matchingSequences.size() > 0)
            LOG.trace(this.getName() + ": matchingSequences = " + matchingSequences);
    }
    return matchingSequences;
}
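At its core, match slides the parsed pattern across listWithWhiteSpace and keeps every window whose tokens match element-wise; the null entries are virtual tokens standing in for the sentence boundaries when the pattern is anchored with \b. A simplified analogue over plain strings (sentence anchoring omitted); WindowMatchSketch and its method names are illustrative, not Talismane API.

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;

public class WindowMatchSketch {

    static List<List<String>> match(List<Pattern> pattern, List<String> tokens) {
        List<List<String>> matches = new ArrayList<>();
        // slide a window of pattern.size() elements across the token list
        for (int t0 = 0; t0 + pattern.size() <= tokens.size(); t0++) {
            List<String> window = new ArrayList<>();
            boolean haveMatch = true;
            for (int p = 0; p < pattern.size(); p++) {
                String token = tokens.get(t0 + p);
                if (pattern.get(p).matcher(token).matches()) {
                    window.add(token);
                } else {
                    haveMatch = false;
                    break;
                }
            }
            if (haveMatch)
                matches.add(window);
        }
        return matches;
    }

    public static void main(String[] args) {
        List<Pattern> p = List.of(Pattern.compile("\\p{Alpha}+"), Pattern.compile("\\."));
        System.out.println(match(p, List.of("etc", ".", "done"))); // [[etc, .]]
    }
}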
Use of com.joliciel.talismane.tokeniser.Token in project talismane by joliciel-informatique.
The class TokeniserPatternManager, method getDefaultOutcomes.
/**
* Takes a sequence of atomic tokens and applies default decisions for each
* separator.
*/
public List<TokeniserOutcome> getDefaultOutcomes(TokenSequence tokenSequence) {
    List<TokeniserOutcome> defaultOutcomes = new ArrayList<TokeniserOutcome>();
    // Assign each separator its default value
    TokeniserOutcome nextOutcome = TokeniserOutcome.SEPARATE;
    Pattern tokenSeparators = Tokeniser.getTokenSeparators(sessionId);
    for (Token token : tokenSequence.listWithWhiteSpace()) {
        TokeniserOutcome outcome = null;
        if (tokenSeparators.matcher(token.getAnalyisText()).matches()) {
            boolean defaultValueFound = false;
            for (Entry<SeparatorDecision, Pattern> entry : this.getSeparatorDefaultPatterns().entrySet()) {
                if (entry.getValue().matcher(token.getAnalyisText()).matches()) {
                    defaultValueFound = true;
                    SeparatorDecision defaultSeparatorDecision = entry.getKey();
                    switch (defaultSeparatorDecision) {
                    case IS_SEPARATOR:
                        outcome = TokeniserOutcome.SEPARATE;
                        nextOutcome = TokeniserOutcome.SEPARATE;
                        break;
                    case IS_NOT_SEPARATOR:
                        outcome = TokeniserOutcome.JOIN;
                        nextOutcome = TokeniserOutcome.JOIN;
                        break;
                    case IS_SEPARATOR_BEFORE:
                        outcome = TokeniserOutcome.SEPARATE;
                        nextOutcome = TokeniserOutcome.JOIN;
                        break;
                    case IS_SEPARATOR_AFTER:
                        outcome = TokeniserOutcome.JOIN;
                        nextOutcome = TokeniserOutcome.SEPARATE;
                        break;
                    case NOT_APPLICABLE:
                        break;
                    default:
                        break;
                    }
                    break;
                }
            }
            if (!defaultValueFound) {
                outcome = TokeniserOutcome.SEPARATE;
                nextOutcome = TokeniserOutcome.SEPARATE;
            }
            defaultOutcomes.add(outcome);
        } else {
            defaultOutcomes.add(nextOutcome);
        }
    }
    return defaultOutcomes;
}
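The switch above encodes a small truth table: each separator kind fixes both the outcome for the separator token itself and the default that propagates to the following token. A compact, self-contained sketch of just that table; the enum names mirror the cases above, but SeparatorDefaults and apply are illustrative, not Talismane API.

public class SeparatorDefaults {

    enum Outcome { SEPARATE, JOIN }
    enum SeparatorDecision { IS_SEPARATOR, IS_NOT_SEPARATOR, IS_SEPARATOR_BEFORE, IS_SEPARATOR_AFTER, NOT_APPLICABLE }

    // returns { outcome for this separator, default carried to the next token }
    static Outcome[] apply(SeparatorDecision d) {
        switch (d) {
        case IS_SEPARATOR:
            return new Outcome[] { Outcome.SEPARATE, Outcome.SEPARATE };
        case IS_NOT_SEPARATOR:
            return new Outcome[] { Outcome.JOIN, Outcome.JOIN };
        case IS_SEPARATOR_BEFORE:
            return new Outcome[] { Outcome.SEPARATE, Outcome.JOIN };
        case IS_SEPARATOR_AFTER:
            return new Outcome[] { Outcome.JOIN, Outcome.SEPARATE };
        default:
            return new Outcome[] { Outcome.SEPARATE, Outcome.SEPARATE };
        }
    }

    public static void main(String[] args) {
        // a separator configured as IS_SEPARATOR_BEFORE: split before it,
        // but glue it to whatever follows
        Outcome[] r = apply(SeparatorDecision.IS_SEPARATOR_BEFORE);
        System.out.println(r[0] + " then " + r[1]); // SEPARATE then JOIN
    }
}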