Use of edu.illinois.cs.cogcomp.edison.features.Feature in project cogcomp-nlp by CogComp.
From the class LabelTwoBefore, method getFeatures:
@Override
public Set<Feature> getFeatures(Constituent c) throws EdisonException {
    String classifier;
    String prefix = "LabelTwoBefore";
    TextAnnotation ta = c.getTextAnnotation();
    // Shift the constituent's span two tokens to the left to look at the token two before.
    int start = c.getStartSpan() - 2;
    int end = c.getEndSpan() - 2;
    Set<Feature> features = new LinkedHashSet<>();
    for (int i = start; i < end; i++) {
        if (!isPOSFromCounting) {
            // Read the tag directly from the POS view.
            classifier = prefix + "_" + "POS";
            if (i >= 0) {
                TokenLabelView POSView = (TokenLabelView) ta.getView(ViewNames.POS);
                String form = ta.getToken(i);
                String tag = POSView.getLabel(i);
                features.add(new DiscreteFeature(classifier + ":" + tag + "_" + form));
            } else
                features.add(new DiscreteFeature(classifier + ":" + ""));
        } else if (isBaseLineCounting) {
            // Tag with the baseline counter.
            classifier = prefix + "_" + "BaselinePOS";
            if (i >= 0) {
                String form = ta.getToken(i);
                String tag = counter.tag(i, ta);
                features.add(new DiscreteFeature(classifier + ":" + tag + "_" + form));
            } else
                features.add(new DiscreteFeature(classifier + ":" + ""));
        } else {
            // Tag with the Mikheev counter.
            classifier = prefix + "_" + "MikheevPOS";
            if (i >= 0) {
                String form = ta.getToken(i);
                String tag = counter.tag(i, ta);
                features.add(new DiscreteFeature(classifier + ":" + tag + "_" + form));
            } else
                features.add(new DiscreteFeature(classifier + ":" + ""));
        }
    }
    return features;
}
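For orientation, here is a minimal sketch of how a token-level extractor such as LabelTwoBefore is typically driven: iterate over the TOKENS view of a TextAnnotation and call getFeatures on each token constituent. The class name TokenFeatureDump and the helper extractAll are illustrative, not part of the project, and how the extractor and the TextAnnotation are constructed is assumed to happen elsewhere; the import paths are assumed to match the cogcomp-nlp package layout.

import java.util.LinkedHashSet;
import java.util.Set;

import edu.illinois.cs.cogcomp.core.datastructures.ViewNames;
import edu.illinois.cs.cogcomp.core.datastructures.textannotation.Constituent;
import edu.illinois.cs.cogcomp.core.datastructures.textannotation.TextAnnotation;
import edu.illinois.cs.cogcomp.edison.features.Feature;
import edu.illinois.cs.cogcomp.edison.features.FeatureExtractor;
import edu.illinois.cs.cogcomp.edison.utilities.EdisonException;

/** Illustrative helper, not project code: runs a token-level extractor over a whole TextAnnotation. */
public class TokenFeatureDump {

    /**
     * Applies the given FeatureExtractor (e.g. an already-constructed LabelTwoBefore)
     * to every token constituent of ta and collects the resulting features.
     */
    public static Set<Feature> extractAll(FeatureExtractor fex, TextAnnotation ta)
            throws EdisonException {
        Set<Feature> all = new LinkedHashSet<>();
        for (Constituent token : ta.getView(ViewNames.TOKENS).getConstituents()) {
            all.addAll(fex.getFeatures(token));
        }
        return all;
    }
}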
Use of edu.illinois.cs.cogcomp.edison.features.Feature in project cogcomp-nlp by CogComp.
From the class MixedChunkWindowTwoBeforePOSWindowThreeBefore, method getFeatures:
/**
 * This feature extractor assumes that the TOKENS, POS and SHALLOW_PARSE views have been generated
 * for the Constituent's TextAnnotation. It uses the constituent's own POS tag as well as the POS
 * tags and SHALLOW_PARSE (chunk) labels of the previous two tokens, and returns them as discrete
 * features.
 */
@Override
public Set<Feature> getFeatures(Constituent c) throws EdisonException {
    TextAnnotation ta = c.getTextAnnotation();
    try {
        TOKENS = ta.getView(ViewNames.TOKENS);
        POS = ta.getView(ViewNames.POS);
        SHALLOW_PARSE = ta.getView(ViewNames.SHALLOW_PARSE);
    } catch (Exception e) {
        e.printStackTrace();
    }
    // We can assume that the constituent in this case is a word (token) as described by the LBJ
    // chunk definition.
    int startspan = c.getStartSpan();
    int endspan = c.getEndSpan();
    // All our constituents are words (tokens); fetch the two words before the current one.
    int k = -2;
    List<Constituent> wordstwobefore = getwordskfrom(TOKENS, startspan, endspan, k);
    if (wordstwobefore.size() != 2)
        return null;
    String[] tags = new String[3];
    String[] labels = new String[2];
    int i = 0;
    for (Constituent token : wordstwobefore) {
        // There should be exactly one POS tag and one chunk label for each token.
        List<String> POS_tag = POS.getLabelsCoveringSpan(token.getStartSpan(), token.getEndSpan());
        List<String> Chunk_label = SHALLOW_PARSE.getLabelsCoveringSpan(token.getStartSpan(), token.getEndSpan());
        if (POS_tag.size() != 1 || Chunk_label.size() != 1) {
            logger.warn("Error: token has more than one POS tag or chunk label.");
        }
        labels[i] = Chunk_label.get(0);
        tags[i] = POS_tag.get(0);
        i++;
    }
    tags[i] = POS.getLabelsCoveringSpan(startspan, endspan).get(0);
    Set<Feature> __result = new LinkedHashSet<Feature>();
    String classifier = "MixedChunkWindowTwoBeforePOSWindowThreeBefore";
    // ll: conjunction of the chunk labels of the two preceding tokens.
    String __id = classifier + ":" + "ll";
    String __value = "(" + (labels[0] + "_" + labels[1]) + ")";
    /*
     * Commented-out debugging code that wrote features to a local file:
     * BufferedWriter output = null;
     * try {
     *     File file = new File("/home/pvijaya2/feat-output.txt");
     *     if (!file.exists()) {
     *         file.createNewFile();
     *     }
     *     FileWriter fw = new FileWriter(file, true);
     *     // BufferedWriter gives better performance
     *     BufferedWriter bw = new BufferedWriter(fw);
     */
    logger.info(__id + __value);
    __result.add(new DiscreteFeature(__id + __value));
    // lt1: chunk label of the token two before + POS tag of the token one before.
    __id = classifier + ":" + "lt1";
    __value = "(" + (labels[0] + "_" + tags[1]) + ")";
    logger.info(__id + __value);
    __result.add(new DiscreteFeature(__id + __value));
    // lt2: chunk label of the token one before + POS tag of the current token.
    __id = classifier + ":" + "lt2";
    __value = "" + (labels[1] + "_" + tags[2]);
    logger.info(__id + __value);
    __result.add(new DiscreteFeature(__id + __value));
    return __result;
}
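To make the naming scheme concrete, the snippet below reproduces only the string construction for the three feature ids (ll, lt1, lt2), using made-up chunk labels and POS tags; it illustrates the format produced above and is not code from the project.

/** Illustration of the feature-name format above, with hypothetical labels and tags. */
public class MixedChunkFeatureNameSketch {
    public static void main(String[] args) {
        String classifier = "MixedChunkWindowTwoBeforePOSWindowThreeBefore";
        // Hypothetical values: chunk labels of the two preceding tokens, and POS tags of
        // the token two before, the token one before, and the current token.
        String[] labels = { "B-NP", "I-NP" };
        String[] tags = { "DT", "JJ", "NN" };
        // ll : conjunction of the two preceding chunk labels
        System.out.println(classifier + ":" + "ll" + "(" + labels[0] + "_" + labels[1] + ")");
        // lt1: chunk label two before + POS tag of the token one before
        System.out.println(classifier + ":" + "lt1" + "(" + labels[0] + "_" + tags[1] + ")");
        // lt2: chunk label one before + POS tag of the current token (no parentheses, as above)
        System.out.println(classifier + ":" + "lt2" + labels[1] + "_" + tags[2]);
    }
}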
Use of edu.illinois.cs.cogcomp.edison.features.Feature in project cogcomp-nlp by CogComp.
From the class POSBaseLineFeatureExtractor, method getFeatures:
@Override
public Set<Feature> getFeatures(Constituent c) throws EdisonException {
    String classifier = "BaseLinePOS";
    TextAnnotation ta = c.getTextAnnotation();
    int start = c.getStartSpan();
    int end = c.getEndSpan();
    Set<Feature> features = new LinkedHashSet<>();
    for (int i = start; i < end; i++) {
        String form = ta.getToken(i);
        // counter supplies the baseline POS tag for token i.
        String tag = counter.tag(i, ta);
        features.add(new DiscreteFeature(classifier + ":" + tag + "_" + form));
    }
    return features;
}
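The extractor delegates tagging to counter, a baseline built from corpus counts. As a rough illustration of what such a baseline does, here is a minimal most-frequent-tag sketch; it is an assumption-labeled stand-in, not the project's POSBaseLineCounter.

import java.util.HashMap;
import java.util.Map;

/** Illustrative most-frequent-tag baseline; not the project's POSBaseLineCounter. */
public class MostFrequentTagBaseline {
    // word form -> (tag -> count), accumulated from a tagged training corpus
    private final Map<String, Map<String, Integer>> counts = new HashMap<>();

    /** Records one (form, tag) observation from training data. */
    public void observe(String form, String tag) {
        counts.computeIfAbsent(form, f -> new HashMap<>())
              .merge(tag, 1, Integer::sum);
    }

    /** Returns the most frequent tag seen for the form, or a default for unseen words. */
    public String tag(String form) {
        Map<String, Integer> tagCounts = counts.get(form);
        if (tagCounts == null)
            return "UNKNOWN";
        return tagCounts.entrySet().stream()
                .max(Map.Entry.comparingByValue())
                .get().getKey();
    }
}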
Use of edu.illinois.cs.cogcomp.edison.features.Feature in project cogcomp-nlp by CogComp.
From the class TestWordConjunctionOneTwoThreeGramWindowTwo, method test:
@Test
public final void test() throws EdisonException {
    // Use the constituent at index 3 ("the") as the focus token for the test.
    List<Constituent> testList = ta.getView("TOKENS").getConstituents();
    Constituent test = testList.get(3);
    WordConjunctionOneTwoThreeGramWindowTwo fex =
            new WordConjunctionOneTwoThreeGramWindowTwo("WordConj3GramWin2");
    Set<Feature> feats = fex.getFeatures(test);
    String[] expected_outputs = {
            "WordConjunctionOneTwoThreeGramWindowTwo:-2_1(construction)", "WordConjunctionOneTwoThreeGramWindowTwo:-1_1(of)", "WordConjunctionOneTwoThreeGramWindowTwo:0_1(the)",
            "WordConjunctionOneTwoThreeGramWindowTwo:1_1(John)", "WordConjunctionOneTwoThreeGramWindowTwo:2_1(Smith)", "WordConjunctionOneTwoThreeGramWindowTwo:-2_2(construction_of)",
            "WordConjunctionOneTwoThreeGramWindowTwo:-1_2(of_the)", "WordConjunctionOneTwoThreeGramWindowTwo:0_2(the_John)", "WordConjunctionOneTwoThreeGramWindowTwo:1_2(John_Smith)",
            "WordConjunctionOneTwoThreeGramWindowTwo:2_2(Smith)", "WordConjunctionOneTwoThreeGramWindowTwo:-2_3(construction_of_the)", "WordConjunctionOneTwoThreeGramWindowTwo:-1_3(of_the_John)",
            "WordConjunctionOneTwoThreeGramWindowTwo:0_3(the_John_Smith)", "WordConjunctionOneTwoThreeGramWindowTwo:1_3(John_Smith)", "WordConjunctionOneTwoThreeGramWindowTwo:2_3(Smith)" };
    if (feats == null)
        fail("Feats are returning NULL.");
    for (Feature f : feats) {
        assertTrue(ArrayUtils.contains(expected_outputs, f.getName()));
    }
}
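The expected strings follow a regular pattern: for each offset in a window of two around the focus token, the feature name records the offset, the n-gram size (1 to 3), and the words starting at that offset, truncated at the window edge. The self-contained sketch below reproduces that naming pattern; it is inferred from the expected outputs above, not taken from the extractor's source.

import java.util.ArrayList;
import java.util.List;

/** Reconstructs the feature-name pattern seen in the expected outputs; illustration only. */
public class WordConjunctionNamePattern {

    /** window holds the five tokens from offset -2 to +2 around the focus token. */
    public static List<String> names(String[] window, String prefix) {
        List<String> out = new ArrayList<>();
        for (int n = 1; n <= 3; n++) {                       // n-gram sizes 1..3
            for (int offset = -2; offset <= 2; offset++) {   // window of two around the focus
                StringBuilder gram = new StringBuilder();
                for (int j = 0; j < n && offset + 2 + j < window.length; j++) {
                    if (j > 0)
                        gram.append("_");
                    gram.append(window[offset + 2 + j]);     // truncate at the window edge
                }
                out.add(prefix + ":" + offset + "_" + n + "(" + gram + ")");
            }
        }
        return out;
    }

    public static void main(String[] args) {
        String[] window = { "construction", "of", "the", "John", "Smith" };
        names(window, "WordConjunctionOneTwoThreeGramWindowTwo").forEach(System.out::println);
    }
}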
Use of edu.illinois.cs.cogcomp.edison.features.Feature in project cogcomp-nlp by CogComp.
From the class TestBrownClusterFeatureExtractor, method test:
@Test
public final void test() {
    int[] prefixLengths = new int[] { 4, 6, 10, 20 };
    BrownClusterFeatureExtractor bcfex1 = BrownClusterFeatureExtractor.instance1000;
    BrownClusterFeatureExtractor bcfex2 = null;
    try {
        bcfex2 = new BrownClusterFeatureExtractor("bllip", "brownBllipClusters", prefixLengths);
    } catch (EdisonException e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
    BrownClusterFeatureExtractor bcfex3 = null;
    try {
        bcfex3 = new BrownClusterFeatureExtractor("wiki",
                "brown-english-wikitext.case-intact.txt-c1000-freq10-v3.txt", prefixLengths);
    } catch (EdisonException e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
    TokenizerTextAnnotationBuilder taBldr = new TokenizerTextAnnotationBuilder(new StatefulTokenizer());
    TextAnnotation ta = taBldr.createTextAnnotation("test", "test",
            "This test sentence has Joynt and Lieberknecht and Fibonnaci in it "
                    + "just to exercise possible brown cluster hits in resources used by NER.");
    Set<Feature> feats = new HashSet<>();
    for (int wordIndex = 0; wordIndex < ta.size(); ++wordIndex) {
        try {
            feats.addAll(bcfex1.getWordFeatures(ta, wordIndex));
            feats.addAll(bcfex2.getWordFeatures(ta, wordIndex));
            feats.addAll(bcfex3.getWordFeatures(ta, wordIndex));
        } catch (EdisonException e) {
            e.printStackTrace();
            fail(e.getMessage());
        }
    }
    assertTrue(ta.hasView(ViewNames.BROWN_CLUSTERS + "_wiki"));
    String[] featArray = new String[feats.size()];
    int i = 0;
    for (Feature f : feats)
        featArray[i++] = f.toString();
    Arrays.sort(featArray);
    String actualOutput = StringUtils.join(",", featArray);
    assertEquals(expectedOutput, actualOutput);
}
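Brown-cluster features are typically prefixes of a word's cluster bit path at the configured lengths (here 4, 6, 10 and 20). The sketch below shows that general idea with a made-up cluster path; it does not reproduce BrownClusterFeatureExtractor's exact feature names or resource handling.

import java.util.LinkedHashSet;
import java.util.Set;

/** Illustration of prefix features over a Brown-cluster bit path; not the project's extractor. */
public class BrownPrefixSketch {

    /** Emits one feature per configured prefix length of the cluster path. */
    public static Set<String> prefixFeatures(String clusterPath, int[] prefixLengths) {
        Set<String> feats = new LinkedHashSet<>();
        for (int len : prefixLengths) {
            // Shorter paths contribute the whole path for larger prefix lengths.
            String prefix = clusterPath.substring(0, Math.min(len, clusterPath.length()));
            feats.add("prefix-" + len + ":" + prefix);
        }
        return feats;
    }

    public static void main(String[] args) {
        // "110100101101" is a made-up cluster path for some word.
        System.out.println(prefixFeatures("110100101101", new int[] { 4, 6, 10, 20 }));
    }
}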