Usage of edu.stanford.nlp.trees.HeadFinder in the CoreNLP project (stanfordnlp): class Preferences, method lookupHeadFinder.
/**
 * Looks up a {@link HeadFinder} implementation by name.
 * <p>
 * Known head finders may be named by their simple class name
 * (case-insensitively); any other string is treated as a fully
 * qualified class name and instantiated reflectively via its
 * no-argument constructor.
 *
 * @param headfinderName simple name of a known head finder, or a fully
 *                       qualified class name of a HeadFinder implementation
 * @return a new HeadFinder instance, or {@code null} if the name is unknown
 *         and reflective instantiation fails
 */
static HeadFinder lookupHeadFinder(String headfinderName) {
  if (headfinderName.equalsIgnoreCase("ArabicHeadFinder")) {
    return new ArabicHeadFinder();
  } else if (headfinderName.equalsIgnoreCase("BikelChineseHeadFinder")) {
    return new BikelChineseHeadFinder();
  } else if (headfinderName.equalsIgnoreCase("ChineseHeadFinder")) {
    return new ChineseHeadFinder();
  } else if (headfinderName.equalsIgnoreCase("ChineseSemanticHeadFinder")) {
    return new ChineseSemanticHeadFinder();
  } else if (headfinderName.equalsIgnoreCase("CollinsHeadFinder")) {
    return new CollinsHeadFinder();
  } else if (headfinderName.equalsIgnoreCase("DybroFrenchHeadFinder")) {
    return new DybroFrenchHeadFinder();
  } else if (headfinderName.equalsIgnoreCase("LeftHeadFinder")) {
    return new LeftHeadFinder();
  } else if (headfinderName.equalsIgnoreCase("ModCollinsHeadFinder")) {
    return new ModCollinsHeadFinder();
  } else if (headfinderName.equalsIgnoreCase("NegraHeadFinder")) {
    return new NegraHeadFinder();
  } else if (headfinderName.equalsIgnoreCase("SemanticHeadFinder")) {
    return new SemanticHeadFinder();
  } else if (headfinderName.equalsIgnoreCase("SunJurafskyChineseHeadFinder")) {
    return new SunJurafskyChineseHeadFinder();
  } else if (headfinderName.equalsIgnoreCase("TueBaDZHeadFinder")) {
    return new TueBaDZHeadFinder();
  } else if (headfinderName.equalsIgnoreCase("UniversalSemanticHeadFinder")) {
    return new UniversalSemanticHeadFinder();
  } else {
    // Not a known short name: try to load it as a fully qualified class name.
    try {
      Class<?> headfinder = Class.forName(headfinderName);
      // getDeclaredConstructor().newInstance() replaces the deprecated
      // Class.newInstance(), which could silently propagate checked
      // exceptions thrown by the constructor.
      return (HeadFinder) headfinder.getDeclaredConstructor().newInstance();
    } catch (Exception e) {
      // Unknown class, wrong type, or no accessible no-arg constructor:
      // signal failure with null, as callers of this lookup expect.
      return null;
    }
  }
}
Usage of edu.stanford.nlp.trees.HeadFinder in the CoreNLP project (stanfordnlp): class TregexGUI, method loadPreferences.
/**
 * Loads saved application preferences and applies them to the GUI:
 * general search options, display colors/fonts, tree-display settings,
 * and advanced options (head finder, tree reader factory, encoding).
 * Also lazily creates the preferences dialog and asks it to validate
 * the encoding against the chosen head finder / tree reader factory.
 */
void loadPreferences() {
  // general parameters
  InputPanel.getInstance().enableTsurgeon(Preferences.getEnableTsurgeon());
  MatchesPanel.getInstance().setShowOnlyMatchedPortion(Preferences.getMatchPortionOnly());
  // display stuff
  MatchesPanel.getInstance().setHighlightColor(Preferences.getHighlightColor());
  InputPanel.getInstance().setNumRecentPatterns(Preferences.getHistorySize());
  MatchesPanel.getInstance().setMaxMatches(Preferences.getMaxMatches());
  // tree display stuff
  DisplayMatchesPanel.getInstance().setMatchedColor(Preferences.getMatchedColor());
  DisplayMatchesPanel.getInstance().setDefaultColor(Preferences.getTreeColor());
  DisplayMatchesPanel.getInstance().setFontName(Preferences.getFont());
  MatchesPanel.getInstance().setFontName(Preferences.getFont());
  int fontSize = Preferences.getFontSize();
  // 0 means "no stored preference"; keep the panel's default in that case.
  if (fontSize != 0) {
    // Reuse the value already fetched instead of re-reading the preference.
    DisplayMatchesPanel.getInstance().setFontSize(fontSize);
  }
  // advanced stuff
  HeadFinder hf = Preferences.getHeadFinder();
  InputPanel.getInstance().setHeadFinder(hf);
  TreeReaderFactory trf = Preferences.getTreeReaderFactory();
  FilePanel.getInstance().setTreeReaderFactory(trf);
  // NOTE(review): hf/trf are assumed non-null here; confirm Preferences
  // never returns null for these before relying on getClass().
  String hfName = hf.getClass().getSimpleName();
  String trfName = trf.getClass().getSimpleName();
  String encoding = Preferences.getEncoding();
  if (encoding != null && !encoding.equals("")) {
    FileTreeModel.setCurEncoding(encoding);
  }
  // Language-specific head finders / readers imply a matching display font.
  if (PreferencesPanel.isChinese(hfName, trfName)) {
    setChineseFont();
  } else if (PreferencesPanel.isArabic(hfName, trfName)) {
    setArabicFont();
  }
  if (preferenceDialog == null) {
    preferenceDialog = new PreferencesPanel(this);
  }
  preferenceDialog.checkEncodingAndDisplay(hfName, trfName);
}
Usage of edu.stanford.nlp.trees.HeadFinder in the CoreNLP project (stanfordnlp): class CoNLLDocumentReader, method writeTabSep.
/**
 * Writes one sentence in a tab-separated format: one token per line with
 * columns {@code word<TAB>tag<TAB>ner<TAB>mentionLabel}, followed by a
 * blank line. The mention column is "MENTION" for tokens that are the
 * syntactic head of a coreference mention (as determined by a
 * ModCollinsHeadFinder over the sentence's parse tree) and "O" otherwise.
 * A possessive "'s" head immediately following a token shifts the MENTION
 * label onto the preceding token.
 *
 * @param pw       destination writer (not closed by this method)
 * @param sentence annotated sentence carrying tokens and a parse tree
 * @param chainmap coreference chain map (currently unused in this method)
 */
public static void writeTabSep(PrintWriter pw, CoreMap sentence, CollectionValuedMap<String, CoreMap> chainmap) {
  HeadFinder headFinder = new ModCollinsHeadFinder();
  List<CoreLabel> sentenceAnno = sentence.get(CoreAnnotations.TokensAnnotation.class);
  Tree sentenceTree = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
  Map<Pair<Integer, Integer>, String> sentenceInfo = Generics.newHashMap();
  Set<Tree> sentenceSubTrees = sentenceTree.subTrees();
  sentenceTree.setSpans();
  // Index every subtree (and its leaves) by its word span so mentions can
  // be matched to constituents below.
  Map<Pair<Integer, Integer>, Tree> treeSpanMap = Generics.newHashMap();
  Map<Pair<Integer, Integer>, List<Tree>> wordSpanMap = Generics.newHashMap();
  for (Tree ctree : sentenceSubTrees) {
    IntPair span = ctree.getSpan();
    if (span != null) {
      treeSpanMap.put(Pair.makePair(span.getSource(), span.getTarget()), ctree);
      wordSpanMap.put(Pair.makePair(span.getSource(), span.getTarget()), ctree.getLeaves());
    }
  }
  // Columns: 0=word, 1=tag, 2=ner, 3=mention label.
  String[][] finalSentence = new String[sentenceAnno.size()][];
  Map<Pair<Integer, Integer>, String> allHeads = Generics.newHashMap();
  int index = -1;
  for (CoreLabel newAnno : sentenceAnno) {
    index += 1;
    String word = newAnno.word();
    String tag = newAnno.tag();
    String cat = newAnno.ner();
    String coref = newAnno.get(CorefCoreAnnotations.CorefAnnotation.class);
    finalSentence[index] = new String[4];
    finalSentence[index][0] = word;
    finalSentence[index][1] = tag;
    finalSentence[index][2] = cat;
    finalSentence[index][3] = coref;
    if (coref == null) {
      sentenceInfo.put(Pair.makePair(index, index), coref);
      finalSentence[index][3] = "O";
    } else {
      // A token may belong to several coref groups, separated by '|'.
      String[] allC = coref.split("\\|");
      for (String corefG : allC) {
        Pair<Integer, Integer> mention = getMention(index, corefG, sentenceAnno);
        if (!include(sentenceInfo, mention, corefG)) {
          // find largest NP in mention
          sentenceInfo.put(mention, corefG);
          Tree mentionTree = treeSpanMap.get(mention);
          String head = null;
          if (mentionTree != null) {
            head = mentionTree.headTerminal(headFinder).nodeString();
          } else if (mention.first.equals(mention.second)) {
            // Single-token mention with no matching constituent: the token
            // itself is the head.
            head = word;
          }
          allHeads.put(mention, head);
        }
      }
      // containsValue replaces values().contains — same semantics, clearer intent.
      if (allHeads.containsValue(word)) {
        finalSentence[index][3] = "MENTION";
      } else {
        finalSentence[index][3] = "O";
      }
    }
  }
  for (int i = 0; i < finalSentence.length; i++) {
    String[] wordInfo = finalSentence[i];
    if (i < finalSentence.length - 1) {
      String[] nextWordInfo = finalSentence[i + 1];
      // Possessive handling: move the MENTION label from a head "'s" token
      // onto the word it attaches to.
      if (nextWordInfo[3].equals("MENTION") && nextWordInfo[0].equals("'s")) {
        wordInfo[3] = "MENTION";
        finalSentence[i + 1][3] = "O";
      }
    }
    pw.println(String.join("\t", wordInfo));
  }
  // Blank line terminates the sentence block.
  pw.println();
}
Usage of edu.stanford.nlp.trees.HeadFinder in the CoreNLP project (stanfordnlp): class NoPunctuationHeadFinder, method main.
/**
 * Simple test driver: reads a treebank from the path given as the first
 * command-line argument, percolates heads with a NoPunctuationHeadFinder,
 * and pretty-prints each head-annotated tree to stdout.
 *
 * @param args args[0] is the treebank path to load
 */
public static void main(String[] args) {
  // simple testing code
  Treebank treebank = new DiskTreebank();
  CategoryWordTag.suppressTerminalDetails = true;
  treebank.loadPath(args[0]);
  final HeadFinder headFinder = new NoPunctuationHeadFinder();
  treebank.apply(tree -> {
    tree.percolateHeads(headFinder);
    tree.pennPrint();
    System.out.println();
  });
}
use of edu.stanford.nlp.trees.HeadFinder in project CoreNLP by stanfordnlp.
the class ShiftReduceParser method binarizeTreebank.
/**
 * Binarizes every tree in the given treebank for shift-reduce parsing.
 * Each tree is run through a simple binarizer plus a basic-category
 * transformer, then head-annotated with a BinaryHeadFinder and its
 * leaves indexed starting from 1.
 *
 * @param treebank trees to binarize
 * @param op       parser options supplying the head finder and language pack
 * @return the transformed, head-annotated, leaf-indexed trees
 */
public static List<Tree> binarizeTreebank(Treebank treebank, Options op) {
  // Build the binarize-then-relabel pipeline.
  TreeBinarizer treeBinarizer = TreeBinarizer.simpleTreeBinarizer(op.tlpParams.headFinder(), op.tlpParams.treebankLanguagePack());
  BasicCategoryTreeTransformer categoryTransformer = new BasicCategoryTreeTransformer(op.langpack());
  CompositeTreeTransformer pipeline = new CompositeTreeTransformer();
  pipeline.addTransformer(treeBinarizer);
  pipeline.addTransformer(categoryTransformer);
  treebank = treebank.transform(pipeline);

  HeadFinder headFinder = new BinaryHeadFinder(op.tlpParams.headFinder());
  List<Tree> result = Generics.newArrayList();
  for (Tree binarized : treebank) {
    Trees.convertToCoreLabels(binarized);
    binarized.percolateHeadAnnotations(headFinder);
    // Index from 1. Tools downstream expect index from 1, so for
    // uses internal to the srparser we have to renormalize the
    // indices, with the result that here we have to index from 1
    binarized.indexLeaves(1, true);
    result.add(binarized);
  }
  return result;
}
Aggregations