Use of java.io.StringReader in project springside4 by springside.
The class PropertiesUtil, method loadFromString.
/**
 * Load Properties from the contents of a String.
 */
public static Properties loadFromString(String content) {
    Properties p = new Properties();
    Reader reader = new StringReader(content);
    try {
        p.load(reader);
    } catch (IOException ignored) {
    } finally {
        IOUtil.closeQuietly(reader);
    }
    return p;
}
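A minimal usage sketch (assumption: the PropertiesUtil class above is on the classpath; its package import is omitted here). It relies only on the standard key=value format that java.util.Properties parses:

import java.util.Properties;

public class PropertiesUtilUsage {
    public static void main(String[] args) {
        // key=value pairs separated by newlines, the format java.util.Properties expects
        Properties p = PropertiesUtil.loadFromString("host=localhost\nport=8080\n");
        // prints "localhost" and "8080"
        System.out.println(p.getProperty("host"));
        System.out.println(p.getProperty("port"));
    }
}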
Use of java.io.StringReader in project CoreNLP by stanfordnlp.
The class DependencyParserDemo, method main.
public static void main(String[] args) {
    String modelPath = DependencyParser.DEFAULT_MODEL;
    String taggerPath = "edu/stanford/nlp/models/pos-tagger/english-left3words/english-left3words-distsim.tagger";
    for (int argIndex = 0; argIndex < args.length; ) {
        switch (args[argIndex]) {
            case "-tagger":
                taggerPath = args[argIndex + 1];
                argIndex += 2;
                break;
            case "-model":
                modelPath = args[argIndex + 1];
                argIndex += 2;
                break;
            default:
                throw new RuntimeException("Unknown argument " + args[argIndex]);
        }
    }
    String text = "I can almost always tell when movies use fake dinosaurs.";
    MaxentTagger tagger = new MaxentTagger(taggerPath);
    DependencyParser parser = DependencyParser.loadFromModelFile(modelPath);
    DocumentPreprocessor tokenizer = new DocumentPreprocessor(new StringReader(text));
    for (List<HasWord> sentence : tokenizer) {
        List<TaggedWord> tagged = tagger.tagSentence(sentence);
        GrammaticalStructure gs = parser.predict(tagged);
        // Print typed dependencies
        log.info(gs);
    }
}
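A hedged sketch of a helper that could be called inside the sentence loop above to print each dependency individually instead of logging the whole structure. typedDependencies(), reln(), gov() and dep() are part of the GrammaticalStructure/TypedDependency API; printDependencies itself is an illustrative name, not from the demo:

import edu.stanford.nlp.trees.GrammaticalStructure;
import edu.stanford.nlp.trees.TypedDependency;

static void printDependencies(GrammaticalStructure gs) {
    for (TypedDependency td : gs.typedDependencies()) {
        // relation name, governor word, dependent word
        System.out.printf("%s(%s, %s)%n", td.reln(), td.gov(), td.dep());
    }
}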
Use of java.io.StringReader in project CoreNLP by stanfordnlp.
The class ParserDemo, method demoAPI.
/**
* demoAPI demonstrates other ways of calling the parser with
* already tokenized text, or in some cases, raw text that needs to
* be tokenized as a single sentence. Output is handled with a
* TreePrint object. Note that the options used when creating the
* TreePrint can determine what results to print out. Once again,
* one can capture the output by passing a PrintWriter to
* TreePrint.printTree. This code is for English.
*/
public static void demoAPI(LexicalizedParser lp) {
    // This option shows parsing a list of correctly tokenized words
    String[] sent = { "This", "is", "an", "easy", "sentence", "." };
    List<CoreLabel> rawWords = SentenceUtils.toCoreLabelList(sent);
    Tree parse = lp.apply(rawWords);
    parse.pennPrint();
    System.out.println();
    // This option shows loading and using an explicit tokenizer
    String sent2 = "This is another sentence.";
    TokenizerFactory<CoreLabel> tokenizerFactory = PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    Tokenizer<CoreLabel> tok = tokenizerFactory.getTokenizer(new StringReader(sent2));
    List<CoreLabel> rawWords2 = tok.tokenize();
    parse = lp.apply(rawWords2);
    // PennTreebankLanguagePack for English
    TreebankLanguagePack tlp = lp.treebankLanguagePack();
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
    List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
    System.out.println(tdl);
    System.out.println();
    // You can also use a TreePrint object to print trees and dependencies
    TreePrint tp = new TreePrint("penn,typedDependenciesCollapsed");
    tp.printTree(parse);
}
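A hedged sketch of how demoAPI could be invoked, assuming the CoreNLP models jar is on the classpath; the path below is the conventional location of the serialized English PCFG grammar:

// Load the serialized English PCFG grammar and run the demo above.
LexicalizedParser lp = LexicalizedParser.loadModel(
        "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
demoAPI(lp);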
Use of java.io.StringReader in project CoreNLP by stanfordnlp.
The class InputPanel, method runScript.
private void runScript() {
    setTsurgeonState(true);
    final String script = tsurgeonScript.getText();
    searchThread = new Thread() {

        @Override
        public void run() {
            try {
                BufferedReader reader = new BufferedReader(new StringReader(script));
                TsurgeonPattern operation = Tsurgeon.getTsurgeonOperationsFromReader(reader);
                final String text = tregexPattern.getText().intern();
                SwingUtilities.invokeLater(() -> {
                    InputPanel.this.addRecentTregexPattern(text);
                    useProgressBar(true);
                });
                final TRegexGUITreeVisitor visitor = getMatchTreeVisitor(text, this);
                // means the tregex errored out
                if (visitor == null)
                    return;
                if (this.isInterrupted()) {
                    returnToValidState(text, visitor, new ArrayList<>());
                    return;
                }
                // log.info("Running Script with matches: " + visitor.getMatches());
                List<TreeFromFile> trees = visitor.getMatches();
                final List<TreeFromFile> modifiedTrees = new ArrayList<>();
                for (TreeFromFile tff : trees) {
                    if (this.isInterrupted()) {
                        returnToValidState(text, visitor, trees);
                        return;
                    }
                    Tree modifiedTree = Tsurgeon.processPattern(visitor.getPattern(), operation, tff.getTree());
                    modifiedTrees.add(new TreeFromFile(modifiedTree, tff.getFilename().intern()));
                }
                returnToValidState(text, visitor, modifiedTrees);
            } catch (Exception e) {
                doError("Sorry, there was an error compiling or running the Tsurgeon script. Please press Help if you need assistance.", e);
                SwingUtilities.invokeLater(() -> {
                    setTregexState(false);
                    setTsurgeonState(false);
                    InputPanel.this.searchThread = null;
                });
            }
        }
    };
    searchThread.start();
}
Use of java.io.StringReader in project CoreNLP by stanfordnlp.
The class ChineseCorefBenchmarkSlowITest, method getCorefResults.
private static Counter<String> getCorefResults(String resultsString) throws IOException {
    Counter<String> results = new ClassicCounter<String>();
    BufferedReader r = new BufferedReader(new StringReader(resultsString));
    for (String line; (line = r.readLine()) != null; ) {
        Matcher m1 = MENTION_PATTERN.matcher(line);
        if (m1.matches()) {
            results.setCount(MENTION_TP, Double.parseDouble(m1.group(1)));
            results.setCount(MENTION_F1, Double.parseDouble(m1.group(2)));
        }
        Matcher m2 = MUC_PATTERN.matcher(line);
        if (m2.matches()) {
            results.setCount(MUC_TP, Double.parseDouble(m2.group(1)));
            results.setCount(MUC_F1, Double.parseDouble(m2.group(2)));
        }
        Matcher m3 = BCUBED_PATTERN.matcher(line);
        if (m3.matches()) {
            results.setCount(BCUBED_TP, Double.parseDouble(m3.group(1)));
            results.setCount(BCUBED_F1, Double.parseDouble(m3.group(2)));
        }
        Matcher m4 = CEAFM_PATTERN.matcher(line);
        if (m4.matches()) {
            results.setCount(CEAFM_TP, Double.parseDouble(m4.group(1)));
            results.setCount(CEAFM_F1, Double.parseDouble(m4.group(2)));
        }
        Matcher m5 = CEAFE_PATTERN.matcher(line);
        if (m5.matches()) {
            results.setCount(CEAFE_TP, Double.parseDouble(m5.group(1)));
            results.setCount(CEAFE_F1, Double.parseDouble(m5.group(2)));
        }
        Matcher m6 = BLANC_PATTERN.matcher(line);
        if (m6.matches()) {
            results.setCount(BLANC_F1, Double.parseDouble(m6.group(1)));
        }
        Matcher m7 = CONLL_PATTERN.matcher(line);
        if (m7.matches()) {
            results.setCount(CONLL_SCORE, Double.parseDouble(m7.group(1)));
        }
    }
    return results;
}
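The core idiom here, independent of the benchmark's regex patterns and counter keys, is reading an in-memory String line by line. A minimal hedged sketch using only the JDK (the report text is illustrative):

import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;

public class LineScan {
    public static void main(String[] args) throws IOException {
        String report = "MENTIONS: 80.1 75.2\nMUC: 65.3 60.1\n";  // illustrative scorer output
        BufferedReader r = new BufferedReader(new StringReader(report));
        for (String line; (line = r.readLine()) != null; ) {
            // each line would be handed to the Matchers in the method above
            System.out.println("line: " + line);
        }
    }
}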