Use of org.cogcomp.DatastoreException in project cogcomp-nlp by CogComp.
The class TreeGazetteers, method init.
/**
 * Initialize all the gazetteers, mangling each term in a variety of ways.
 *
 * @param phrase_length the maximum length of the phrases we will consider.
 * @param pathToDictionaries the path to the gazetteers.
 * @param language the language, used to select the tokenization strategy.
 * @throws IOException if the gazetteer lists cannot be read.
 */
private void init(int phrase_length, String pathToDictionaries, final Language language) throws IOException {
    try {
        // check the local file system for it.
        File gazDirectory = new File(pathToDictionaries);
        String pathToLists = gazDirectory.getPath() + File.separator + "gazetteers" + File.separator + "gazetteers-list.txt";
        InputStream stream = ResourceUtilities.loadResource(pathToLists);
        if (stream == null) {
            logger.info("Loading gazetteers from \"" + pathToLists + "\" using the Minio cache.");
            // not in file system or classpath, try Minio.
            Datastore dsNoCredentials = new Datastore(new ResourceConfigurator().getDefaultConfig());
            gazDirectory = dsNoCredentials.getDirectory("org.cogcomp.gazetteers", "gazetteers", 1.6, false);
            stream = new FileInputStream(gazDirectory.getPath() + File.separator + "gazetteers" + File.separator + "gazetteers-list.txt");
        } else {
            logger.info("Loading gazetteers from \"" + pathToLists + "\" from the local file system.");
        }
        BufferedReader br = new BufferedReader(new InputStreamReader(stream));
        String line;
        ArrayList<String> filenames = new ArrayList<>();
        while ((line = br.readLine()) != null)
            filenames.add(line);
        // init the dictionaries.
        dictionaries = new ArrayList<>(filenames.size());
        dictionariesIgnoreCase = new ArrayList<>(filenames.size());
        GazetteerTree gaz = new GazetteerTree(phrase_length, new StringSplitterInterface() {

            @Override
            public String[] split(String line) {
                // character tokenization for Chinese
                if (language == Language.Chinese) {
                    String[] chars = new String[line.length()];
                    for (int i = 0; i < line.length(); i++)
                        chars[i] = String.valueOf(line.charAt(i));
                    return chars;
                } else
                    return line.split("[\\s]+");
            }

            @Override
            public final String normalize(String term) {
                return term;
            }
        });
        GazetteerTree gazIC = new GazetteerTree(phrase_length, new StringSplitterInterface() {

            @Override
            public String[] split(String line) {
                String tmp = line.toLowerCase();
                if (tmp.equals("in") || tmp.equals("on") || tmp.equals("us") || tmp.equals("or") || tmp.equals("am"))
                    return new String[0];
                else {
                    // character tokenization for Chinese
                    if (language == Language.Chinese) {
                        String[] chars = new String[line.length()];
                        for (int i = 0; i < line.length(); i++)
                            chars[i] = String.valueOf(line.charAt(i));
                        return chars;
                    } else
                        return normalize(line).split("[\\s]+");
                }
            }

            @Override
            public String normalize(String term) {
                return term.toLowerCase();
            }
        });
        // for each dictionary, compile each of the gaz trees for each phrase permutation.
        for (String file : filenames) {
            String fileName = gazDirectory.getAbsolutePath() + File.separator + file;
            gaz.readDictionary(file, "", ResourceUtilities.loadResource(fileName));
            gazIC.readDictionary(file, "(IC)", ResourceUtilities.loadResource(fileName));
        }
        gaz.trimToSize();
        gazIC.trimToSize();
        dictionaries.add(gaz);
        dictionariesIgnoreCase.add(gazIC);
        logger.info("Gazetteers from \"" + pathToLists + "\" are loaded.");
    } catch (InvalidPortException | InvalidEndpointException e) {
        e.printStackTrace();
    } catch (DatastoreException e) {
        e.printStackTrace();
    }
}
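Since this page collects call sites of DatastoreException, the fallback idiom above is worth isolating: check the local file system and classpath first, and only then fetch through the Minio-backed Datastore. Below is a minimal sketch using only the API calls shown above; the helper name openGazetteerList and the decision to rethrow as IOException are assumptions (the method above simply prints the stack trace).

// Sketch only: resolve the gazetteer list locally, then fall back to Minio.
// Coordinates ("org.cogcomp.gazetteers", "gazetteers", 1.6) are copied from above.
private static InputStream openGazetteerList(String pathToLists) throws IOException {
    InputStream stream = ResourceUtilities.loadResource(pathToLists);
    if (stream != null)
        // found on the local file system or the classpath
        return stream;
    try {
        Datastore ds = new Datastore(new ResourceConfigurator().getDefaultConfig());
        File dir = ds.getDirectory("org.cogcomp.gazetteers", "gazetteers", 1.6, false);
        return new FileInputStream(dir.getPath() + File.separator + "gazetteers" + File.separator + "gazetteers-list.txt");
    } catch (InvalidPortException | InvalidEndpointException | DatastoreException e) {
        // hypothetical choice: surface the failure instead of printing the trace
        throw new IOException("could not fetch gazetteers from the Datastore", e);
    }
}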
Use of org.cogcomp.DatastoreException in project cogcomp-nlp by CogComp.
The class PathLSTMHandler, method initialize.
@Override
public void initialize(ResourceManager rm) {
    try {
        // TODO: move the end-point url to the resource configurator
        Datastore ds = new Datastore("http://smaug.cs.illinois.edu:8080");
        File lemmaModel = ds.getFile("org.cogcomp.mate-tools", "CoNLL2009-ST-English-ALL.anna.lemmatizer.model", 3.3, false);
        File parserModel = ds.getFile("org.cogcomp.mate-tools", "CoNLL2009-ST-English-ALL.anna.parser.model", 3.3, false);
        File posModel = ds.getFile("org.cogcomp.mate-tools", "CoNLL2009-ST-English-ALL.anna.postagger.model", 3.3, false);
        File pathLSTM = ds.getFile("uk.ac.ed.inf", "pathLSTM.model", 1.0, false);
        // SRL pipeline options (currently hard-coded)
        String[] args = new String[] { "eng", "-lemma", lemmaModel.getAbsolutePath(), "-parser", parserModel.getAbsolutePath(), "-tagger", posModel.getAbsolutePath(), "-srl", pathLSTM.getAbsolutePath(), "-reranker", "-externalNNs" };
        CompletePipelineCMDLineOptions options = new CompletePipelineCMDLineOptions();
        options.parseCmdLineArgs(args);
        try {
            SRLpipeline = CompletePipeline.getCompletePipeline(options);
        } catch (ClassNotFoundException | IOException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    } catch (DatastoreException e) {
        e.printStackTrace();
    }
    try {
        this.propBankManager = new FramesManager(true);
        this.nomBankManager = new FramesManager(false);
    } catch (InvalidPortException | InvalidEndpointException | DatastoreException e) {
        e.printStackTrace();
    }
}
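The TODO above flags the hard-coded endpoint; the other call sites on this page already construct the Datastore from the default resource configuration. A hedged sketch of what that fix might look like follows (same mate-tools coordinates, simplified error handling; this is not the project's actual patch).

// Sketch only: build the Datastore from ResourceConfigurator instead of a URL.
try {
    Datastore ds = new Datastore(new ResourceConfigurator().getDefaultConfig());
    File lemmaModel = ds.getFile("org.cogcomp.mate-tools", "CoNLL2009-ST-English-ALL.anna.lemmatizer.model", 3.3, false);
    // ... fetch the parser, tagger, and SRL models the same way ...
} catch (InvalidPortException | InvalidEndpointException | DatastoreException e) {
    e.printStackTrace();
}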
Use of org.cogcomp.DatastoreException in project cogcomp-nlp by CogComp.
The class ExtentTester, method testExtentOnPredictedHead.
public static void testExtentOnPredictedHead() throws InvalidPortException, InvalidEndpointException, DatastoreException, IOException, JWNLException {
    WordNetManager wordNet = null;
    Gazetteers gazetteers = null;
    BrownClusters brownClusters = null;
    try {
        WordNetManager.loadConfigAsClasspathResource(true);
        wordNet = WordNetManager.getInstance();
        Datastore ds = new Datastore(new ResourceConfigurator().getDefaultConfig());
        File gazetteersResource = ds.getDirectory("org.cogcomp.gazetteers", "gazetteers", 1.3, false);
        gazetteers = GazetteersFactory.get(5, gazetteersResource.getPath() + File.separator + "gazetteers", true, Language.English);
        Vector<String> bcs = new Vector<>();
        bcs.add("brown-clusters" + File.separator + "brown-english-wikitext.case-intact.txt-c1000-freq10-v3.txt");
        bcs.add("brown-clusters" + File.separator + "brownBllipClusters");
        bcs.add("brown-clusters" + File.separator + "brown-rcv1.clean.tokenized-CoNLL03.txt-c1000-freq1.txt");
        Vector<Integer> bcst = new Vector<>();
        bcst.add(5);
        bcst.add(5);
        bcst.add(5);
        Vector<Boolean> bcsl = new Vector<>();
        bcsl.add(false);
        bcsl.add(false);
        bcsl.add(false);
        brownClusters = BrownClusters.get(bcs, bcst, bcsl);
    } catch (Exception e) {
        e.printStackTrace();
    }
    int total_mention_predicted = 0;
    int total_mention_labeled = 0;
    int total_mention_head_correct = 0;
    int total_mention_extent_correct = 0;
    for (int i = 0; i < 5; i++) {
        BIOReader h_train_parser_nam = new BIOReader("data/partition_with_dev/train/" + i, "ACE05-TRAIN", "NAM", false);
        BIOReader h_train_parser_nom = new BIOReader("data/partition_with_dev/train/" + i, "ACE05-TRAIN", "NOM", false);
        BIOReader h_train_parser_pro = new BIOReader("data/partition_with_dev/train/" + i, "ACE05-TRAIN", "PRO", false);
        bio_classifier_nam h_classifier_nam = BIOTester.train_nam_classifier(h_train_parser_nam);
        bio_classifier_nom h_classifier_nom = BIOTester.train_nom_classifier(h_train_parser_nom);
        bio_classifier_pro h_classifier_pro = BIOTester.train_pro_classifier(h_train_parser_pro);
        Learner[] h_candidates = new Learner[3];
        h_candidates[0] = h_classifier_nam;
        h_candidates[1] = h_classifier_nom;
        h_candidates[2] = h_classifier_pro;
        ExtentReader e_train_parser = new ExtentReader("data/partition_with_dev/train/" + i);
        extent_classifier e_classifier = train_extent_classifier(e_train_parser);
        BIOReader test_parser = new BIOReader("data/partition_with_dev/eval/" + i, "ACE05-EVAL", "ALL", false);
        test_parser.reset();
        String preBIOLevel1 = "";
        String preBIOLevel2 = "";
        List<Constituent> predictedHeads = new ArrayList<>();
        List<Constituent> predictedMentions = new ArrayList<>();
        for (Object example = test_parser.next(); example != null; example = test_parser.next()) {
            ((Constituent) example).addAttribute("preBIOLevel1", preBIOLevel1);
            ((Constituent) example).addAttribute("preBIOLevel2", preBIOLevel2);
            Pair<String, Integer> h_prediction = BIOTester.joint_inference((Constituent) example, h_candidates);
            String bioTag = h_prediction.getFirst();
            if (bioTag.startsWith("B") || bioTag.startsWith("U")) {
                Constituent predictMention = BIOTester.getConstituent((Constituent) example, h_candidates[h_prediction.getSecond()], false);
                predictedHeads.add(predictMention);
            }
            preBIOLevel2 = preBIOLevel1;
            preBIOLevel1 = bioTag;
        }
        for (Constituent head : predictedHeads) {
            Constituent mention = getFullMention(e_classifier, head, gazetteers, brownClusters, wordNet);
            predictedMentions.add(mention);
        }
        List<Constituent> goldMentions = new ArrayList<>();
        ACEReader aceReader = null;
        try {
            aceReader = new ACEReader("data/partition_with_dev/eval/" + i, false);
        } catch (Exception e) {
            e.printStackTrace();
        }
        for (TextAnnotation ta : aceReader) {
            goldMentions.addAll(ta.getView(ViewNames.MENTION_ACE).getConstituents());
        }
        total_mention_labeled += goldMentions.size();
        total_mention_predicted += predictedMentions.size();
        for (Constituent p : predictedMentions) {
            Constituent ph = getPredictedMentionHead(p);
            for (Constituent g : goldMentions) {
                if (!p.getTextAnnotation().getText().equals(g.getTextAnnotation().getText())) {
                    continue;
                }
                Constituent gh = ACEReader.getEntityHeadForConstituent(g, g.getTextAnnotation(), "TESTG");
                try {
                    if (ph.getStartSpan() == gh.getStartSpan() && ph.getEndSpan() == gh.getEndSpan()) {
                        total_mention_head_correct++;
                        if (g.getStartSpan() == p.getStartSpan() && g.getEndSpan() == p.getEndSpan()) {
                            total_mention_extent_correct++;
                        }
                        break;
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
    }
    System.out.println("Total labeled mention: " + total_mention_labeled);
    System.out.println("Total predicted mention: " + total_mention_predicted);
    System.out.println("Total head correct: " + total_mention_head_correct);
    System.out.println("Total extent correct: " + total_mention_extent_correct);
}
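The four totals printed above are enough to derive precision, recall, and F1 at both the head and extent level. A sketch of that arithmetic follows; it is not part of the original tester, and real code should guard against zero denominators.

// Sketch: metrics from the counters above, treating "correct" counts as true positives.
double headP = (double) total_mention_head_correct / total_mention_predicted;
double headR = (double) total_mention_head_correct / total_mention_labeled;
double extentP = (double) total_mention_extent_correct / total_mention_predicted;
double extentR = (double) total_mention_extent_correct / total_mention_labeled;
System.out.printf("Head P/R/F1: %.4f / %.4f / %.4f%n", headP, headR, 2 * headP * headR / (headP + headR));
System.out.printf("Extent P/R/F1: %.4f / %.4f / %.4f%n", extentP, extentR, 2 * extentP * extentR / (extentP + extentR));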
Use of org.cogcomp.DatastoreException in project cogcomp-nlp by CogComp.
The class ExtentTester, method testExtentOnGoldHead.
public static void testExtentOnGoldHead() throws InvalidPortException, InvalidEndpointException, IOException, JWNLException, DatastoreException {
    int labeled = 0;
    int correct = 0;
    POSAnnotator posAnnotator = null;
    WordNetManager wordNet = null;
    Gazetteers gazetteers = null;
    BrownClusters brownClusters = null;
    try {
        WordNetManager.loadConfigAsClasspathResource(true);
        wordNet = WordNetManager.getInstance();
        posAnnotator = new POSAnnotator();
        Datastore ds = new Datastore(new ResourceConfigurator().getDefaultConfig());
        File gazetteersResource = ds.getDirectory("org.cogcomp.gazetteers", "gazetteers", 1.3, false);
        gazetteers = GazetteersFactory.get(5, gazetteersResource.getPath() + File.separator + "gazetteers", true, Language.English);
        Vector<String> bcs = new Vector<>();
        bcs.add("brown-clusters" + File.separator + "brown-english-wikitext.case-intact.txt-c1000-freq10-v3.txt");
        bcs.add("brown-clusters" + File.separator + "brownBllipClusters");
        bcs.add("brown-clusters" + File.separator + "brown-rcv1.clean.tokenized-CoNLL03.txt-c1000-freq1.txt");
        Vector<Integer> bcst = new Vector<>();
        bcst.add(5);
        bcst.add(5);
        bcst.add(5);
        Vector<Boolean> bcsl = new Vector<>();
        bcsl.add(false);
        bcsl.add(false);
        bcsl.add(false);
        brownClusters = BrownClusters.get(bcs, bcst, bcsl);
    } catch (Exception e) {
        e.printStackTrace();
    }
    for (int i = 0; i < 1; i++) {
        ExtentReader train_parser = new ExtentReader("data/partition_with_dev/train/" + i, "COMBINED-ALL-TRAIN-" + i);
        extent_classifier classifier = train_extent_classifier(train_parser);
        BIOCombinedReader bioCombinedReader = null;
        try {
            bioCombinedReader = new BIOCombinedReader(i, "ALL-EVAL", "ALL", true);
        } catch (Exception e) {
            e.printStackTrace();
        }
        for (Object ota = bioCombinedReader.next(); ota != null; ota = bioCombinedReader.next()) {
            TextAnnotation ta = (TextAnnotation) ota;
            try {
                ta.addView(posAnnotator);
            } catch (Exception e) {
                e.printStackTrace();
            }
            String mentionViewName = ViewNames.MENTION_ERE;
            if (ta.getId().startsWith("bn") || ta.getId().startsWith("nw")) {
                mentionViewName = ViewNames.MENTION_ACE;
            }
            View mentionView = ta.getView(mentionViewName);
            for (Constituent mention : mentionView.getConstituents()) {
                Constituent head = ACEReader.getEntityHeadForConstituent(mention, ta, "HEADS");
                if (head == null) {
                    continue;
                }
                labeled++;
                Constituent predictedFullMention = getFullMention(classifier, head, gazetteers, brownClusters, wordNet);
                if (predictedFullMention.getStartSpan() == mention.getStartSpan() && predictedFullMention.getEndSpan() == mention.getEndSpan()) {
                    correct++;
                } else {
                    System.out.println("Gold: " + mention.toString());
                    System.out.println("Predicted: " + predictedFullMention.toString());
                }
            }
        }
    }
    System.out.println("Labeled: " + labeled);
    System.out.println("Correct: " + correct);
    System.out.println("Correctness: " + (double) correct * 100.0 / (double) labeled);
}
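Both testers are public, static, and declare their checked exceptions, so a caller only needs to propagate or handle those. A hypothetical driver (ExtentTester does not ship this main method):

// Hypothetical driver, not part of the original class.
public static void main(String[] args) throws InvalidPortException, InvalidEndpointException, DatastoreException, IOException, JWNLException {
    // extent classification with gold heads, then with predicted heads
    testExtentOnGoldHead();
    testExtentOnPredictedHead();
}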