Example usage of org.jabref.logic.importer.ParserResult in the JabRef project:
the handleMessage method of the JabRefMessageHandler class.
@Override
public void handleMessage(String message) {
    // The remote message is a newline-separated argument list; hand it to the
    // regular command-line argument machinery in remote-start mode.
    ArgumentProcessor argumentProcessor = new ArgumentProcessor(message.split("\n"), ArgumentProcessor.Mode.REMOTE_START);
    if (!argumentProcessor.hasParserResults()) {
        throw new IllegalStateException("Could not start JabRef with arguments " + message);
    }
    // Add every parsed database to the main frame; only the first one is focused.
    boolean isFirst = true;
    for (ParserResult parserResult : argumentProcessor.getParserResults()) {
        JabRefGUI.getMainFrame().addParserResult(parserResult, isFirst);
        isFirst = false;
    }
}
Example usage of org.jabref.logic.importer.ParserResult in the JabRef project:
the downloadEntry method of the GoogleScholar class.
/**
 * Downloads the BibTeX file behind the given Google Scholar link and parses it
 * into a single {@link BibEntry}.
 *
 * @param link URL of the BibTeX file offered by Google Scholar
 * @return the single entry contained in the downloaded file
 * @throws IOException      if the download fails
 * @throws FetcherException if the content cannot be parsed or does not contain
 *                          exactly one entry
 */
private BibEntry downloadEntry(String link) throws IOException, FetcherException {
    String downloadedContent = new URLDownload(link).asString();
    BibtexParser parser = new BibtexParser(importFormatPreferences);
    ParserResult result = parser.parse(new StringReader(downloadedContent));
    // Guard clauses instead of nested else-branches.
    if ((result == null) || (result.getDatabase() == null)) {
        throw new FetcherException("Parsing entries from Google Scholar bib file failed.");
    }
    Collection<BibEntry> entries = result.getDatabase().getEntries();
    if (entries.size() != 1) {
        // Parameterized logging instead of string concatenation (avoids building
        // the message when debug logging is disabled).
        LOGGER.debug("{} entries found! ({})", entries.size(), link);
        throw new FetcherException("Parsing entries from Google Scholar bib file failed.");
    }
    return entries.iterator().next();
}
Example usage of org.jabref.logic.importer.ParserResult in the JabRef project:
the performSearch method of the MrDLibFetcher class.
@Override
public List<BibEntry> performSearch(BibEntry entry) throws FetcherException {
    Optional<String> title = entry.getLatexFreeField(FieldName.TITLE);
    if (!title.isPresent()) {
        // without a title there is no reason to ask MrDLib
        return new ArrayList<>(0);
    }
    String response = makeServerRequest(title.get());
    MrDLibImporter importer = new MrDLibImporter();
    ParserResult parserResult;
    try {
        if (importer.isRecognizedFormat(new BufferedReader(new StringReader(response)))) {
            parserResult = importer.importDatabase(new BufferedReader(new StringReader(response)));
        } else {
            // The response is not in the expected format: build a one-entry
            // database whose entry carries a localized error message for display.
            BibEntry errorBibEntry = new BibEntry();
            errorBibEntry.setField("html_representation", Localization.lang("Error_while_fetching_from_%0", "Mr.DLib"));
            BibDatabase errorBibDataBase = new BibDatabase();
            errorBibDataBase.insertEntry(errorBibEntry);
            parserResult = new ParserResult(errorBibDataBase);
        }
    } catch (IOException e) {
        LOGGER.error(e.getMessage(), e);
        // Preserve the original exception as the cause so callers get the full
        // stack trace instead of only the wrapper message.
        throw new FetcherException("XML Parser IOException.", e);
    }
    return parserResult.getDatabase().getEntries();
}
Example usage of org.jabref.logic.importer.ParserResult in the JabRef project:
the importDatabase method of the MedlineImporter class.
/**
 * Parses a Medline/PubMed XML stream into a {@link ParserResult}.
 *
 * <p>The root element may be a PubmedArticleSet, a single PubmedArticle, a
 * single PubmedBookArticle, or a PubmedBookArticleSet; each contained article
 * is converted to a {@link BibEntry}.</p>
 *
 * @param reader the XML input; must not be null
 * @return a ParserResult with the parsed entries, or an error result if the
 *         document could not be parsed
 */
@Override
public ParserResult importDatabase(BufferedReader reader) throws IOException {
    Objects.requireNonNull(reader);
    List<BibEntry> bibItems = new ArrayList<>();
    try {
        JAXBContext context = JAXBContext.newInstance("org.jabref.logic.importer.fileformat.medline");
        XMLInputFactory xmlInputFactory = XMLInputFactory.newFactory();
        // Security: Medline XML comes from external sources, so never resolve
        // external entities (XXE). DTD support itself is kept because genuine
        // PubMed exports carry a DOCTYPE declaration.
        xmlInputFactory.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false);
        XMLStreamReader xmlStreamReader = xmlInputFactory.createXMLStreamReader(reader);
        //go to the root element
        while (!xmlStreamReader.isStartElement()) {
            xmlStreamReader.next();
        }
        Unmarshaller unmarshaller = context.createUnmarshaller();
        Object unmarshalledObject = unmarshaller.unmarshal(xmlStreamReader);
        //check whether we have an article set, an article, a book article or a book article set
        if (unmarshalledObject instanceof PubmedArticleSet) {
            PubmedArticleSet articleSet = (PubmedArticleSet) unmarshalledObject;
            for (Object article : articleSet.getPubmedArticleOrPubmedBookArticle()) {
                if (article instanceof PubmedArticle) {
                    PubmedArticle currentArticle = (PubmedArticle) article;
                    parseArticle(currentArticle, bibItems);
                }
                if (article instanceof PubmedBookArticle) {
                    PubmedBookArticle currentArticle = (PubmedBookArticle) article;
                    parseBookArticle(currentArticle, bibItems);
                }
            }
        } else if (unmarshalledObject instanceof PubmedArticle) {
            PubmedArticle article = (PubmedArticle) unmarshalledObject;
            parseArticle(article, bibItems);
        } else if (unmarshalledObject instanceof PubmedBookArticle) {
            PubmedBookArticle currentArticle = (PubmedBookArticle) unmarshalledObject;
            parseBookArticle(currentArticle, bibItems);
        } else if (unmarshalledObject instanceof PubmedBookArticleSet) {
            PubmedBookArticleSet bookArticleSet = (PubmedBookArticleSet) unmarshalledObject;
            for (PubmedBookArticle bookArticle : bookArticleSet.getPubmedBookArticle()) {
                parseBookArticle(bookArticle, bibItems);
            }
        } else {
            // Previously an unconditional cast here threw an uncaught
            // ClassCastException for any other root element; now we just log
            // and return whatever was parsed (an empty result).
            LOGGER.debug("unexpected root element: {}", unmarshalledObject.getClass().getName());
        }
    } catch (JAXBException | XMLStreamException e) {
        LOGGER.debug("could not parse document", e);
        return ParserResult.fromError(e);
    }
    return new ParserResult(bibItems);
}
Example usage of org.jabref.logic.importer.ParserResult in the JabRef project:
the importDatabase method of the IsiImporter class.
/**
 * Parses an ISI (Web of Science / Thomson Reuters) export file into a ParserResult.
 *
 * The file is first flattened into one string: "PT " starts a new record
 * (marked "::"), two-letter tags start a new field (marked " ## "), and
 * continuation lines are joined with the sentinel "EOLEOL" so multi-line
 * values can later be rejoined with field-specific separators.
 */
@Override
public ParserResult importDatabase(BufferedReader reader) throws IOException {
    Objects.requireNonNull(reader);
    List<BibEntry> bibitems = new ArrayList<>();
    StringBuilder sb = new StringBuilder();
    // Pattern fieldPattern = Pattern.compile("^AU |^TI |^SO |^DT |^C1 |^AB
    // |^ID |^BP |^PY |^SE |^PY |^VL |^IS ");
    String str;
    while ((str = reader.readLine()) != null) {
        // Lines shorter than a tag prefix carry no information.
        if (str.length() < 3) {
            continue;
        }
        // beginning of a new item
        if ("PT ".equals(str.substring(0, 3))) {
            sb.append("::").append(str);
        } else {
            String beg = str.substring(0, 3).trim();
            // quick and dirty and it works!
            if (beg.length() == 2) {
                // mark the beginning of each field
                sb.append(" ## ");
                sb.append(str);
            } else {
                // mark the end of each line
                sb.append("EOLEOL");
                // remove the initial spaces
                sb.append(str.trim());
            }
        }
    }
    String[] entries = sb.toString().split("::");
    // Reused per record; cleared at the top of each iteration.
    Map<String, String> hm = new HashMap<>();
    // skip the first entry as it is either empty or has document header
    // NOTE(review): nothing is actually skipped here — the loop visits every
    // split piece; empty/header pieces only fall out via the hm.isEmpty()
    // check below. Confirm whether the comment or the code is intended.
    for (String entry : entries) {
        String[] fields = entry.split(" ## ");
        if (fields.length == 0) {
            fields = entry.split("\n");
        }
        // Type: resulting BibTeX entry type; PT: raw publication-type tag;
        // pages: accumulated from BP/EP/SP/PS/AR tags.
        String Type = "";
        String PT = "";
        String pages = "";
        hm.clear();
        for (String field : fields) {
            // empty field don't do anything
            if (field.length() <= 2) {
                continue;
            }
            // Two-letter tag followed by a space, then the value.
            String beg = field.substring(0, 2);
            String value = field.substring(3);
            if (value.startsWith(" - ")) {
                value = value.substring(3);
            }
            value = value.trim();
            if ("PT".equals(beg)) {
                if (value.startsWith("J")) {
                    PT = "article";
                } else {
                    PT = value;
                }
                // make all of them PT?
                Type = "article";
            } else if ("TY".equals(beg)) {
                if ("JOUR".equals(value)) {
                    Type = "article";
                } else if ("CONF".equals(value)) {
                    Type = "inproceedings";
                }
            } else if ("JO".equals(beg)) {
                hm.put(FieldName.BOOKTITLE, value);
            } else if ("AU".equals(beg)) {
                // Continuation lines each hold one author; rejoin with " and ".
                String author = IsiImporter.isiAuthorsConvert(value.replace("EOLEOL", " and "));
                // if there is already someone there then append with "and"
                if (hm.get(FieldName.AUTHOR) != null) {
                    author = hm.get(FieldName.AUTHOR) + " and " + author;
                }
                hm.put(FieldName.AUTHOR, author);
            } else if ("TI".equals(beg)) {
                hm.put(FieldName.TITLE, value.replace("EOLEOL", " "));
            } else if ("SO".equals(beg) || "JA".equals(beg)) {
                hm.put(FieldName.JOURNAL, value.replace("EOLEOL", " "));
            } else if ("ID".equals(beg) || "KW".equals(beg)) {
                // ID (keywords-plus) and KW (author keywords) are merged into
                // a single comma-separated keywords field, avoiding duplicates.
                value = value.replace("EOLEOL", " ");
                String existingKeywords = hm.get(FieldName.KEYWORDS);
                if ((existingKeywords == null) || existingKeywords.contains(value)) {
                    existingKeywords = value;
                } else {
                    existingKeywords += ", " + value;
                }
                hm.put(FieldName.KEYWORDS, existingKeywords);
            } else if ("AB".equals(beg)) {
                hm.put(FieldName.ABSTRACT, value.replace("EOLEOL", " "));
            } else if ("BP".equals(beg) || "BR".equals(beg) || "SP".equals(beg)) {
                // Start page; the end page (EP) is appended below with "--".
                pages = value;
            } else if ("EP".equals(beg)) {
                int detpos = value.indexOf(' ');
                // tweak for IEEE Explore
                if ((detpos != -1) && !value.substring(0, detpos).trim().isEmpty()) {
                    value = value.substring(0, detpos);
                }
                pages = pages + "--" + value;
            } else if ("PS".equals(beg)) {
                pages = IsiImporter.parsePages(value);
            } else if ("AR".equals(beg)) {
                // Article number replaces a page range entirely.
                pages = value;
            } else if ("IS".equals(beg)) {
                hm.put(FieldName.NUMBER, value);
            } else if ("PY".equals(beg)) {
                hm.put(FieldName.YEAR, value);
            } else if ("VL".equals(beg)) {
                hm.put(FieldName.VOLUME, value);
            } else if ("PU".equals(beg)) {
                hm.put(FieldName.PUBLISHER, value);
            } else if ("DI".equals(beg)) {
                hm.put(FieldName.DOI, value);
            } else if ("PD".equals(beg)) {
                String month = IsiImporter.parseMonth(value);
                if (month != null) {
                    hm.put(FieldName.MONTH, month);
                }
            } else if ("DT".equals(beg)) {
                // Document type overrides whatever PT/TY set earlier.
                Type = value;
                if ("Review".equals(Type)) {
                    // set "Review" in Note/Comment?
                    Type = "article";
                } else if (Type.startsWith("Article") || Type.startsWith("Journal") || "article".equals(PT)) {
                    Type = "article";
                } else {
                    Type = BibEntry.DEFAULT_TYPE;
                }
            } else if ("CR".equals(beg)) {
                hm.put("CitedReferences", value.replace("EOLEOL", " ; ").trim());
            } else {
                // Preserve all other entries except
                // file-structure tags (end of record/file, version, file name).
                if ("ER".equals(beg) || "EF".equals(beg) || "VR".equals(beg) || "FN".equals(beg)) {
                    continue;
                }
                hm.put(beg.toLowerCase(Locale.ROOT), value);
            }
        }
        if (!"".equals(pages)) {
            hm.put(FieldName.PAGES, pages);
        }
        // Skip empty entries
        if (hm.isEmpty()) {
            continue;
        }
        BibEntry b = new BibEntry(Type);
        // id assumes an existing database so don't
        // Remove empty fields:
        List<Object> toRemove = new ArrayList<>();
        for (Map.Entry<String, String> field : hm.entrySet()) {
            String content = field.getValue();
            if ((content == null) || content.trim().isEmpty()) {
                toRemove.add(field.getKey());
            }
        }
        for (Object aToRemove : toRemove) {
            hm.remove(aToRemove);
        }
        // Polish entries
        IsiImporter.processSubSup(hm);
        IsiImporter.processCapitalization(hm);
        b.setField(hm);
        bibitems.add(b);
    }
    return new ParserResult(bibitems);
}
Aggregations