Use of org.jabref.model.entry.BibEntry in project jabref by JabRef: class CrossRef, method extractIdentifier.
@Override
public Optional<DOI> extractIdentifier(BibEntry inputEntry, List<BibEntry> fetchedEntries) throws FetcherException {
    final String entryTitle = REMOVE_BRACES_FORMATTER.format(inputEntry.getLatexFreeField(FieldName.TITLE).orElse(""));
    final StringSimilarity stringSimilarity = new StringSimilarity();
    for (BibEntry fetchedEntry : fetchedEntries) {
        // currently only title-based comparison
        // title
        Optional<String> dataTitle = fetchedEntry.getField(FieldName.TITLE);
        if (OptionalUtil.isPresentAnd(dataTitle, title -> stringSimilarity.isSimilar(entryTitle, title))) {
            return fetchedEntry.getDOI();
        }
        // subtitle
        // additional check, as sometimes subtitle is needed but sometimes only duplicates the title
        Optional<String> dataSubtitle = fetchedEntry.getField(FieldName.SUBTITLE);
        Optional<String> dataWithSubTitle = OptionalUtil.combine(dataTitle, dataSubtitle, (title, subtitle) -> title + " " + subtitle);
        if (OptionalUtil.isPresentAnd(dataWithSubTitle, titleWithSubtitle -> stringSimilarity.isSimilar(entryTitle, titleWithSubtitle))) {
            return fetchedEntry.getDOI();
        }
    }
    return Optional.empty();
}
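As a usage illustration, the method can be exercised with hand-built entries. The sketch below is not part of JabRef; it assumes CrossRef exposes a public no-argument constructor, and the title and DOI values are made up. Everything else uses only calls visible in the snippet above (plus java.util.Collections).

public Optional<DOI> findDoiForTitle() throws FetcherException {
    BibEntry input = new BibEntry();
    input.setField(FieldName.TITLE, "An Example Title"); // made-up input title
    BibEntry candidate = new BibEntry();
    candidate.setField(FieldName.TITLE, "An Example Title"); // candidate produced by some earlier search
    candidate.setField(FieldName.DOI, "10.1000/example"); // made-up DOI
    CrossRef crossRef = new CrossRef(); // assumed no-argument constructor
    // Title similarity against the candidate list decides which DOI, if any, is returned.
    return crossRef.extractIdentifier(input, Collections.singletonList(candidate));
}

Because the comparison is title-based only, near-duplicate titles from different works can in principle produce a false match, which is why the subtitle check above exists as a second chance rather than a stricter filter.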
Use of org.jabref.model.entry.BibEntry in project jabref by JabRef: class MrDLibFetcher, method performSearch.
@Override
public List<BibEntry> performSearch(BibEntry entry) throws FetcherException {
    Optional<String> title = entry.getLatexFreeField(FieldName.TITLE);
    if (title.isPresent()) {
        String response = makeServerRequest(title.get());
        MrDLibImporter importer = new MrDLibImporter();
        ParserResult parserResult = new ParserResult();
        try {
            if (importer.isRecognizedFormat(new BufferedReader(new StringReader(response)))) {
                parserResult = importer.importDatabase(new BufferedReader(new StringReader(response)));
            } else {
                // for displaying an error message
                BibEntry errorBibEntry = new BibEntry();
                errorBibEntry.setField("html_representation", Localization.lang("Error_while_fetching_from_%0", "Mr.DLib"));
                BibDatabase errorBibDataBase = new BibDatabase();
                errorBibDataBase.insertEntry(errorBibEntry);
                parserResult = new ParserResult(errorBibDataBase);
            }
        } catch (IOException e) {
            LOGGER.error(e.getMessage(), e);
            throw new FetcherException("XML Parser IOException.");
        }
        return parserResult.getDatabase().getEntries();
    } else {
        // without a title there is no reason to ask MrDLib
        return new ArrayList<>(0);
    }
}
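Note that the response string is wrapped in a fresh BufferedReader twice: a reader can only be consumed once, so the format probe and the actual import each need their own. The following minimal sketch isolates that probe-then-import pattern; it is an illustration, not JabRef code, and relies only on the importer calls visible above.

private List<BibEntry> importFromResponse(String response) throws IOException {
    MrDLibImporter importer = new MrDLibImporter();
    // Probe the format with one reader, import with another; each reader is consumed exactly once.
    if (importer.isRecognizedFormat(new BufferedReader(new StringReader(response)))) {
        return importer.importDatabase(new BufferedReader(new StringReader(response))).getDatabase().getEntries();
    }
    return new ArrayList<>(0);
}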
Use of org.jabref.model.entry.BibEntry in project jabref by JabRef: class MedlineImporter, method parseArticle.
private void parseArticle(PubmedArticle article, List<BibEntry> bibItems) {
    Map<String, String> fields = new HashMap<>();
    if (article.getPubmedData() != null) {
        if (article.getMedlineCitation().getDateRevised() != null) {
            DateRevised dateRevised = article.getMedlineCitation().getDateRevised();
            addDateRevised(fields, dateRevised);
            putIfValueNotNull(fields, "pubstatus", article.getPubmedData().getPublicationStatus());
            if (article.getPubmedData().getArticleIdList() != null) {
                ArticleIdList articleIdList = article.getPubmedData().getArticleIdList();
                addArticleIdList(fields, articleIdList);
            }
        }
    }
    if (article.getMedlineCitation() != null) {
        MedlineCitation medlineCitation = article.getMedlineCitation();
        fields.put("status", medlineCitation.getStatus());
        DateCreated dateCreated = medlineCitation.getDateCreated();
        fields.put("created", convertToDateFormat(dateCreated.getYear(), dateCreated.getMonth(), dateCreated.getDay()));
        fields.put("pubmodel", medlineCitation.getArticle().getPubModel());
        if (medlineCitation.getDateCompleted() != null) {
            DateCompleted dateCompleted = medlineCitation.getDateCompleted();
            fields.put("completed", convertToDateFormat(dateCompleted.getYear(), dateCompleted.getMonth(), dateCompleted.getDay()));
        }
        fields.put(FieldName.PMID, medlineCitation.getPMID().getContent());
        fields.put(FieldName.OWNER, medlineCitation.getOwner());
        addArticleInformation(fields, medlineCitation.getArticle().getContent());
        MedlineJournalInfo medlineJournalInfo = medlineCitation.getMedlineJournalInfo();
        putIfValueNotNull(fields, "country", medlineJournalInfo.getCountry());
        putIfValueNotNull(fields, "journal-abbreviation", medlineJournalInfo.getMedlineTA());
        putIfValueNotNull(fields, "nlm-id", medlineJournalInfo.getNlmUniqueID());
        putIfValueNotNull(fields, "issn-linking", medlineJournalInfo.getISSNLinking());
        if (medlineCitation.getChemicalList() != null) {
            if (medlineCitation.getChemicalList().getChemical() != null) {
                addChemicals(fields, medlineCitation.getChemicalList().getChemical());
            }
        }
        if (medlineCitation.getCitationSubset() != null) {
            fields.put("citation-subset", join(medlineCitation.getCitationSubset(), ", "));
        }
        if (medlineCitation.getGeneSymbolList() != null) {
            addGeneSymbols(fields, medlineCitation.getGeneSymbolList());
        }
        if (medlineCitation.getMeshHeadingList() != null) {
            addMeashHeading(fields, medlineCitation.getMeshHeadingList());
        }
        putIfValueNotNull(fields, "references", medlineCitation.getNumberOfReferences());
        if (medlineCitation.getPersonalNameSubjectList() != null) {
            addPersonalNames(fields, medlineCitation.getPersonalNameSubjectList());
        }
        if (medlineCitation.getOtherID() != null) {
            addOtherId(fields, medlineCitation.getOtherID());
        }
        if (medlineCitation.getKeywordList() != null) {
            addKeyWords(fields, medlineCitation.getKeywordList());
        }
        if (medlineCitation.getSpaceFlightMission() != null) {
            fields.put("space-flight-mission", join(medlineCitation.getSpaceFlightMission(), ", "));
        }
        if (medlineCitation.getInvestigatorList() != null) {
            addInvestigators(fields, medlineCitation.getInvestigatorList());
        }
        if (medlineCitation.getGeneralNote() != null) {
            addNotes(fields, medlineCitation.getGeneralNote());
        }
    }
    BibEntry entry = new BibEntry("article");
    entry.setField(fields);
    bibItems.add(entry);
}
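The helper putIfValueNotNull is not part of this excerpt. Judging only from how it is called, it guards the field map against null values; a plausible stand-in consistent with those call sites might look like the sketch below (not necessarily JabRef's actual implementation).

private void putIfValueNotNull(Map<String, String> fields, String field, String value) {
    // Only store the field if the XML element actually provided a value.
    if (value != null) {
        fields.put(field, value);
    }
}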
Use of org.jabref.model.entry.BibEntry in project jabref by JabRef: class MedlineImporter, method importDatabase.
@Override
public ParserResult importDatabase(BufferedReader reader) throws IOException {
    Objects.requireNonNull(reader);
    List<BibEntry> bibItems = new ArrayList<>();
    try {
        JAXBContext context = JAXBContext.newInstance("org.jabref.logic.importer.fileformat.medline");
        XMLInputFactory xmlInputFactory = XMLInputFactory.newFactory();
        XMLStreamReader xmlStreamReader = xmlInputFactory.createXMLStreamReader(reader);
        // go to the root element
        while (!xmlStreamReader.isStartElement()) {
            xmlStreamReader.next();
        }
        Unmarshaller unmarshaller = context.createUnmarshaller();
        Object unmarshalledObject = unmarshaller.unmarshal(xmlStreamReader);
        // check whether we have an article set, an article, a book article or a book article set
        if (unmarshalledObject instanceof PubmedArticleSet) {
            PubmedArticleSet articleSet = (PubmedArticleSet) unmarshalledObject;
            for (Object article : articleSet.getPubmedArticleOrPubmedBookArticle()) {
                if (article instanceof PubmedArticle) {
                    PubmedArticle currentArticle = (PubmedArticle) article;
                    parseArticle(currentArticle, bibItems);
                }
                if (article instanceof PubmedBookArticle) {
                    PubmedBookArticle currentArticle = (PubmedBookArticle) article;
                    parseBookArticle(currentArticle, bibItems);
                }
            }
        } else if (unmarshalledObject instanceof PubmedArticle) {
            PubmedArticle article = (PubmedArticle) unmarshalledObject;
            parseArticle(article, bibItems);
        } else if (unmarshalledObject instanceof PubmedBookArticle) {
            PubmedBookArticle currentArticle = (PubmedBookArticle) unmarshalledObject;
            parseBookArticle(currentArticle, bibItems);
        } else {
            PubmedBookArticleSet bookArticleSet = (PubmedBookArticleSet) unmarshalledObject;
            for (PubmedBookArticle bookArticle : bookArticleSet.getPubmedBookArticle()) {
                parseBookArticle(bookArticle, bibItems);
            }
        }
    } catch (JAXBException | XMLStreamException e) {
        LOGGER.debug("could not parse document", e);
        return ParserResult.fromError(e);
    }
    return new ParserResult(bibItems);
}
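A caller would typically hand the importer a reader over a MEDLINE XML file and read the entries off the resulting ParserResult. The sketch below assumes MedlineImporter has a public no-argument constructor and uses a caller-supplied file path; the rest is standard java.nio plus the methods shown above.

public List<BibEntry> importMedlineFile(Path xmlFile) throws IOException {
    MedlineImporter importer = new MedlineImporter(); // assumed no-argument constructor
    try (BufferedReader reader = Files.newBufferedReader(xmlFile, StandardCharsets.UTF_8)) {
        // importDatabase advances the StAX reader to the root element and then unmarshals via JAXB.
        return importer.importDatabase(reader).getDatabase().getEntries();
    }
}

Given an article set, this yields one BibEntry per article, since parseArticle and parseBookArticle each add a single entry to bibItems.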
Use of org.jabref.model.entry.BibEntry in project jabref by JabRef: class IsiImporter, method importDatabase.
@Override
public ParserResult importDatabase(BufferedReader reader) throws IOException {
    Objects.requireNonNull(reader);
    List<BibEntry> bibitems = new ArrayList<>();
    StringBuilder sb = new StringBuilder();
    // Pattern fieldPattern = Pattern.compile("^AU |^TI |^SO |^DT |^C1 |^AB
    // |^ID |^BP |^PY |^SE |^PY |^VL |^IS ");
    String str;
    while ((str = reader.readLine()) != null) {
        if (str.length() < 3) {
            continue;
        }
        // beginning of a new item
        if ("PT ".equals(str.substring(0, 3))) {
            sb.append("::").append(str);
        } else {
            String beg = str.substring(0, 3).trim();
            // quick and dirty and it works!
            if (beg.length() == 2) {
                // mark the beginning of each field
                sb.append(" ## ");
                sb.append(str);
            } else {
                // mark the end of each line
                sb.append("EOLEOL");
                // remove the initial spaces
                sb.append(str.trim());
            }
        }
    }
    String[] entries = sb.toString().split("::");
    Map<String, String> hm = new HashMap<>();
    // skip the first entry as it is either empty or has document header
    for (String entry : entries) {
        String[] fields = entry.split(" ## ");
        if (fields.length == 0) {
            fields = entry.split("\n");
        }
        String Type = "";
        String PT = "";
        String pages = "";
        hm.clear();
        for (String field : fields) {
            // empty field, do nothing
            if (field.length() <= 2) {
                continue;
            }
            String beg = field.substring(0, 2);
            String value = field.substring(3);
            if (value.startsWith(" - ")) {
                value = value.substring(3);
            }
            value = value.trim();
            if ("PT".equals(beg)) {
                if (value.startsWith("J")) {
                    PT = "article";
                } else {
                    PT = value;
                }
                // make all of them PT?
                Type = "article";
            } else if ("TY".equals(beg)) {
                if ("JOUR".equals(value)) {
                    Type = "article";
                } else if ("CONF".equals(value)) {
                    Type = "inproceedings";
                }
            } else if ("JO".equals(beg)) {
                hm.put(FieldName.BOOKTITLE, value);
            } else if ("AU".equals(beg)) {
                String author = IsiImporter.isiAuthorsConvert(value.replace("EOLEOL", " and "));
                // if there is already someone there, then append with "and"
                if (hm.get(FieldName.AUTHOR) != null) {
                    author = hm.get(FieldName.AUTHOR) + " and " + author;
                }
                hm.put(FieldName.AUTHOR, author);
            } else if ("TI".equals(beg)) {
                hm.put(FieldName.TITLE, value.replace("EOLEOL", " "));
            } else if ("SO".equals(beg) || "JA".equals(beg)) {
                hm.put(FieldName.JOURNAL, value.replace("EOLEOL", " "));
            } else if ("ID".equals(beg) || "KW".equals(beg)) {
                value = value.replace("EOLEOL", " ");
                String existingKeywords = hm.get(FieldName.KEYWORDS);
                if ((existingKeywords == null) || existingKeywords.contains(value)) {
                    existingKeywords = value;
                } else {
                    existingKeywords += ", " + value;
                }
                hm.put(FieldName.KEYWORDS, existingKeywords);
            } else if ("AB".equals(beg)) {
                hm.put(FieldName.ABSTRACT, value.replace("EOLEOL", " "));
            } else if ("BP".equals(beg) || "BR".equals(beg) || "SP".equals(beg)) {
                pages = value;
            } else if ("EP".equals(beg)) {
                int detpos = value.indexOf(' ');
                // tweak for IEEE Explore
                if ((detpos != -1) && !value.substring(0, detpos).trim().isEmpty()) {
                    value = value.substring(0, detpos);
                }
                pages = pages + "--" + value;
            } else if ("PS".equals(beg)) {
                pages = IsiImporter.parsePages(value);
            } else if ("AR".equals(beg)) {
                pages = value;
            } else if ("IS".equals(beg)) {
                hm.put(FieldName.NUMBER, value);
            } else if ("PY".equals(beg)) {
                hm.put(FieldName.YEAR, value);
            } else if ("VL".equals(beg)) {
                hm.put(FieldName.VOLUME, value);
            } else if ("PU".equals(beg)) {
                hm.put(FieldName.PUBLISHER, value);
            } else if ("DI".equals(beg)) {
                hm.put(FieldName.DOI, value);
            } else if ("PD".equals(beg)) {
                String month = IsiImporter.parseMonth(value);
                if (month != null) {
                    hm.put(FieldName.MONTH, month);
                }
            } else if ("DT".equals(beg)) {
                Type = value;
                if ("Review".equals(Type)) {
                    // set "Review" in Note/Comment?
                    Type = "article";
                } else if (Type.startsWith("Article") || Type.startsWith("Journal") || "article".equals(PT)) {
                    Type = "article";
                } else {
                    Type = BibEntry.DEFAULT_TYPE;
                }
            } else if ("CR".equals(beg)) {
                hm.put("CitedReferences", value.replace("EOLEOL", " ; ").trim());
            } else {
                // Preserve all other entries except
                if ("ER".equals(beg) || "EF".equals(beg) || "VR".equals(beg) || "FN".equals(beg)) {
                    continue;
                }
                hm.put(beg.toLowerCase(Locale.ROOT), value);
            }
        }
        if (!"".equals(pages)) {
            hm.put(FieldName.PAGES, pages);
        }
        // Skip empty entries
        if (hm.isEmpty()) {
            continue;
        }
        BibEntry b = new BibEntry(Type);
        // id assumes an existing database so don't
        // Remove empty fields:
        List<Object> toRemove = new ArrayList<>();
        for (Map.Entry<String, String> field : hm.entrySet()) {
            String content = field.getValue();
            if ((content == null) || content.trim().isEmpty()) {
                toRemove.add(field.getKey());
            }
        }
        for (Object aToRemove : toRemove) {
            hm.remove(aToRemove);
        }
        // Polish entries
        IsiImporter.processSubSup(hm);
        IsiImporter.processCapitalization(hm);
        b.setField(hm);
        bibitems.add(b);
    }
    return new ParserResult(bibitems);
}
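For a quick end-to-end check, the importer can be fed a small ISI record through a StringReader. The record below is made up, and the sketch assumes IsiImporter has a public no-argument constructor; the continuation line under TI is folded back into the title via the EOLEOL marker described above.

public List<BibEntry> importToyIsiRecord() throws IOException {
    String record = "PT J\n"
            + "AU Doe, J\n"
            + "TI A rather long article\n"
            + "   title on two lines\n" // continuation line, later rejoined with a space
            + "SO Journal of Examples\n"
            + "PY 2017\n"
            + "ER\n";
    IsiImporter importer = new IsiImporter(); // assumed no-argument constructor
    ParserResult result = importer.importDatabase(new BufferedReader(new StringReader(record)));
    return result.getDatabase().getEntries(); // expected: a single "article" entry
}

The "PT " prefix opens the record, each two-letter tag line is marked with " ## ", and indented continuation lines are glued on with "EOLEOL", which is exactly the pre-processing the loop above performs before the per-field switch runs.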