Use of org.jabref.logic.importer.FetcherException in the JabRef project — class ArXiv, method callApi.
/**
 * Queries the arXiv API.
 *
 * If only {@code searchQuery} is given, then the API will return results for each article that matches the query.
 * If only {@code ids} is given, then the API will return results for each article in the list.
 * If both {@code searchQuery} and {@code ids} are given, then the API will return each article in
 * {@code ids} that matches {@code searchQuery}. This allows the API to act as a results filter.
 *
 * @param searchQuery the search query used to find articles;
 *                    <a href="http://arxiv.org/help/api/user-manual#query_details">details</a>
 * @param ids         a list of arXiv identifiers
 * @param start       the index of the first returned result (zero-based)
 * @param maxResults  the number of maximal results (has to be smaller than 2000)
 * @return the response from the API as a XML document (Atom 1.0)
 * @throws FetcherException if there was a problem while building the URL or the API was not accessible
 */
private Document callApi(String searchQuery, List<ArXivIdentifier> ids, int start, int maxResults) throws FetcherException {
    if (maxResults > 2000) {
        throw new IllegalArgumentException("The arXiv API limits the number of maximal results to be 2000");
    }

    try {
        URIBuilder uriBuilder = new URIBuilder(API_URL);
        // The arXiv API has problems with accents, so we remove them (i.e. Fréchet -> Frechet)
        if (StringUtil.isNotBlank(searchQuery)) {
            uriBuilder.addParameter("search_query", StringUtil.stripAccents(searchQuery));
        }
        if (!ids.isEmpty()) {
            uriBuilder.addParameter("id_list", ids.stream().map(ArXivIdentifier::getNormalized).collect(Collectors.joining(",")));
        }
        uriBuilder.addParameter("start", String.valueOf(start));
        uriBuilder.addParameter("max_results", String.valueOf(maxResults));
        URL url = uriBuilder.build().toURL();

        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        // Harden the parser against XXE: the response is remote, untrusted XML,
        // so reject DTDs (and thereby all external entity declarations) outright.
        factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
        factory.setExpandEntityReferences(false);
        DocumentBuilder builder = factory.newDocumentBuilder();

        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        if (connection.getResponseCode() == 400) {
            // Bad request error from server, try to get more information
            throw getException(builder.parse(connection.getErrorStream()));
        } else {
            return builder.parse(connection.getInputStream());
        }
    } catch (SAXException | ParserConfigurationException | IOException | URISyntaxException exception) {
        throw new FetcherException("arXiv API request failed", exception);
    }
}
Use of org.jabref.logic.importer.FetcherException in the JabRef project — class ArXiv, method getException.
/**
 * Builds a {@link FetcherException} from the Atom error document returned by the arXiv API.
 *
 * An API error response contains a single {@code <entry>} whose {@code <id>} points to
 * {@code http://arxiv.org/api/errors} and whose {@code <summary>} carries the human-readable message.
 *
 * @param error the parsed error document from the API
 * @return a FetcherException carrying the server's error summary if one could be extracted,
 *         otherwise a generic "arXiv API request failed" exception
 */
private FetcherException getException(Document error) {
    List<Node> entries = XMLUtil.asList(error.getElementsByTagName("entry"));

    // </entry>
    if (entries.size() == 1) {
        Node node = entries.get(0);
        Optional<String> id = XMLUtil.getNodeContent(node, "id");
        // Primitive boolean instead of boxed Boolean: orElse(false) guarantees a non-null value,
        // so boxing here is pure overhead.
        boolean isError = id.map(idContent -> idContent.startsWith("http://arxiv.org/api/errors")).orElse(false);
        if (isError) {
            String errorMessage = XMLUtil.getNodeContent(node, "summary").orElse("Unknown error");
            return new FetcherException(errorMessage);
        }
    }
    return new FetcherException("arXiv API request failed");
}
Use of org.jabref.logic.importer.FetcherException in the JabRef project — class DoiFetcher, method performSearchById.
/**
 * Looks up the BibTeX entry for the given DOI via content negotiation on doi.org.
 *
 * @param identifier the DOI to resolve
 * @return the fetched entry, or {@link Optional#empty()} if the DOI resolved but yielded no entry
 * @throws FetcherException if the identifier is not a valid DOI, the connection fails,
 *                          or the returned BibTeX cannot be parsed
 */
@Override
public Optional<BibEntry> performSearchById(String identifier) throws FetcherException {
    Optional<DOI> doi = DOI.parse(identifier);

    // Guard clause: reject invalid identifiers up front. FetcherException is not an
    // IOException/ParseException, so this throw behaves exactly as it did inside the try.
    if (!doi.isPresent()) {
        throw new FetcherException(Localization.lang("Invalid_DOI:_'%0'.", identifier));
    }

    try {
        // Request BibTeX via content negotiation against the DOI resolver
        URL doiURL = new URL(doi.get().getURIAsASCIIString());
        URLDownload download = new URLDownload(doiURL);
        download.addHeader("Accept", "application/x-bibtex");
        String bibtexString = download.asString();

        // Parse the single returned entry and run the fetcher's post-cleanup on it
        Optional<BibEntry> fetchedEntry = BibtexParser.singleFromString(bibtexString, preferences);
        fetchedEntry.ifPresent(this::doPostCleanup);
        return fetchedEntry;
    } catch (IOException e) {
        throw new FetcherException(Localization.lang("Connection error"), e);
    } catch (ParseException e) {
        throw new FetcherException("Could not parse BibTeX entry", e);
    }
}
Use of org.jabref.logic.importer.FetcherException in the JabRef project — class AstrophysicsDataSystem, method performSearch.
/**
 * Runs the given free-text query against the ADS export API and returns the parsed entries.
 *
 * @param query the search query; a blank query short-circuits to an empty list
 * @return the fetched and post-cleaned entries (never {@code null})
 * @throws FetcherException if the query URL is malformed, the connection fails,
 *                          or the response cannot be parsed
 */
@Override
public List<BibEntry> performSearch(String query) throws FetcherException {
    if (StringUtil.isBlank(query)) {
        return Collections.emptyList();
    }

    try {
        URLConnection connection = getURLForQuery(query).openConnection();
        connection.setRequestProperty("User-Agent", URLDownload.USER_AGENT);
        // try-with-resources closes the stream; any IOException raised here is handled by
        // the outer catch below — the original's inner catch duplicated it verbatim.
        try (InputStream stream = connection.getInputStream()) {
            List<BibEntry> fetchedEntries = getParser().parseEntries(stream);
            // Post-cleanup
            fetchedEntries.forEach(this::doPostCleanup);
            return fetchedEntries;
        }
    } catch (URISyntaxException | MalformedURLException e) {
        throw new FetcherException("Search URI is malformed", e);
    } catch (IOException e) {
        throw new FetcherException("An I/O exception occurred", e);
    } catch (ParseException e) {
        throw new FetcherException("Error occurred when parsing entry", Localization.lang("Error occurred when parsing entry"), e);
    }
}
Use of org.jabref.logic.importer.FetcherException in the JabRef project — class GoogleScholar, method obtainAndModifyCookie.
/**
 * Fetches the cookies set by scholar.google.com and appends ":CF=4" to each cookie value,
 * which requests "Citation format bibtex" from Google Scholar.
 *
 * @throws FetcherException if the cookies could not be obtained
 */
private void obtainAndModifyCookie() throws FetcherException {
    try {
        URLDownload downloader = new URLDownload("https://scholar.google.com");
        // append "CF=4" which represents "Citation format bibtex"
        downloader.getCookieFromUrl()
                  .forEach(cookie -> cookie.setValue(cookie.getValue() + ":CF=4"));
    } catch (IOException e) {
        throw new FetcherException("Cookie configuration for Google Scholar failed.", e);
    }
}
Aggregations