Use of org.wikipediacleaner.api.APIException in project wpcleaner by WPCleaner.
The class ApiXmlAllMessagesResult, method executeMessages:
/**
 * Execute messages request.
 *
 * @param properties Properties defining the request.
 * @param messages Map of messages to be filled with the results.
 * @return True if the request should be continued.
 * @throws APIException Exception thrown by the API.
 */
@Override
public boolean executeMessages(Map<String, String> properties, Map<String, String> messages) throws APIException {
  try {
    Element root = getRoot(properties, ApiRequest.MAX_ATTEMPTS);

    // Retrieve general information
    XPathExpression<Element> xpa = XPathFactory.instance().compile("/api/query/allmessages/message", Filters.element());
    List<Element> listMessages = xpa.evaluate(root);
    Iterator<Element> itMessages = listMessages.iterator();
    while (itMessages.hasNext()) {
      Element message = itMessages.next();
      String name = message.getAttributeValue("name");
      String text = message.getText().trim();
      messages.put(name, text);
    }

    // Retrieve continue
    return shouldContinue(root, "/api/query-continue/allmessages", properties);
  } catch (JDOMException e) {
    log.error("Error loading messages", e);
    throw new APIException("Error parsing XML", e);
  }
}
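For context, a minimal usage sketch (hypothetical driver code, not part of WPCleaner) paging through all messages; the action/meta property values follow the standard MediaWiki allmessages request:

// Hypothetical sketch: "result" stands for an ApiXmlAllMessagesResult instance.
Map<String, String> properties = new HashMap<>();
properties.put("action", "query");
properties.put("meta", "allmessages");
Map<String, String> messages = new HashMap<>();
// executeMessages fills "messages" and, via shouldContinue, updates
// "properties" with continuation parameters while more batches remain.
while (result.executeMessages(properties, messages)) {
  // Loop until the API reports no further continuation
}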
Use of org.wikipediacleaner.api.APIException in project wpcleaner by WPCleaner.
The class ApiXmlSiteInfoResult, method executeSiteInformation:
/**
 * Execute site information request.
 *
 * @param properties Properties defining the request.
 * @throws APIException Exception thrown by the API.
 */
@Override
public void executeSiteInformation(Map<String, String> properties) throws APIException {
  try {
    Element root = getRoot(properties, ApiRequest.MAX_ATTEMPTS);
    WikiConfiguration wikiConfiguration = getWiki().getWikiConfiguration();

    // Retrieve general information
    XPathExpression<Element> xpa = XPathFactory.instance().compile("/api/query/general", Filters.element());
    Element generalNode = xpa.evaluateFirst(root);
    if (generalNode != null) {
      wikiConfiguration.setArticlePath(generalNode.getAttributeValue("articlepath"));
      wikiConfiguration.setMaxArticleSize(generalNode.getAttributeValue("maxarticlesize"));
      wikiConfiguration.setScript(generalNode.getAttributeValue("script"));
      wikiConfiguration.setServer(generalNode.getAttributeValue("server"));
    }

    // Retrieve namespaces
    HashMap<Integer, Namespace> namespaces = new HashMap<>();
    xpa = XPathFactory.instance().compile("/api/query/namespaces/ns", Filters.element());
    List<Element> results = xpa.evaluate(root);
    Iterator<Element> iter = results.iterator();
    while (iter.hasNext()) {
      Element currentNode = iter.next();
      String title = currentNode.getText();
      String canonical = currentNode.getAttributeValue("canonical");
      String id = currentNode.getAttributeValue("id");
      EnumCaseSensitiveness caseSensitiveness = EnumCaseSensitiveness.getCase(currentNode.getAttributeValue("case"));
      boolean subPages = (currentNode.getAttribute("subpages") != null);
      Namespace ns = new Namespace(id, title, canonical, caseSensitiveness, subPages);
      namespaces.put(ns.getId(), ns);
    }

    // Retrieve namespace aliases
    xpa = XPathFactory.instance().compile("/api/query/namespacealiases/ns", Filters.element());
    results = xpa.evaluate(root);
    iter = results.iterator();
    while (iter.hasNext()) {
      Element currentNode = iter.next();
      try {
        Integer nsId = Integer.parseInt(currentNode.getAttributeValue("id"));
        Namespace namespace = namespaces.get(nsId);
        if (namespace != null) {
          namespace.addAlias(currentNode.getText());
        }
      } catch (NumberFormatException e) {
        // Invalid namespace id: ignore the alias
      }
    }

    // Update namespace list
    LinkedList<Namespace> list = new LinkedList<>(namespaces.values());
    wikiConfiguration.setNamespaces(list);

    // Retrieve languages
    List<Language> languages = new ArrayList<>();
    xpa = XPathFactory.instance().compile("/api/query/languages/lang", Filters.element());
    results = xpa.evaluate(root);
    iter = results.iterator();
    while (iter.hasNext()) {
      Element currentNode = iter.next();
      String code = currentNode.getAttributeValue("code");
      String name = currentNode.getText();
      languages.add(new Language(code, name));
    }
    wikiConfiguration.setLanguages(languages);

    // Retrieve interwikis
    List<Interwiki> interwikis = new ArrayList<>();
    xpa = XPathFactory.instance().compile("/api/query/interwikimap/iw", Filters.element());
    results = xpa.evaluate(root);
    iter = results.iterator();
    while (iter.hasNext()) {
      Element currentNode = iter.next();
      String prefix = currentNode.getAttributeValue("prefix");
      boolean local = (currentNode.getAttribute("local") != null);
      String language = currentNode.getAttributeValue("language");
      String url = currentNode.getAttributeValue("url");
      interwikis.add(new Interwiki(prefix, local, language, url));
    }
    wikiConfiguration.setInterwikis(interwikis);

    // Retrieve magic words
    List<MagicWord> magicWords = new ArrayList<>();
    xpa = XPathFactory.instance().compile("/api/query/magicwords/magicword", Filters.element());
    results = xpa.evaluate(root);
    iter = results.iterator();
    XPathExpression<Element> xpaAlias = XPathFactory.instance().compile("./aliases/alias", Filters.element());
    while (iter.hasNext()) {
      Element currentNode = iter.next();
      String magicWord = currentNode.getAttributeValue("name");
      List<String> aliases = new ArrayList<>();
      List<Element> resultsAlias = xpaAlias.evaluate(currentNode);
      Iterator<Element> iterAlias = resultsAlias.iterator();
      while (iterAlias.hasNext()) {
        Element currentAlias = iterAlias.next();
        String alias = currentAlias.getText();
        aliases.add(alias);
      }
      boolean caseSensitive = (currentNode.getAttribute("case-sensitive") != null);
      magicWords.add(new MagicWord(magicWord, aliases, caseSensitive));
    }
    wikiConfiguration.setMagicWords(magicWords);

    // Retrieve special page aliases
    Map<String, SpecialPage> specialPages = new HashMap<>();
    xpa = XPathFactory.instance().compile("/api/query/specialpagealiases/specialpage", Filters.element());
    results = xpa.evaluate(root);
    iter = results.iterator();
    while (iter.hasNext()) {
      Element currentNode = iter.next();
      String specialPage = currentNode.getAttributeValue("realname");
      List<String> aliases = new ArrayList<>();
      List<Element> resultsAlias = xpaAlias.evaluate(currentNode);
      Iterator<Element> iterAlias = resultsAlias.iterator();
      while (iterAlias.hasNext()) {
        Element currentAlias = iterAlias.next();
        String alias = currentAlias.getText();
        aliases.add(alias);
      }
      specialPages.put(specialPage, new SpecialPage(specialPage, aliases));
    }
    wikiConfiguration.setSpecialPages(specialPages);

    // Retrieve linter configuration
    List<LinterCategory> linterCategories = new ArrayList<>();
    xpa = XPathFactory.instance().compile("/api/query/general/linter/*", Filters.element());
    results = xpa.evaluate(root);
    iter = results.iterator();
    while (iter.hasNext()) {
      Element currentNode = iter.next();
      String level = currentNode.getName();
      for (Element child : currentNode.getChildren()) {
        linterCategories.add(new LinterCategory(level, child.getTextTrim()));
      }
    }
    wikiConfiguration.setLinterCategories(linterCategories);

    // Retrieve extensions
    xpa = XPathFactory.instance().compile("/api/query/extensions/ext", Filters.element());
    results = xpa.evaluate(root);
    iter = results.iterator();
    while (iter.hasNext()) {
      Element currentNode = iter.next();
      String name = currentNode.getAttributeValue("name");
      if ((name != null) && (name.equals("Translate"))) {
        wikiConfiguration.setTranslatable(true);
      }
    }
  } catch (JDOMException e) {
    log.error("Error loading site information", e);
    throw new APIException("Error parsing XML", e);
  }
}
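The site-information properties a caller would request correspond one-to-one to the XPath queries parsed above. A hypothetical setup sketch (the exact property builder used by WPCleaner is not shown in this section; the siprop values follow the standard MediaWiki siteinfo API):

// Hypothetical request properties; "result" stands for an
// ApiXmlSiteInfoResult instance.
Map<String, String> properties = new HashMap<>();
properties.put("action", "query");
properties.put("meta", "siteinfo");
properties.put("siprop", "general|namespaces|namespacealiases|languages|"
    + "interwikimap|magicwords|specialpagealiases|extensions");
result.executeSiteInformation(properties);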
Use of org.wikipediacleaner.api.APIException in project wpcleaner by WPCleaner.
The class ApiJsonResult, method getRoot:
/**
 * Send a request to the MediaWiki API.
 *
 * @param properties Properties defining the request.
 * @param maxTry Maximum number of tries.
 * @return Answer of the MediaWiki API.
 * @throws APIException Exception thrown by the API.
 */
protected JsonNode getRoot(Map<String, String> properties, int maxTry) throws APIException {
  int attempt = 0;
  for (;;) {
    JsonNode root = null;
    HttpMethod method = null;
    InputStream stream = null;
    try {
      // Executing HTTP method
      attempt++;
      method = createHttpMethod(properties);
      int statusCode = getHttpClient().executeMethod(method);

      // Accessing response
      stream = method.getResponseBodyAsStream();
      stream = new BufferedInputStream(stream);
      Header contentEncoding = method.getResponseHeader("Content-Encoding");
      if (contentEncoding != null) {
        if (contentEncoding.getValue().equals("gzip")) {
          stream = new GZIPInputStream(stream);
        }
      }

      // Read the response
      if (statusCode == HttpStatus.SC_OK) {
        ObjectMapper mapper = new ObjectMapper(factory);
        root = mapper.readValue(stream, JsonNode.class);
        traceDocument(root);
        checkForError(root);
      } else {
        try {
          while (stream.read() >= 0) {
            // Drain the response body so the connection can be reused
          }
        } catch (IOException e) {
          // Ignore errors while draining the response
        }
      }

      // Act depending on the status
      if (statusCode != HttpStatus.SC_OK) {
        String message = "URL access returned " + HttpStatus.getStatusText(statusCode);
        log.error(message);
        if (attempt > maxTry) {
          log.warn("Error. Maximum attempts count reached.");
          throw new APIException(message);
        }
        try {
          Thread.sleep(30000);
        } catch (InterruptedException e) {
          // Nothing to do
        }
      } else {
        return root;
      }
    } catch (IOException e) {
      String message = "IOException: " + e.getMessage();
      log.error(message);
      if (attempt > maxTry) {
        log.warn("Error. Maximum attempts count reached.");
        throw new APIException("Error accessing MediaWiki", e);
      }
      try {
        Thread.sleep(30000);
      } catch (InterruptedException e2) {
        // Nothing to do
      }
    } catch (APIException e) {
      if (!e.shouldRetry() || (attempt > e.getMaxRetry())) {
        throw e;
      }
      e.waitForRetry();
    } finally {
      if (stream != null) {
        try {
          stream.close();
        } catch (IOException e) {
          log.warn("Error closing stream");
        }
      }
      if (method != null) {
        method.releaseConnection();
      }
    }
    log.warn("Error. Trying again");
  }
}
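A result class built on this method can walk the returned tree with Jackson's path(), which yields a missing node instead of null and so chains safely. A hypothetical sketch (the field names depend on the API format version and are assumptions here, not taken from WPCleaner):

// Hypothetical caller sketch: fetch the root and iterate a JSON answer.
JsonNode root = getRoot(properties, ApiRequest.MAX_ATTEMPTS);
for (JsonNode message : root.path("query").path("allmessages")) {
  String name = message.path("name").asText();
  String content = message.path("content").asText();
  // ... store name/content as needed
}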
Use of org.wikipediacleaner.api.APIException in project wpcleaner by WPCleaner.
The class ApiXmlResult, method getRoot:
/**
 * Send a request to the MediaWiki API.
 *
 * @param properties Properties defining the request.
 * @param maxTry Maximum number of tries.
 * @return Answer of the MediaWiki API.
 * @throws JDOMParseException Exception thrown due to the DOM.
 * @throws APIException Exception thrown by the API.
 */
protected Element getRoot(Map<String, String> properties, int maxTry) throws JDOMParseException, APIException {
  int attempt = 0;
  for (;;) {
    Element root = null;
    HttpMethod method = null;
    InputStream stream = null;
    try {
      // Executing HTTP method
      attempt++;
      method = createHttpMethod(properties);
      int statusCode = getHttpClient().executeMethod(method);

      // Accessing response
      stream = method.getResponseBodyAsStream();
      stream = new BufferedInputStream(stream);
      Header contentEncoding = method.getResponseHeader("Content-Encoding");
      if (contentEncoding != null) {
        if (contentEncoding.getValue().equals("gzip")) {
          stream = new GZIPInputStream(stream);
        }
      }

      // Read the response
      if (statusCode == HttpStatus.SC_OK) {
        SAXBuilder sxb = new SAXBuilder();
        Document document = sxb.build(stream);
        traceDocument(document);
        root = document.getRootElement();
        checkForError(root);
      } else {
        try {
          while (stream.read() >= 0) {
            // Drain the response body so the connection can be reused
          }
        } catch (IOException e) {
          // Ignore errors while draining the response
        }
      }

      // Act depending on the status
      if (statusCode != HttpStatus.SC_OK) {
        String message = "URL access returned " + HttpStatus.getStatusText(statusCode);
        log.error(message);
        if (attempt > maxTry) {
          log.warn("Error. Maximum attempts count reached.");
          throw new APIException(message);
        }
        try {
          Thread.sleep(30000);
        } catch (InterruptedException e) {
          // Nothing to do
        }
      } else {
        return root;
      }
    } catch (JDOMException e) {
      String message = "JDOMException: " + e.getMessage();
      log.error(message);
      if (attempt > maxTry) {
        log.warn("Error. Maximum attempts count reached.");
        throw new APIException("Error parsing XML result", e);
      }
      try {
        Thread.sleep(30000);
      } catch (InterruptedException e2) {
        // Nothing to do
      }
    } catch (IOException e) {
      String message = "IOException: " + e.getMessage();
      log.error(message);
      if (attempt > maxTry) {
        log.warn("Error. Maximum attempts count reached.");
        throw new APIException("Error accessing MediaWiki", e);
      }
      try {
        Thread.sleep(30000);
      } catch (InterruptedException e2) {
        // Nothing to do
      }
    } catch (APIException e) {
      if (!e.shouldRetry() || (attempt > e.getMaxRetry())) {
        throw e;
      }
      e.waitForRetry();
    } finally {
      if (stream != null) {
        try {
          stream.close();
        } catch (IOException e) {
          log.warn("Error closing stream");
        }
      }
      if (method != null) {
        method.releaseConnection();
      }
    }
    log.warn("Error. Trying again");
  }
}
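This method and the JSON variant above share the same retry skeleton: execute the request, decode gzip, parse, back off 30 seconds on failure, and give up once attempt exceeds maxTry. A hypothetical sketch of how that common loop could be factored out; withRetry and attemptOnce are invented names, not WPCleaner API:

// Hypothetical helper (uses java.util.concurrent.Callable): retry one
// request attempt with the same fixed 30-second back-off used by both
// getRoot variants; "attemptOnce" performs a single HTTP round trip
// and parse, returning the root on success.
private <T> T withRetry(int maxTry, Callable<T> attemptOnce) throws APIException {
  int attempt = 0;
  for (;;) {
    attempt++;
    try {
      return attemptOnce.call();
    } catch (APIException e) {
      // Keep the original behaviour for API-level errors
      if (!e.shouldRetry() || (attempt > e.getMaxRetry())) {
        throw e;
      }
      e.waitForRetry();
    } catch (Exception e) {
      if (attempt > maxTry) {
        log.warn("Error. Maximum attempts count reached.");
        throw new APIException("Error accessing MediaWiki", e);
      }
      try {
        Thread.sleep(30000);
      } catch (InterruptedException e2) {
        // Nothing to do
      }
    }
    log.warn("Error. Trying again");
  }
}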
Use of org.wikipediacleaner.api.APIException in project wpcleaner by WPCleaner.
The class ApiXmlResult, method checkForError:
/**
 * Check for errors reported by the API.
 *
 * @param root Document root.
 * @throws APIException Exception thrown by the API.
 */
protected void checkForError(Element root) throws APIException {
  if (root == null) {
    return;
  }

  // Check for errors
  XPathExpression<Element> xpa = XPathFactory.instance().compile("/api/error", Filters.element());
  List<Element> listErrors = xpa.evaluate(root);
  if (listErrors != null) {
    Iterator<Element> iterErrors = listErrors.iterator();
    while (iterErrors.hasNext()) {
      Element currentNode = iterErrors.next();
      String text = "Error reported: " + currentNode.getAttributeValue("code") + " - " + currentNode.getAttributeValue("info");
      log.warn(text);
      throw new APIException(text, currentNode.getAttributeValue("code"));
    }
  }

  // Check for warnings
  xpa = XPathFactory.instance().compile("/api/warnings/*", Filters.element());
  List<Element> listWarnings = xpa.evaluate(root);
  if (listWarnings != null) {
    Iterator<Element> iterWarnings = listWarnings.iterator();
    while (iterWarnings.hasNext()) {
      Element currentNode = iterWarnings.next();
      log.warn("Warning reported: " + currentNode.getName() + " - " + currentNode.getValue());
    }
  }
}
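To make the matched shape concrete, a hypothetical sketch building the <api><error .../></api> answer this method looks for (the maxlag code and info text are only examples):

// Hypothetical sketch: construct a MediaWiki-style error document with
// JDOM and let checkForError raise the corresponding APIException.
Element api = new Element("api");
Element error = new Element("error");
error.setAttribute("code", "maxlag");
error.setAttribute("info", "Waiting for a database server");
api.addContent(error);
Document document = new Document(api);
try {
  checkForError(document.getRootElement());
} catch (APIException e) {
  // Expected message: "Error reported: maxlag - Waiting for a database server"
}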