use of com.cloudant.http.internal.interceptors.HttpConnectionInterceptorException in project java-cloudant by cloudant.
the class HttpConnection method execute.
/**
* <p>
* Execute request without returning data from server.
* </p>
* <p>
* Call {@code responseAsString}, {@code responseAsBytes}, or {@code responseAsInputStream}
* after {@code execute} if the response body is required.
* </p>
* <P>
* Note if the URL contains user information it will be encoded in a BasicAuth header.
* </P>
*
* @return An {@link HttpConnection} which can be used to obtain the response body
* @throws IOException if there was a problem writing data to the server
*/
public HttpConnection execute() throws IOException {
boolean retry = true;
while (retry && numberOfRetries-- > 0) {
connection = connectionFactory.openConnection(url);
if (url.getUserInfo() != null) {
// Insert at position 0 in case another interceptor wants to overwrite the BasicAuth
requestInterceptors.add(0, new BasicAuthInterceptor(url.getUserInfo()));
}
// always read the result, so we can retrieve the HTTP response code
connection.setDoInput(true);
connection.setRequestMethod(requestMethod);
if (contentType != null) {
connection.setRequestProperty("Content-type", contentType);
}
// context.connection.getConnection().setChunkedStreamingMode(16384);
if (input != null) {
connection.setDoOutput(true);
if (inputLength != -1) {
// TODO Remove this cast to int when the minimum supported level is 1.7.
// On 1.7 upwards this method takes a long, otherwise int.
connection.setFixedLengthStreamingMode((int) this.inputLength);
} else {
// Use 0 for the default size
connection.setChunkedStreamingMode(0);
// Note that CouchDB does not currently work for a chunked multipart stream, see
// https://issues.apache.org/jira/browse/COUCHDB-1403. Cases that use
// multipart need to provide the content length until that is fixed.
}
}
currentContext = (currentContext == null) ? new HttpConnectionInterceptorContext(this) : new HttpConnectionInterceptorContext(this, currentContext.interceptorStates);
for (HttpConnectionRequestInterceptor requestInterceptor : requestInterceptors) {
currentContext = requestInterceptor.interceptRequest(currentContext);
}
// Copy any entries from the requestProperties map (which interceptors may have modified) onto the connection
for (Map.Entry<String, String> property : requestProperties.entrySet()) {
connection.setRequestProperty(property.getKey(), property.getValue());
}
// Log the request
if (requestIsLoggable && logger.isLoggable(Level.FINE)) {
logger.fine(String.format("%s request%s", getLogRequestIdentifier(), (connection.usingProxy() ? " via proxy" : "")));
}
// Log the request headers
if (requestIsLoggable && logger.isLoggable(Level.FINER)) {
logger.finer(String.format("%s request headers %s", getLogRequestIdentifier(), connection.getRequestProperties()));
}
if (input != null) {
InputStream is = input.getInputStream();
OutputStream os = connection.getOutputStream();
try {
// The buffer size used for writing to this output stream has an impact on the HTTP
// chunk size, so we make it a pretty large size to avoid limiting the size of those
// chunks (although this appears in turn to set the chunk sizes).
IOUtils.copyLarge(is, os, new byte[16 * 1024]);
os.flush();
} finally {
Utils.close(is);
Utils.close(os);
}
}
// Log the response
if (requestIsLoggable && logger.isLoggable(Level.FINE)) {
logger.fine(String.format("%s response %s %s", getLogRequestIdentifier(), connection.getResponseCode(), connection.getResponseMessage()));
}
// Log the response headers
if (requestIsLoggable && logger.isLoggable(Level.FINER)) {
logger.finer(String.format("%s response headers %s", getLogRequestIdentifier(), connection.getHeaderFields()));
}
for (HttpConnectionResponseInterceptor responseInterceptor : responseInterceptors) {
try {
currentContext = responseInterceptor.interceptResponse(currentContext);
} catch (HttpConnectionInterceptorException e) {
// Sadly the current interceptor API doesn't allow an IOException to be thrown
// so to avoid swallowing them the interceptors need to wrap them in the runtime
// HttpConnectionInterceptorException and we can then unwrap them here.
Throwable cause = e.getCause();
if (cause != null && cause instanceof IOException) {
throw (IOException) cause;
} else {
throw e;
}
}
}
// retry flag is set by the final step in the response interceptor pipeline
retry = currentContext.replayRequest;
// If we are going to retry, consume and close any error stream so the connection is eligible for re-use.
if (retry && numberOfRetries > 0) {
Utils.consumeAndCloseStream(connection.getErrorStream());
}
if (numberOfRetries == 0) {
logger.info("Maximum number of retries reached");
}
}
// return ourselves to allow method chaining
return this;
}
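A minimal usage sketch of the pattern described in the Javadoc above: build a request, call execute(), then read the body. The Http.GET factory helper and the example URL are assumptions for illustration; only execute() and responseAsString() are taken from the documentation above.

import java.net.URL;

import com.cloudant.http.Http;
import com.cloudant.http.HttpConnection;

public class ExecuteUsageSketch {
    public static void main(String[] args) throws Exception {
        // Assumed factory helper; builds a GET HttpConnection for the given URL
        HttpConnection request = Http.GET(new URL("https://account.cloudant.com/_all_dbs"));
        // execute() returns the same HttpConnection, so calls can be chained;
        // user info embedded in the URL would be sent as a BasicAuth header
        String body = request.execute().responseAsString();
        System.out.println(body);
    }
}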
use of com.cloudant.http.internal.interceptors.HttpConnectionInterceptorException in project java-cloudant by cloudant.
the class CouchDbClient method execute.
/**
* Execute a HTTP request and handle common error cases.
*
* @param connection the HttpConnection request to execute
* @return the executed HttpConnection
* @throws CouchDbException for HTTP error codes or if an IOException was thrown
*/
public HttpConnection execute(HttpConnection connection) {
// set our HttpUrlFactory on the connection
connection.connectionFactory = factory;
// all CouchClient requests want to receive application/json responses
connection.requestProperties.put("Accept", "application/json");
connection.responseInterceptors.addAll(this.responseInterceptors);
connection.requestInterceptors.addAll(this.requestInterceptors);
// error stream - response from server for a 500 etc
InputStream es = null;
// Execute the request; IOExceptions and interceptor failures are mapped to more specific CouchDbException types below
try {
try {
connection = connection.execute();
} catch (HttpConnectionInterceptorException e) {
CouchDbException exception = new CouchDbException(connection.getConnection().getResponseMessage(), connection.getConnection().getResponseCode());
if (e.deserialize) {
try {
JsonObject errorResponse = new Gson().fromJson(e.error, JsonObject.class);
exception.error = getAsString(errorResponse, "error");
exception.reason = getAsString(errorResponse, "reason");
} catch (JsonParseException jpe) {
exception.error = e.error;
}
} else {
exception.error = e.error;
exception.reason = e.reason;
}
throw exception;
}
int code = connection.getConnection().getResponseCode();
String response = connection.getConnection().getResponseMessage();
// everything ok? return the connection
if (code / 100 == 2) {
// success [200,299]
return connection;
} else {
final CouchDbException ex;
switch (code) {
    case HttpURLConnection.HTTP_NOT_FOUND: // 404
        ex = new NoDocumentException(response);
        break;
    case HttpURLConnection.HTTP_CONFLICT: // 409
        ex = new DocumentConflictException(response);
        break;
    case HttpURLConnection.HTTP_PRECON_FAILED: // 412
        ex = new PreconditionFailedException(response);
        break;
    case 429:
        // If a Replay429Interceptor is present it will check for 429 and retry at
        // intervals. If the retries do not succeed or no 429 replay was configured
        // we end up here and throw a TooManyRequestsException.
        ex = new TooManyRequestsException(response);
        break;
    default:
        ex = new CouchDbException(response, code);
        break;
}
es = connection.getConnection().getErrorStream();
// if there is an error stream try to deserialize into the typed exception
if (es != null) {
try {
// read the error stream into memory
byte[] errorResponse = IOUtils.toByteArray(es);
Class<? extends CouchDbException> exceptionClass = ex.getClass();
// treat the error as JSON and try to deserialize
try {
// Register an InstanceCreator that returns the existing exception so Gson just
// populates its fields instead of constructing a new instance, preserving the
// state already set by the constructor.
// Uses a new Gson so we don't accidentally recycle an exception.
Gson g = new GsonBuilder().registerTypeAdapter(exceptionClass, new CouchDbExceptionInstanceCreator(ex)).create();
// Now populate the exception's error/reason and any other fields from the JSON
g.fromJson(new InputStreamReader(new ByteArrayInputStream(errorResponse), "UTF-8"), exceptionClass);
} catch (JsonParseException e) {
// The error stream was not JSON so just set the string content as the
// error field on ex before we throw it
ex.error = new String(errorResponse, "UTF-8");
}
} finally {
close(es);
}
}
ex.setUrl(connection.url.toString());
throw ex;
}
} catch (IOException ioe) {
CouchDbException ex = new CouchDbException("Error retrieving server response", ioe);
ex.setUrl(connection.url.toString());
throw ex;
}
}
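The error-handling branch above relies on Gson's InstanceCreator so that fromJson populates the fields of an already-constructed object instead of building a new one. A self-contained sketch of that technique, using a hypothetical Failure class and inline JSON rather than the real CouchDbException and error stream:

import java.lang.reflect.Type;

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.InstanceCreator;

public class PopulateExistingInstance {

    // Illustrative target type with fields matching the JSON error body
    static class Failure {
        String error;
        String reason;
    }

    public static void main(String[] args) {
        final Failure existing = new Failure();
        // InstanceCreator that always hands Gson the existing object, so fromJson
        // fills in its fields rather than creating a fresh instance
        Gson g = new GsonBuilder()
                .registerTypeAdapter(Failure.class, new InstanceCreator<Failure>() {
                    @Override
                    public Failure createInstance(Type type) {
                        return existing;
                    }
                })
                .create();
        g.fromJson("{\"error\":\"not_found\",\"reason\":\"missing\"}", Failure.class);
        // The existing instance now carries the deserialized values
        System.out.println(existing.error + ": " + existing.reason);
    }
}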
use of com.cloudant.http.internal.interceptors.HttpConnectionInterceptorException in project java-cloudant by cloudant.
the class Replay429Interceptor method interceptResponse.
@Override
public HttpConnectionInterceptorContext interceptResponse(HttpConnectionInterceptorContext context) {
try {
HttpURLConnection urlConnection = context.connection.getConnection();
int code = urlConnection.getResponseCode();
// We only want to take action on a 429 response
if (code != 429) {
return context;
}
// We received a 429
// Get the counter from the request context state
AtomicInteger attemptCounter = context.getState(this, ATTEMPT, AtomicInteger.class);
// If there was no counter yet, then this is the first 429 received for this request
if (attemptCounter == null) {
context.setState(this, ATTEMPT, (attemptCounter = new AtomicInteger()));
}
// Get the current value, and then increment for the next time round
int attempt = attemptCounter.getAndIncrement();
// Check if we have remaining replays
if (attempt < numberOfReplays && context.connection.getNumberOfRetriesRemaining() > 0) {
// Calculate the backoff time, 2^n * initial sleep
long sleepTime = initialSleep * Math.round(Math.pow(2, attempt));
// If the response includes a Retry-After then that is when we will retry, otherwise
// we use the doubling sleep
String retryAfter = preferRetryAfter ? urlConnection.getHeaderField("Retry-After") : null;
if (retryAfter != null) {
// Retry-After may be an HTTP date or a number of seconds; we only handle
// seconds and don't worry about parsing dates for now.
try {
sleepTime = Long.parseLong(retryAfter) * 1000;
if (sleepTime > RETRY_AFTER_CAP) {
sleepTime = RETRY_AFTER_CAP;
logger.severe("Server specified Retry-After value in excess of one " + "hour, capping retry.");
}
} catch (NumberFormatException nfe) {
logger.warning("Invalid Retry-After value from server falling back to " + "default backoff.");
}
}
// Read the reasons and log a warning
String errorString = Utils.collectAndCloseStream(urlConnection.getErrorStream());
logger.warning(errorString + " will retry in " + sleepTime + " ms");
logger.fine("Too many requests backing off for " + sleepTime + " ms.");
// Sleep the thread for the appropriate backoff time
try {
TimeUnit.MILLISECONDS.sleep(sleepTime);
} catch (InterruptedException e) {
logger.fine("Interrupted during 429 backoff wait.");
// If the thread was interrupted we'll just continue and try again a bit earlier
// than planned.
}
// Get ready to replay the request after the backoff time
context.replayRequest = true;
return context;
} else {
return context;
}
} catch (IOException e) {
throw new HttpConnectionInterceptorException(e);
}
}
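For reference, the backoff applied above is initialSleep * 2^attempt, overridden by a numeric Retry-After header (interpreted as seconds) and capped at roughly one hour. A standalone sketch of that calculation; the constant values here are assumptions for illustration, not necessarily the interceptor's real defaults.

import java.util.concurrent.TimeUnit;

public class BackoffSchedule {

    // Assumed values for illustration; the interceptor's actual defaults may differ
    private static final long INITIAL_SLEEP_MS = 250;
    private static final long RETRY_AFTER_CAP_MS = TimeUnit.HOURS.toMillis(1);

    // Mirrors the sleep computation above for a given attempt and an optional
    // numeric Retry-After header value (in seconds)
    static long sleepTimeMs(int attempt, String retryAfterHeader) {
        long sleepTime = INITIAL_SLEEP_MS * Math.round(Math.pow(2, attempt));
        if (retryAfterHeader != null) {
            try {
                sleepTime = Long.parseLong(retryAfterHeader) * 1000;
                if (sleepTime > RETRY_AFTER_CAP_MS) {
                    sleepTime = RETRY_AFTER_CAP_MS;
                }
            } catch (NumberFormatException nfe) {
                // Non-numeric Retry-After: fall back to the doubling backoff already computed
            }
        }
        return sleepTime;
    }

    public static void main(String[] args) {
        // Doubling backoff: 250, 500, 1000 ms for attempts 0, 1, 2
        for (int attempt = 0; attempt < 3; attempt++) {
            System.out.println("attempt " + attempt + " -> " + sleepTimeMs(attempt, null) + " ms");
        }
        // A numeric Retry-After of 2 seconds overrides the computed backoff
        System.out.println("Retry-After 2 -> " + sleepTimeMs(0, "2") + " ms");
    }
}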