Use of java.io.InterruptedIOException in project bazel by bazelbuild.
The class HttpConnector, method connect.
URLConnection connect(URL originalUrl, ImmutableMap<String, String> requestHeaders)
    throws IOException {
  if (Thread.interrupted()) {
    throw new InterruptedIOException();
  }
  URL url = originalUrl;
  if (HttpUtils.isProtocol(url, "file")) {
    return url.openConnection();
  }
  List<Throwable> suppressions = new ArrayList<>();
  int retries = 0;
  int redirects = 0;
  int connectTimeout = MIN_CONNECT_TIMEOUT_MS;
  while (true) {
    HttpURLConnection connection = null;
    try {
      connection =
          (HttpURLConnection) url.openConnection(proxyHelper.createProxyIfNeeded(url));
      boolean isAlreadyCompressed =
          COMPRESSED_EXTENSIONS.contains(HttpUtils.getExtension(url.getPath()))
              || COMPRESSED_EXTENSIONS.contains(HttpUtils.getExtension(originalUrl.getPath()));
      for (Map.Entry<String, String> entry : requestHeaders.entrySet()) {
        if (isAlreadyCompressed && Ascii.equalsIgnoreCase(entry.getKey(), "Accept-Encoding")) {
          // Don't ask for the payload to be compressed in flight when the file itself already
          // appears to be compressed.
          continue;
        }
        connection.setRequestProperty(entry.getKey(), entry.getValue());
      }
      connection.setConnectTimeout(connectTimeout);
      // The read timeout is always large because it stays in effect after this method.
      connection.setReadTimeout(READ_TIMEOUT_MS);
      // Java tries to abstract HTTP error responses for us. We don't want that. So we're going
      // to try and undo any IOException that doesn't appear to be a legitimate I/O exception.
      int code;
      try {
        connection.connect();
        code = connection.getResponseCode();
      } catch (FileNotFoundException ignored) {
        code = connection.getResponseCode();
      } catch (UnknownHostException e) {
        String message = "Unknown host: " + e.getMessage();
        eventHandler.handle(Event.progress(message));
        throw new UnrecoverableHttpException(message);
      } catch (IllegalArgumentException e) {
        // This will happen if the user does something like specify a port greater than 2^16-1.
        throw new UnrecoverableHttpException(e.getMessage());
      } catch (IOException e) {
        // Disambiguating HTTP error responses from real I/O failures requires inspecting the
        // message.
        if (e.getMessage() == null) {
          throw new UnrecoverableHttpException("Failed to even get an error message from " + url);
        }
        if (!e.getMessage().startsWith("Server returned")) {
          throw e;
        }
        code = connection.getResponseCode();
      }
      // 206 means partial content and only happens if caller specified Range. See RFC7233 § 4.1.
      if (code == 200 || code == 206) {
        return connection;
      } else if (code == 301 || code == 302 || code == 307) {
        readAllBytesAndClose(connection.getInputStream());
        if (++redirects == MAX_REDIRECTS) {
          eventHandler.handle(Event.progress("Redirect loop detected in " + originalUrl));
          throw new UnrecoverableHttpException("Redirect loop detected");
        }
        url = HttpUtils.getLocation(connection);
        if (code == 301) {
          originalUrl = url;
        }
      } else if (code == 403) {
        // jart@ has noticed BitBucket + Amazon AWS downloads frequently flake with this code.
        throw new IOException(describeHttpResponse(connection));
      } else if (code == 408) {
        // 408 means the server gave up waiting, and the client MAY repeat
        // that request on a new connection. Quoth RFC7231 § 6.5.7
        throw new IOException(describeHttpResponse(connection));
      } else if (code < 500   // 4xx means client seems to have erred quoth RFC7231 § 6.5
          || code == 501      // Server doesn't support function quoth RFC7231 § 6.6.2
          || code == 502      // Host not configured on server cf. RFC7231 § 6.6.3
          || code == 505) {   // Server refuses to support version quoth RFC7231 § 6.6.6
        // This is a permanent error so we're not going to retry.
        readAllBytesAndClose(connection.getErrorStream());
        throw new UnrecoverableHttpException(describeHttpResponse(connection));
      } else {
        // However we will retry on some 5xx errors, particularly 500 and 503.
        throw new IOException(describeHttpResponse(connection));
      }
    } catch (UnrecoverableHttpException e) {
      throw e;
    } catch (IOException e) {
      if (connection != null) {
        // If we got here, it means we might not have consumed the entire payload of the
        // response, if any. So we're going to force this socket to disconnect and not be
        // reused. This is particularly important if multiple threads end up establishing
        // connections to multiple mirrors simultaneously for a large file. We don't want to
        // download that large file twice.
        connection.disconnect();
      }
      // We don't respect the Retry-After header (RFC7231 § 7.1.3) because it's rarely used and
      // tends to be too conservative when it is. We're already being good citizens by using
      // exponential backoff. Furthermore RFC law didn't use the magic word "MUST".
      int timeout = IntMath.pow(2, retries) * MIN_RETRY_DELAY_MS;
      if (e instanceof SocketTimeoutException) {
        eventHandler.handle(Event.progress("Timeout connecting to " + url));
        connectTimeout = Math.min(connectTimeout * 2, MAX_CONNECT_TIMEOUT_MS);
        // If we got a connect timeout, we're already doing exponential backoff, so no point
        // in sleeping too.
        timeout = 1;
      } else if (e instanceof InterruptedIOException) {
        // Please note that SocketTimeoutException is a subtype of InterruptedIOException.
        throw e;
      }
      if (++retries == MAX_RETRIES) {
        if (!(e instanceof SocketTimeoutException)) {
          eventHandler.handle(
              Event.progress(format("Error connecting to %s: %s", url, e.getMessage())));
        }
        for (Throwable suppressed : suppressions) {
          e.addSuppressed(suppressed);
        }
        throw e;
      }
      // Java 7 allows us to create a tree of all errors that led to the ultimate failure.
      suppressions.add(e);
      eventHandler.handle(
          Event.progress(format("Failed to connect to %s trying again in %,dms", url, timeout)));
      url = originalUrl;
      try {
        sleeper.sleepMillis(timeout);
      } catch (InterruptedException translated) {
        throw new InterruptedIOException();
      }
    } catch (RuntimeException e) {
      if (connection != null) {
        connection.disconnect();
      }
      eventHandler.handle(Event.progress(format("Unknown error connecting to %s: %s", url, e)));
      throw e;
    }
  }
}
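The interesting part of this snippet is how interruption flows through a method that may only throw IOException: Thread.interrupted() is checked up front, InterruptedIOException is rethrown rather than retried, and an InterruptedException raised while sleeping between retries is translated back into InterruptedIOException. Below is a minimal, self-contained sketch of that pattern; RetryDemo, IoAction, and retryWithBackoff are illustrative names, not part of Bazel's API.

import java.io.IOException;
import java.io.InterruptedIOException;

final class RetryDemo {
  interface IoAction<T> {
    T run() throws IOException;
  }

  // Retry an I/O action with exponential backoff, surfacing thread interruption
  // as InterruptedIOException so it fits a throws-IOException-only signature.
  static <T> T retryWithBackoff(IoAction<T> action, int maxRetries, long baseDelayMs)
      throws IOException {
    IOException last = null;
    for (int attempt = 0; attempt < maxRetries; attempt++) {
      if (Thread.interrupted()) {
        throw new InterruptedIOException(); // same early-exit check as connect()
      }
      try {
        return action.run();
      } catch (InterruptedIOException e) {
        throw e; // interruption is never retried
      } catch (IOException e) {
        if (last != null) {
          e.addSuppressed(last); // keep the tree of prior failures, as connect() does
        }
        last = e;
        try {
          Thread.sleep(baseDelayMs << attempt); // 2^attempt * base, like IntMath.pow(2, retries)
        } catch (InterruptedException translated) {
          throw new InterruptedIOException(); // translate, exactly as connect() does
        }
      }
    }
    if (last == null) {
      throw new IOException("maxRetries must be at least 1");
    }
    throw last;
  }
}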
Use of java.io.InterruptedIOException in project bazel by bazelbuild.
The class HttpDownloader, method download.
/**
 * Downloads file to disk and returns path.
 *
 * <p>If the SHA256 checksum and path to the repository cache are specified, attempts to load
 * the file from the {@link RepositoryCache}. If it doesn't exist there, proceeds to download
 * the file and load it into the cache prior to returning the value.
 *
 * @param urls list of mirror URLs with identical content
 * @param sha256 valid SHA256 hex checksum string which is checked, or empty to disable
 * @param type extension, e.g. "tar.gz" to force on downloaded filename, or empty to not do this
 * @param output destination filename if {@code type} is <i>absent</i>, otherwise output directory
 * @param eventHandler CLI progress reporter
 * @param clientEnv environment variables in shell issuing this command
 * @throws IllegalArgumentException on parameter badness, which should be checked beforehand
 * @throws IOException if download was attempted and ended up failing
 * @throws InterruptedException if this thread is being cast into oblivion
 */
public Path download(
    List<URL> urls,
    String sha256,
    Optional<String> type,
    Path output,
    ExtendedEventHandler eventHandler,
    Map<String, String> clientEnv)
    throws IOException, InterruptedException {
  if (Thread.interrupted()) {
    throw new InterruptedException();
  }
  Path destination = getDownloadDestination(urls.get(0), type, output);
  // Used to decide whether to cache the download at the end of this method.
  boolean isCaching = false;
  if (!sha256.isEmpty()) {
    try {
      String currentSha256 = RepositoryCache.getChecksum(KeyType.SHA256, destination);
      if (currentSha256.equals(sha256)) {
        // No need to download.
        return destination;
      }
    } catch (IOException e) {
      // Ignore error trying to hash. We'll attempt to retrieve from cache or just download
      // again.
    }
    if (repositoryCache.isEnabled()) {
      isCaching = true;
      Path cachedDestination = repositoryCache.get(sha256, destination, KeyType.SHA256);
      if (cachedDestination != null) {
        // Cache hit!
        return cachedDestination;
      }
    }
  }
  // TODO: Consider using Dagger2 to automate this.
  Clock clock = new JavaClock();
  Sleeper sleeper = new JavaSleeper();
  Locale locale = Locale.getDefault();
  ProxyHelper proxyHelper = new ProxyHelper(clientEnv);
  HttpConnector connector = new HttpConnector(locale, eventHandler, proxyHelper, sleeper);
  ProgressInputStream.Factory progressInputStreamFactory =
      new ProgressInputStream.Factory(locale, clock, eventHandler);
  HttpStream.Factory httpStreamFactory = new HttpStream.Factory(progressInputStreamFactory);
  HttpConnectorMultiplexer multiplexer =
      new HttpConnectorMultiplexer(eventHandler, connector, httpStreamFactory, clock, sleeper);
  // Connect to the best mirror and download the file, while reporting progress to the CLI.
  semaphore.acquire();
  try (HttpStream payload = multiplexer.connect(urls, sha256);
      OutputStream out = destination.getOutputStream()) {
    ByteStreams.copy(payload, out);
  } catch (InterruptedIOException e) {
    throw new InterruptedException();
  } catch (IOException e) {
    throw new IOException(
        "Error downloading " + urls + " to " + destination + ": " + e.getMessage());
  } finally {
    semaphore.release();
  }
  if (isCaching) {
    repositoryCache.put(sha256, destination, KeyType.SHA256);
  }
  return destination;
}
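Note the boundary translation in the copy step: the stream layer reports interruption as InterruptedIOException (an IOException subtype), which download() converts back into a plain InterruptedException to match its throws clause. A self-contained sketch of just that translation, with copyOrInterrupt as a hypothetical stand-in for the ByteStreams.copy call site:

import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.io.OutputStream;

final class CopyDemo {
  // Copy a stream, but surface stream-level interruption as InterruptedException,
  // the way download() does around ByteStreams.copy.
  static long copyOrInterrupt(InputStream in, OutputStream out)
      throws IOException, InterruptedException {
    try {
      return copy(in, out);
    } catch (InterruptedIOException e) {
      // The I/O layer encodes interruption as an IOException subtype; re-expose
      // it to callers as a plain InterruptedException.
      throw new InterruptedException();
    }
  }

  private static long copy(InputStream in, OutputStream out) throws IOException {
    byte[] buf = new byte[8192];
    long total = 0;
    for (int n; (n = in.read(buf)) != -1; ) {
      out.write(buf, 0, n);
      total += n;
    }
    return total;
  }
}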
Use of java.io.InterruptedIOException in project bazel by bazelbuild.
The class RetryingInputStream, method reconnectWhereWeLeftOff.
private void reconnectWhereWeLeftOff(IOException cause) throws IOException {
  try {
    URLConnection connection;
    long amountRead = toto.get(); // total number of bytes read so far
    if (amountRead == 0) {
      connection = reconnector.connect(cause, ImmutableMap.<String, String>of());
    } else {
      // Range request syntax is "bytes=N-" (RFC7233 § 3.1); the Content-Range
      // response header echoes it back as "bytes N-" (RFC7233 § 4.2).
      connection =
          reconnector.connect(
              cause, ImmutableMap.of("Range", String.format("bytes=%d-", amountRead)));
      if (!Strings.nullToEmpty(connection.getHeaderField("Content-Range"))
          .startsWith(String.format("bytes %d-", amountRead))) {
        throw new IOException(
            String.format(
                "Tried to reconnect at offset %,d but server didn't support it", amountRead));
      }
    }
    delegate = new InterruptibleInputStream(connection.getInputStream());
  } catch (InterruptedIOException e) {
    throw e;
  } catch (IOException e) {
    propagate(e);
  }
}
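The same resume handshake can be sketched with plain java.net, outside Bazel's reconnector abstraction. The point is the verification step: a server that ignores the Range header replies 200 with the full body, so the client must confirm the server honored the offset before trusting the stream. ResumeDemo and resumeAt are illustrative names.

import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

final class ResumeDemo {
  // Reopen a download at the given byte offset, failing fast if the server
  // doesn't support range requests.
  static InputStream resumeAt(URL url, long offset) throws IOException {
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Range", String.format("bytes=%d-", offset));
    if (conn.getResponseCode() != HttpURLConnection.HTTP_PARTIAL) { // 206
      conn.disconnect();
      throw new IOException("Server ignored Range header; cannot resume at " + offset);
    }
    return conn.getInputStream();
  }
}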
Use of java.io.InterruptedIOException in project grpc-java by grpc.
The class Util, method skipAll.
/**
 * Reads until {@code source} is exhausted or the deadline has been reached. This is careful to
 * not extend the deadline if one exists already.
 */
public static boolean skipAll(Source source, int duration, TimeUnit timeUnit) throws IOException {
  long now = System.nanoTime();
  long originalDuration =
      source.timeout().hasDeadline() ? source.timeout().deadlineNanoTime() - now : Long.MAX_VALUE;
  source.timeout().deadlineNanoTime(now + Math.min(originalDuration, timeUnit.toNanos(duration)));
  try {
    Buffer skipBuffer = new Buffer();
    while (source.read(skipBuffer, 2048) != -1) {
      skipBuffer.clear();
    }
    // Success! The source has been exhausted.
    return true;
  } catch (InterruptedIOException e) {
    // We ran out of time before exhausting the source.
    return false;
  } finally {
    if (originalDuration == Long.MAX_VALUE) {
      source.timeout().clearDeadline();
    } else {
      source.timeout().deadlineNanoTime(now + originalDuration);
    }
  }
}
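Here, InterruptedIOException is Okio's signal that a timeout deadline fired mid-read, and skipAll converts it into a boolean result. A hypothetical caller, assuming skipAll is statically imported: with a socket-backed Source the deadline would abort the drain, while the in-memory Buffer used here (so the example runs standalone) is always immediately readable and simply drains to completion.

import java.util.concurrent.TimeUnit;
import okio.Buffer;
import okio.Source;

public final class SkipAllDemo {
  public static void main(String[] args) throws Exception {
    // Buffer implements Source, letting us exercise skipAll without a network.
    Buffer body = new Buffer().writeUtf8("leftover response body");
    Source source = body;
    boolean exhausted = skipAll(source, 500, TimeUnit.MILLISECONDS);
    System.out.println("exhausted = " + exhausted); // true: buffer fully drained
  }
}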
Use of java.io.InterruptedIOException in project xUtils by wyouflf.
The class DecompressingEntity, method writeTo.
/**
 * {@inheritDoc}
 */
@Override
public void writeTo(OutputStream outStream) throws IOException {
  if (outStream == null) {
    throw new IllegalArgumentException("Output stream may not be null");
  }
  InputStream inStream = null;
  try {
    inStream = getContent();
    byte[] tmp = new byte[4096];
    int len;
    while ((len = inStream.read(tmp)) != -1) {
      outStream.write(tmp, 0, len);
      uploadedSize += len;
      if (callBackHandler != null) {
        if (!callBackHandler.updateProgress(uncompressedLength, uploadedSize, false)) {
          // The progress callback returned false, i.e. the caller requested cancellation.
          throw new InterruptedIOException("cancel");
        }
      }
    }
    outStream.flush();
    if (callBackHandler != null) {
      callBackHandler.updateProgress(uncompressedLength, uploadedSize, true);
    }
  } finally {
    IOUtils.closeQuietly(inStream);
  }
}
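This snippet uses InterruptedIOException as a cooperative cancellation signal: the progress callback vetoes the copy by returning false, and the abort then travels through any intermediate code that only understands IOException. A self-contained sketch of the idiom; ProgressCallback and copyWithProgress are illustrative names, not xUtils API.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.io.OutputStream;

final class CancellableCopy {
  interface ProgressCallback {
    /** Returns false to request cancellation. */
    boolean onProgress(long total, long current);
  }

  // Copy a stream chunk by chunk, giving the callback a chance to cancel after
  // each chunk by returning false.
  static void copyWithProgress(InputStream in, OutputStream out, long total, ProgressCallback cb)
      throws IOException {
    byte[] buf = new byte[4096];
    long copied = 0;
    for (int n; (n = in.read(buf)) != -1; ) {
      out.write(buf, 0, n);
      copied += n;
      if (!cb.onProgress(total, copied)) {
        throw new InterruptedIOException("cancelled by progress callback");
      }
    }
  }

  public static void main(String[] args) throws IOException {
    byte[] payload = new byte[16 * 1024];
    try {
      copyWithProgress(
          new ByteArrayInputStream(payload),
          new ByteArrayOutputStream(),
          payload.length,
          (t, c) -> c < 8 * 1024); // cancel halfway through
    } catch (InterruptedIOException e) {
      System.out.println("copy cancelled: " + e.getMessage());
    }
  }
}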