Example 61 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class HBaseAdmin method snapshot.

@Override
public void snapshot(SnapshotDescription snapshotDesc) throws IOException, SnapshotCreationException, IllegalArgumentException {
    // actually take the snapshot
    HBaseProtos.SnapshotDescription snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc);
    SnapshotResponse response = asyncSnapshot(snapshot);
    final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build();
    IsSnapshotDoneResponse done = null;
    long start = EnvironmentEdgeManager.currentTime();
    long max = response.getExpectedTimeout();
    long maxPauseTime = max / this.numRetries;
    int tries = 0;
    LOG.debug("Waiting a max of " + max + " ms for snapshot '" + ClientSnapshotDescriptionUtils.toString(snapshot) + "'' to complete. (max " + maxPauseTime + " ms per retry)");
    while (tries == 0 || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())) {
        try {
            // sleep a backoff <= pauseTime amount
            long sleep = getPauseTime(tries++);
            sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
            LOG.debug("(#" + tries + ") Sleeping: " + sleep + "ms while waiting for snapshot completion.");
            Thread.sleep(sleep);
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
        }
        LOG.debug("Getting current status of snapshot from master...");
        done = executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection(), getRpcControllerFactory()) {

            @Override
            protected IsSnapshotDoneResponse rpcCall() throws Exception {
                return master.isSnapshotDone(getRpcController(), request);
            }
        });
    }
    if (!done.getDone()) {
        throw new SnapshotCreationException("Snapshot '" + snapshot.getName() + "' wasn't completed in expectedTime:" + max + " ms", snapshotDesc);
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) SnapshotResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse) RestoreSnapshotResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse) SnapshotCreationException(org.apache.hadoop.hbase.snapshot.SnapshotCreationException) IsSnapshotDoneResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse) IsSnapshotDoneRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest) HBaseProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos)
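The catch block above shows a recurring HBase idiom: an InterruptedException caught while sleeping is rethrown as an InterruptedIOException via initCause, so the interruption can travel through a method that only declares IOException. A minimal standalone sketch of that idiom follows; the pollUntilDone name and the Supplier<Boolean> parameter are illustrative, not part of the HBase API.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.function.Supplier;

public final class InterruptAwarePolling {

    // Polls the supplied condition, sleeping between checks. If the thread is
    // interrupted, the InterruptedException is rethrown as an
    // InterruptedIOException with the original exception preserved as the cause.
    public static void pollUntilDone(Supplier<Boolean> isDone, long sleepMs) throws IOException {
        while (!isDone.get()) {
            try {
                Thread.sleep(sleepMs);
            } catch (InterruptedException e) {
                // Restore the interrupt flag before translating the exception
                // (the snapshot code above omits this step).
                Thread.currentThread().interrupt();
                throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
            }
        }
    }
}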

Example 62 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class RpcRetryingCallerImpl method callWithRetries.

@Override
public T callWithRetries(RetryingCallable<T> callable, int callTimeout) throws IOException, RuntimeException {
    List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions = new ArrayList<>();
    tracker.start();
    context.clear();
    for (int tries = 0; ; tries++) {
        long expectedSleep;
        try {
            // bad cache entries are cleared in the call to RetryingCallable#throwable() in catch block
            callable.prepare(tries != 0);
            interceptor.intercept(context.prepare(callable, tries));
            return callable.call(getTimeout(callTimeout));
        } catch (PreemptiveFastFailException e) {
            throw e;
        } catch (Throwable t) {
            Throwable e = t.getCause();
            ExceptionUtil.rethrowIfInterrupt(t);
            // translateException throws an exception when we should not retry, i.e. when the request is bad.
            interceptor.handleFailure(context, t);
            t = translateException(t);
            if (tries > startLogErrorsCnt) {
                LOG.info("Call exception, tries=" + tries + ", maxAttempts=" + maxAttempts + ", started=" + (EnvironmentEdgeManager.currentTime() - tracker.getStartTime()) + " ms ago, " + "cancelled=" + cancelled.get() + ", msg=" + t.getMessage() + " " + callable.getExceptionMessageAdditionalDetail());
            }
            callable.throwable(t, maxAttempts != 1);
            RetriesExhaustedException.ThrowableWithExtraContext qt = new RetriesExhaustedException.ThrowableWithExtraContext(t, EnvironmentEdgeManager.currentTime(), toString());
            exceptions.add(qt);
            if (tries >= maxAttempts - 1) {
                throw new RetriesExhaustedException(tries, exceptions);
            }
            // If the server is dead, we need to wait a little before retrying, to give
            // a chance to the regions to be moved
            // get right pause time, start by RETRY_BACKOFF[0] * pauseBase, where pauseBase might be
            // special when encountering CallQueueTooBigException, see #HBASE-17114
            long pauseBase = (t instanceof CallQueueTooBigException) ? pauseForCQTBE : pause;
            expectedSleep = callable.sleep(pauseBase, tries);
            // If, after the planned sleep, there won't be enough time left, we stop now.
            long duration = singleCallDuration(expectedSleep);
            if (duration > callTimeout) {
                String msg = "callTimeout=" + callTimeout + ", callDuration=" + duration + ": " + t.getMessage() + " " + callable.getExceptionMessageAdditionalDetail();
                throw (SocketTimeoutException) (new SocketTimeoutException(msg).initCause(t));
            }
        } finally {
            interceptor.updateFailureInfo(context);
        }
        try {
            if (expectedSleep > 0) {
                synchronized (cancelled) {
                    if (cancelled.get())
                        return null;
                    cancelled.wait(expectedSleep);
                }
            }
            if (cancelled.get())
                return null;
        } catch (InterruptedException e) {
            throw new InterruptedIOException("Interrupted after " + tries + " tries while maxAttempts=" + maxAttempts);
        }
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) CallQueueTooBigException(org.apache.hadoop.hbase.CallQueueTooBigException) ArrayList(java.util.ArrayList) PreemptiveFastFailException(org.apache.hadoop.hbase.exceptions.PreemptiveFastFailException) SocketTimeoutException(java.net.SocketTimeoutException)
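Note that the retry loop above does not back off with Thread.sleep; it waits on the shared cancelled flag so that a concurrent cancel, or an interrupt, can end the backoff early and be reported as an InterruptedIOException. A reduced sketch of that wait-or-cancel pattern follows; the BackoffWaiter class and waitOrCancel method are illustrative names, not HBase API.

import java.io.InterruptedIOException;
import java.util.concurrent.atomic.AtomicBoolean;

public final class BackoffWaiter {

    // The AtomicBoolean doubles as the monitor to wait on, mirroring the
    // cancelled field used by RpcRetryingCallerImpl above.
    private final AtomicBoolean cancelled = new AtomicBoolean(false);

    // Sleeps up to sleepMs unless cancelled first. Returns true if the caller
    // should retry again, false if the wait was cancelled. An interrupt is
    // translated into an InterruptedIOException.
    public boolean waitOrCancel(long sleepMs, int tries) throws InterruptedIOException {
        try {
            synchronized (cancelled) {
                if (cancelled.get()) {
                    return false;
                }
                cancelled.wait(sleepMs);
            }
            return !cancelled.get();
        } catch (InterruptedException e) {
            throw new InterruptedIOException("Interrupted after " + tries + " tries");
        }
    }

    // Wakes any waiter and marks the retry loop as cancelled.
    public void cancel() {
        cancelled.set(true);
        synchronized (cancelled) {
            cancelled.notifyAll();
        }
    }
}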

Example 63 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class RpcRetryingCallerWithReadReplicas method call.

/**
   * <p>
   * Algo:
   * - we put the query into the execution pool.
   * - after x ms, if we don't have a result, we add the queries for the secondary replicas
   * - we take the first answer
   * - when done, we cancel what's left. Cancelling means:
   * - removing from the pool if the actual call was not started
   * - interrupting the call if it has started
   * Client side, we need to take into account
   * - a call is not executed immediately after being put into the pool
   * - a call is a thread. Let's not multiply the number of thread by the number of replicas.
   * Server side, if we can cancel when it's still in the handler pool, it's much better, as a call
   * can take some i/o.
   * </p>
   * Globally, the number of retries, timeout and so on still applies, but it's per replica,
   * not global. We continue until all retries are done, or all timeouts are exceeded.
   */
public Result call(int operationTimeout) throws DoNotRetryIOException, InterruptedIOException, RetriesExhaustedException {
    boolean isTargetReplicaSpecified = (get.getReplicaId() >= 0);
    RegionLocations rl = getRegionLocations(true, (isTargetReplicaSpecified ? get.getReplicaId() : RegionReplicaUtil.DEFAULT_REPLICA_ID), cConnection, tableName, get.getRow());
    final ResultBoundedCompletionService<Result> cs = new ResultBoundedCompletionService<>(this.rpcRetryingCallerFactory, pool, rl.size());
    int startIndex = 0;
    int endIndex = rl.size();
    if (isTargetReplicaSpecified) {
        addCallsForReplica(cs, rl, get.getReplicaId(), get.getReplicaId());
        endIndex = 1;
    } else {
        addCallsForReplica(cs, rl, 0, 0);
        try {
            // wait for the timeout to see whether the primary responds back
            // Yes, microseconds
            Future<Result> f = cs.poll(timeBeforeReplicas, TimeUnit.MICROSECONDS);
            if (f != null) {
                //great we got a response
                return f.get();
            }
        } catch (ExecutionException e) {
            // We ignore the ExecutionException and continue with the secondary replicas
            if (LOG.isDebugEnabled()) {
                LOG.debug("Primary replica returns " + e.getCause());
            }
            // Skip the result from the primary as we know that there is something wrong
            startIndex = 1;
        } catch (CancellationException e) {
            throw new InterruptedIOException();
        } catch (InterruptedException e) {
            throw new InterruptedIOException();
        }
        // submit calls for all of the secondaries at once
        addCallsForReplica(cs, rl, 1, rl.size() - 1);
    }
    try {
        Future<Result> f = cs.pollForFirstSuccessfullyCompletedTask(operationTimeout, TimeUnit.MILLISECONDS, startIndex, endIndex);
        if (f == null) {
            throw new RetriesExhaustedException("timed out after " + operationTimeout + " ms");
        }
        return f.get();
    } catch (ExecutionException e) {
        throwEnrichedException(e, retries);
    } catch (CancellationException e) {
        throw new InterruptedIOException();
    } catch (InterruptedException e) {
        throw new InterruptedIOException();
    } finally {
        // We get there because we were interrupted or because one or more of the
        // calls succeeded or failed. In all cases, we stop all our tasks.
        cs.cancelAll();
    }
    // unreachable
    LOG.error("Imposible? Arrive at an unreachable line...");
    // unreachable
    return null;
}
Also used : RegionLocations(org.apache.hadoop.hbase.RegionLocations) InterruptedIOException(java.io.InterruptedIOException) CancellationException(java.util.concurrent.CancellationException) ExecutionException(java.util.concurrent.ExecutionException)
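The javadoc above describes a hedged-read flow: submit the primary call, give it a short head start, then fan out to the replicas and take whichever answer arrives first, cancelling the rest. A simplified sketch of that control flow with a plain ExecutorCompletionService follows; the hedgedGet method, the task list, and the two timeouts are illustrative and not the HBase API, and the per-replica retries and error enrichment done by the real caller are omitted.

import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;

public final class HedgedReads {

    // Submits the first (primary) task, waits primaryWaitMs for it, then submits
    // the remaining tasks and returns whichever completes first. All outstanding
    // futures are cancelled before returning; interrupts and cancellations are
    // surfaced as InterruptedIOException, as in the caller above.
    public static <T> T hedgedGet(ExecutorService pool, List<Callable<T>> tasks,
                                  long primaryWaitMs, long totalTimeoutMs)
            throws InterruptedIOException, ExecutionException, TimeoutException {
        CompletionService<T> cs = new ExecutorCompletionService<>(pool);
        List<Future<T>> futures = new ArrayList<>();
        futures.add(cs.submit(tasks.get(0)));
        try {
            Future<T> primary = cs.poll(primaryWaitMs, TimeUnit.MILLISECONDS);
            if (primary != null) {
                // The primary answered within its head-start window.
                return primary.get();
            }
            // Hedge: fan the request out to every remaining replica.
            for (Callable<T> task : tasks.subList(1, tasks.size())) {
                futures.add(cs.submit(task));
            }
            Future<T> winner = cs.poll(totalTimeoutMs, TimeUnit.MILLISECONDS);
            if (winner == null) {
                throw new TimeoutException("no replica answered in " + totalTimeoutMs + " ms");
            }
            return winner.get();
        } catch (InterruptedException | CancellationException e) {
            throw new InterruptedIOException(e.toString());
        } finally {
            // Whether we won, failed, or were interrupted, stop all remaining tasks.
            for (Future<T> f : futures) {
                f.cancel(true);
            }
        }
    }
}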

Example 64 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class ScannerCallable method rpcCall.

@Override
protected Result[] rpcCall() throws Exception {
    if (Thread.interrupted()) {
        throw new InterruptedIOException();
    }
    if (closed) {
        close();
        return null;
    }
    ScanResponse response;
    if (this.scannerId == -1L) {
        response = openScanner();
    } else {
        response = next();
    }
    long timestamp = System.currentTimeMillis();
    setHeartbeatMessage(response.hasHeartbeatMessage() && response.getHeartbeatMessage());
    Result[] rrs = ResponseConverter.getResults(getRpcControllerCellScanner(), response);
    if (logScannerActivity) {
        long now = System.currentTimeMillis();
        if (now - timestamp > logCutOffLatency) {
            int rows = rrs == null ? 0 : rrs.length;
            LOG.info("Took " + (now - timestamp) + "ms to fetch " + rows + " rows from scanner=" + scannerId);
        }
    }
    updateServerSideMetrics(response);
    // moreResults is only used for the case where a filter exhausts all elements
    if (response.hasMoreResults()) {
        if (response.getMoreResults()) {
            setMoreResultsForScan(MoreResults.YES);
        } else {
            setMoreResultsForScan(MoreResults.NO);
            setAlreadyClosed();
        }
    } else {
        setMoreResultsForScan(MoreResults.UNKNOWN);
    }
    if (response.hasMoreResultsInRegion()) {
        if (response.getMoreResultsInRegion()) {
            setMoreResultsInRegion(MoreResults.YES);
        } else {
            setMoreResultsInRegion(MoreResults.NO);
            setAlreadyClosed();
        }
    } else {
        setMoreResultsInRegion(MoreResults.UNKNOWN);
    }
    updateResultsMetrics(rrs);
    return rrs;
}
Also used : InterruptedIOException(java.io.InterruptedIOException) ScanResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse)
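The first statement of rpcCall above turns a pending thread interrupt into an InterruptedIOException before any scanner RPC is issued. The same cooperative check can be factored into a tiny helper; the checkInterrupted name below is illustrative.

import java.io.InterruptedIOException;

public final class InterruptChecks {

    // Clears a pending interrupt and reports it as an InterruptedIOException,
    // so callers that only handle IOException still observe the interruption.
    static void checkInterrupted() throws InterruptedIOException {
        if (Thread.interrupted()) {
            throw new InterruptedIOException("thread interrupted before RPC call");
        }
    }
}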

Example 65 with InterruptedIOException

use of java.io.InterruptedIOException in project hbase by apache.

the class SimpleRequestController method newChecker.

@VisibleForTesting
static Checker newChecker(List<RowChecker> checkers) {
    return new Checker() {

        private boolean isEnd = false;

        @Override
        public ReturnCode canTakeRow(HRegionLocation loc, Row row) {
            if (isEnd) {
                return ReturnCode.END;
            }
            long heapSizeOfRow = (row instanceof Mutation) ? ((Mutation) row).heapSize() : 0;
            ReturnCode code = ReturnCode.INCLUDE;
            for (RowChecker checker : checkers) {
                switch(checker.canTakeOperation(loc, heapSizeOfRow)) {
                    case END:
                        isEnd = true;
                        code = ReturnCode.END;
                        break;
                    case SKIP:
                        code = ReturnCode.SKIP;
                        break;
                    case INCLUDE:
                    default:
                        break;
                }
                if (code == ReturnCode.END) {
                    break;
                }
            }
            for (RowChecker checker : checkers) {
                checker.notifyFinal(code, loc, heapSizeOfRow);
            }
            return code;
        }

        @Override
        public void reset() throws InterruptedIOException {
            isEnd = false;
            InterruptedIOException e = null;
            for (RowChecker checker : checkers) {
                try {
                    checker.reset();
                } catch (InterruptedIOException ex) {
                    e = ex;
                }
            }
            if (e != null) {
                throw e;
            }
        }
    };
}
Also used : InterruptedIOException(java.io.InterruptedIOException) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
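The reset override above runs every checker even if one of them throws, remembering the last InterruptedIOException and rethrowing it at the end so no checker is skipped. The same aggregate-then-rethrow pattern, written generically, looks like the sketch below; the Resettable interface and resetAll method are illustrative names.

import java.io.InterruptedIOException;
import java.util.List;

public final class ResetAll {

    interface Resettable {
        void reset() throws InterruptedIOException;
    }

    // Resets every member even when some throw; the last InterruptedIOException
    // seen is rethrown afterwards, matching Checker.reset above.
    static void resetAll(List<Resettable> members) throws InterruptedIOException {
        InterruptedIOException pending = null;
        for (Resettable member : members) {
            try {
                member.reset();
            } catch (InterruptedIOException e) {
                pending = e;
            }
        }
        if (pending != null) {
            throw pending;
        }
    }
}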

Aggregations

InterruptedIOException (java.io.InterruptedIOException): 286 usages
IOException (java.io.IOException): 195 usages
Test (org.junit.Test): 40 usages
Socket (java.net.Socket): 28 usages
ArrayList (java.util.ArrayList): 27 usages
InputStream (java.io.InputStream): 23 usages
ExecutionException (java.util.concurrent.ExecutionException): 23 usages
ConnectException (java.net.ConnectException): 22 usages
InetSocketAddress (java.net.InetSocketAddress): 21 usages
ByteBuffer (java.nio.ByteBuffer): 21 usages
Path (org.apache.hadoop.fs.Path): 20 usages
NoRouteToHostException (java.net.NoRouteToHostException): 19 usages
EOFException (java.io.EOFException): 17 usages
OutputStream (java.io.OutputStream): 17 usages
SocketTimeoutException (java.net.SocketTimeoutException): 17 usages
ServletException (javax.servlet.ServletException): 17 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 16 usages
SocketException (java.net.SocketException): 15 usages
HttpServletRequest (javax.servlet.http.HttpServletRequest): 15 usages
HttpServletResponse (javax.servlet.http.HttpServletResponse): 15 usages