Use of org.apache.hadoop.hbase.exceptions.TimeoutIOException in project hbase by apache.
The class HRegion, method replayWALEntry:
/**
* Replay remote wal entry sent by primary replica.
* <p/>
* Should only call this method on secondary replicas.
*/
void replayWALEntry(WALEntry entry, CellScanner cells) throws IOException {
  long timeout = -1L;
  Optional<RpcCall> call = RpcServer.getCurrentCall();
  if (call.isPresent()) {
    long deadline = call.get().getDeadline();
    if (deadline < Long.MAX_VALUE) {
      timeout = deadline - EnvironmentEdgeManager.currentTime();
      if (timeout <= 0) {
        throw new TimeoutIOException("Timeout while replaying edits for " + getRegionInfo());
      }
    }
  }
  if (timeout > 0) {
    try {
      if (!replayLock.tryLock(timeout, TimeUnit.MILLISECONDS)) {
        throw new TimeoutIOException(
          "Timeout while waiting for lock when replaying edits for " + getRegionInfo());
      }
    } catch (InterruptedException e) {
      throw throwOnInterrupt(e);
    }
  } else {
    replayLock.lock();
  }
  try {
    int count = entry.getAssociatedCellCount();
    long sequenceId = entry.getKey().getLogSequenceNumber();
    if (lastReplayedSequenceId >= sequenceId) {
      // This edit has already been replayed; just advance past its cells and return.
      for (int i = 0; i < count; i++) {
        // Throw index out of bounds if our cell count is off
        if (!cells.advance()) {
          throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
        }
      }
      return;
    }
    Map<byte[], List<Cell>> family2Cells = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (int i = 0; i < count; i++) {
      // Throw index out of bounds if our cell count is off
      if (!cells.advance()) {
        throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
      }
      Cell cell = cells.current();
      if (WALEdit.isMetaEditFamily(cell)) {
        // Guard logic so we do not break things in the worst case: flush any
        // buffered data cells before applying the meta edit.
        if (!family2Cells.isEmpty()) {
          replayWALBatchMutate(family2Cells);
          family2Cells.clear();
        }
        replayWALMetaEdit(cell);
      } else {
        family2Cells.computeIfAbsent(CellUtil.cloneFamily(cell), k -> new ArrayList<>()).add(cell);
      }
    }
    // Do not forget to apply the remaining cells
    if (!family2Cells.isEmpty()) {
      replayWALBatchMutate(family2Cells);
    }
    mvcc.advanceTo(sequenceId);
    lastReplayedSequenceId = sequenceId;
  } finally {
    replayLock.unlock();
  }
}
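
The notable part of this method is how the remaining RPC deadline is converted into a bounded lock wait, so a replay call never outlives the client that requested it. Below is a minimal, self-contained sketch of that pattern; TimeoutIOException is re-declared locally, and the names DeadlineLockExample and replay are hypothetical, not HBase API.

import java.io.IOException;
import java.util.OptionalLong;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

public class DeadlineLockExample {

  static class TimeoutIOException extends IOException {
    TimeoutIOException(String msg) { super(msg); }
  }

  private final ReentrantLock replayLock = new ReentrantLock();

  void replay(OptionalLong deadlineMillis) throws IOException {
    long timeout = -1L;
    if (deadlineMillis.isPresent() && deadlineMillis.getAsLong() < Long.MAX_VALUE) {
      timeout = deadlineMillis.getAsLong() - System.currentTimeMillis();
      if (timeout <= 0) {
        // The deadline already passed; fail fast instead of queueing on the lock.
        throw new TimeoutIOException("Deadline exceeded before acquiring lock");
      }
    }
    if (timeout > 0) {
      try {
        // Wait at most the remaining budget, so a slow lock holder cannot
        // make this call run past its RPC deadline.
        if (!replayLock.tryLock(timeout, TimeUnit.MILLISECONDS)) {
          throw new TimeoutIOException("Timed out waiting for replay lock");
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException(e);
      }
    } else {
      replayLock.lock(); // no deadline: block indefinitely
    }
    try {
      // ... apply edits under the lock ...
    } finally {
      replayLock.unlock();
    }
  }
}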
Use of org.apache.hadoop.hbase.exceptions.TimeoutIOException in project hbase by apache.
The class CallRunner, method run:
public void run() {
  try {
    if (call.disconnectSince() >= 0) {
      if (RpcServer.LOG.isDebugEnabled()) {
        RpcServer.LOG.debug(Thread.currentThread().getName() + ": skipped " + call);
      }
      return;
    }
    call.setStartTime(EnvironmentEdgeManager.currentTime());
    if (call.getStartTime() > call.getDeadline()) {
      RpcServer.LOG.warn("Dropping timed out call: " + call);
      this.rpcServer.getMetrics().callTimedOut();
      return;
    }
    this.status.setStatus("Setting up call");
    this.status.setConnection(call.getRemoteAddress().getHostAddress(), call.getRemotePort());
    if (RpcServer.LOG.isTraceEnabled()) {
      Optional<User> remoteUser = call.getRequestUser();
      RpcServer.LOG.trace(call.toShortString() + " executing as "
        + (remoteUser.isPresent() ? remoteUser.get().getName() : "NULL principal"));
    }
    Throwable errorThrowable = null;
    String error = null;
    Pair<Message, CellScanner> resultPair = null;
    RpcServer.CurCall.set(call);
    Span span = new IpcServerSpanBuilder(call).build();
    try (Scope traceScope = span.makeCurrent()) {
      if (!this.rpcServer.isStarted()) {
        InetSocketAddress address = rpcServer.getListenerAddress();
        throw new ServerNotRunningYetException(
          "Server " + (address != null ? address : "(channel closed)") + " is not running yet");
      }
      // make the call
      resultPair = this.rpcServer.call(call, this.status);
    } catch (TimeoutIOException e) {
      RpcServer.LOG.warn("Can not complete this request in time, drop it: " + call);
      TraceUtil.setError(span, e);
      return;
    } catch (Throwable e) {
      TraceUtil.setError(span, e);
      if (e instanceof ServerNotRunningYetException) {
        // If ServerNotRunningYetException, don't spew stack trace.
        if (RpcServer.LOG.isTraceEnabled()) {
          RpcServer.LOG.trace(call.toShortString(), e);
        }
      } else {
        // Don't dump the full exception; just its String version
        RpcServer.LOG.debug(call.toShortString() + ", exception=" + e);
      }
      errorThrowable = e;
      error = StringUtils.stringifyException(e);
      if (e instanceof Error) {
        throw (Error) e;
      }
    } finally {
      RpcServer.CurCall.set(null);
      if (resultPair != null) {
        this.rpcServer.addCallSize(call.getSize() * -1);
        span.setStatus(StatusCode.OK);
        sucessful = true;
      }
      span.end();
    }
    this.status.markComplete("To send response");
    // Return the RPC request's read ByteBuffer here; we are done with it by now.
    call.cleanup();
    // Set the response
    Message param = resultPair != null ? resultPair.getFirst() : null;
    CellScanner cells = resultPair != null ? resultPair.getSecond() : null;
    call.setResponse(param, cells, errorThrowable, error);
    call.sendResponseIfReady();
  } catch (OutOfMemoryError e) {
    if (this.rpcServer.getErrorHandler() != null) {
      if (this.rpcServer.getErrorHandler().checkOOME(e)) {
        RpcServer.LOG.info(Thread.currentThread().getName() + ": exiting on OutOfMemoryError");
        return;
      }
    } else {
      // rethrow if no handler
      throw e;
    }
  } catch (ClosedChannelException cce) {
    InetSocketAddress address = rpcServer.getListenerAddress();
    RpcServer.LOG.warn(Thread.currentThread().getName() + ": caught a ClosedChannelException, "
      + "this means that the server " + (address != null ? address : "(channel closed)")
      + " was processing a request but the client went away. The error message was: "
      + cce.getMessage());
  } catch (Exception e) {
    RpcServer.LOG.warn(Thread.currentThread().getName() + ": caught: " + StringUtils.stringifyException(e));
  } finally {
    if (!sucessful) {
      this.rpcServer.addCallSize(call.getSize() * -1);
    }
    if (this.status.isRPCRunning()) {
      this.status.markComplete("Call error");
    }
    this.status.pause("Waiting for a call");
    cleanup();
  }
}
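
Two timeout checkpoints in run() are worth noting: a call that sat in the queue past its deadline is dropped before dispatch (and counted in metrics), and a TimeoutIOException thrown mid-call is dropped without sending a response, since the client has already given up. A stripped-down sketch of that structure, with a hypothetical Call interface and a simple counter standing in for the HBase types:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

public class DeadlineAwareRunner {

  static class TimeoutIOException extends IOException {
    TimeoutIOException(String msg) { super(msg); }
  }

  /** Hypothetical stand-in for an RPC call with an absolute deadline. */
  interface Call {
    long deadlineMillis();
    String execute() throws IOException; // may itself throw TimeoutIOException
    void sendResponse(String result, Throwable error);
  }

  private final AtomicLong timedOutCalls = new AtomicLong();

  void run(Call call) {
    // Checkpoint 1: the call may have waited in the queue past its deadline.
    if (System.currentTimeMillis() > call.deadlineMillis()) {
      timedOutCalls.incrementAndGet();
      return; // drop silently; the client is no longer waiting
    }
    try {
      // Checkpoint 2: the handler may notice the deadline mid-call.
      String result = call.execute();
      call.sendResponse(result, null);
    } catch (TimeoutIOException e) {
      // Too late to be useful; sending a response would waste bandwidth.
      timedOutCalls.incrementAndGet();
    } catch (IOException e) {
      call.sendResponse(null, e); // real errors are still reported back
    }
  }
}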
Use of org.apache.hadoop.hbase.exceptions.TimeoutIOException in project hbase by apache.
The class AbstractFSWAL, method shutdown:
@Override
public void shutdown() throws IOException {
  if (!shutdown.compareAndSet(false, true)) {
    return;
  }
  closed = true;
  // Tell our listeners that the log is closing
  if (!this.listeners.isEmpty()) {
    for (WALActionsListener i : this.listeners) {
      i.logCloseRequested();
    }
  }
  Future<Void> future = logArchiveOrShutdownExecutor.submit(new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      if (rollWriterLock.tryLock(walShutdownTimeout, TimeUnit.SECONDS)) {
        try {
          doShutdown();
          if (syncFutureCache != null) {
            syncFutureCache.clear();
          }
        } finally {
          rollWriterLock.unlock();
        }
      } else {
        throw new IOException("Timed out waiting for rollWriterLock");
      }
      return null;
    }
  });
  logArchiveOrShutdownExecutor.shutdown();
  try {
    future.get(walShutdownTimeout, TimeUnit.MILLISECONDS);
  } catch (InterruptedException e) {
    throw new InterruptedIOException("Interrupted while waiting for WAL shutdown");
  } catch (TimeoutException e) {
    throw new TimeoutIOException("We have waited " + walShutdownTimeout + "ms, but"
      + " the shutdown of WAL doesn't complete! Please check the status of underlying "
      + "filesystem or increase the wait time by the config \""
      + WAL_SHUTDOWN_WAIT_TIMEOUT_MS + "\"", e);
  } catch (ExecutionException e) {
    if (e.getCause() instanceof IOException) {
      throw (IOException) e.getCause();
    } else {
      throw new IOException(e.getCause());
    }
  }
}
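
This shutdown path shows a general way to bound an otherwise unbounded operation: run it on a single-threaded executor, wait with Future.get(timeout), and translate the checked TimeoutException into a TimeoutIOException so the method keeps its IOException-only contract. A minimal sketch under those assumptions (the class name BoundedShutdown is hypothetical; TimeoutIOException is re-declared locally):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BoundedShutdown {

  static class TimeoutIOException extends IOException {
    TimeoutIOException(String msg, Throwable cause) { super(msg, cause); }
  }

  private final ExecutorService executor = Executors.newSingleThreadExecutor();

  /** Runs the shutdown work on a helper thread and bounds the wait. */
  void shutdown(Runnable doShutdown, long timeoutMillis) throws IOException {
    Future<?> future = executor.submit(doShutdown);
    executor.shutdown(); // no further tasks; lets the executor exit afterwards
    try {
      future.get(timeoutMillis, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new InterruptedIOException("Interrupted while waiting for shutdown");
    } catch (TimeoutException e) {
      // Translate the concurrency-level timeout into the IO-flavored timeout
      // that callers of an IOException-throwing API expect.
      throw new TimeoutIOException(
        "Shutdown did not complete within " + timeoutMillis + "ms", e);
    } catch (ExecutionException e) {
      Throwable cause = e.getCause();
      throw cause instanceof IOException ? (IOException) cause : new IOException(cause);
    }
  }
}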
Use of org.apache.hadoop.hbase.exceptions.TimeoutIOException in project hbase by apache.
The class AsyncRegionLocator, method withTimeout:
private <T> CompletableFuture<T> withTimeout(CompletableFuture<T> future, long timeoutNs,
    Supplier<String> timeoutMsg) {
  if (future.isDone() || timeoutNs <= 0) {
    return future;
  }
  Timeout timeoutTask = retryTimer.newTimeout(t -> {
    if (future.isDone()) {
      return;
    }
    future.completeExceptionally(new TimeoutIOException(timeoutMsg.get()));
  }, timeoutNs, TimeUnit.NANOSECONDS);
  FutureUtils.addListener(future, (loc, error) -> {
    if (error != null && error.getClass() != TimeoutIOException.class) {
      // Cancel the timeout task if we were not completed by it.
      timeoutTask.cancel();
    }
  });
  return future;
}