Search in sources :

Example 1 with HashedBytes

Use of org.apache.hadoop.hbase.util.HashedBytes in the project hbase by apache.

From the class HRegion, the method getRowLockInternal.

/**
 * Acquires a read or write lock on the given row, waiting up to the configured
 * row-lock timeout (or the RPC call's remaining deadline, whichever is shorter).
 *
 * @param row      the row key to lock
 * @param readLock {@code true} for a shared read lock, {@code false} for an
 *                 exclusive write lock
 * @return the acquired {@code RowLock}; never {@code null} on normal return
 * @throws TimeoutIOException     if the RPC deadline would expire before the
 *                                row-lock wait duration elapses
 * @throws IOException            if the lock could not be acquired within the
 *                                row-lock wait duration
 * @throws InterruptedIOException if the thread is interrupted while waiting
 *                                (the interrupt flag is restored before throwing)
 */
protected RowLock getRowLockInternal(byte[] row, boolean readLock) throws IOException {
    // create an object to use as a key in the row lock map
    HashedBytes rowKey = new HashedBytes(row);
    RowLockContext rowLockContext = null;
    RowLockImpl result = null;
    TraceScope traceScope = null;
    // If we're tracing start a span to show how long this took.
    if (Trace.isTracing()) {
        traceScope = Trace.startSpan("HRegion.getRowLock");
        traceScope.getSpan().addTimelineAnnotation("Getting a " + (readLock ? "readLock" : "writeLock"));
    }
    try {
        // TODO: do we need to add a time component here?
        while (result == null) {
            // Fetch (or atomically create) the shared per-row context from the lock map.
            rowLockContext = computeIfAbsent(lockedRows, rowKey, () -> new RowLockContext(rowKey));
            // This can fail as: newReadLock()/newWriteLock() presumably returns null when the
            // context was concurrently cleaned up and removed from the map between the
            // computeIfAbsent above and this call, so we loop and fetch a fresh context.
            // NOTE(review): confirm against RowLockContext — the null-return condition is
            // not visible from this file.
            if (readLock) {
                result = rowLockContext.newReadLock();
            } else {
                result = rowLockContext.newWriteLock();
            }
        }
        // Default wait is the configured row-lock duration; shrink it if the current
        // RPC call's deadline would expire sooner, so we fail fast with a retryable
        // timeout instead of holding the handler past its deadline.
        int timeout = rowLockWaitDuration;
        boolean reachDeadlineFirst = false;
        RpcCall call = RpcServer.getCurrentCall();
        if (call != null && call.getDeadline() < Long.MAX_VALUE) {
            int timeToDeadline = (int) (call.getDeadline() - System.currentTimeMillis());
            if (timeToDeadline <= this.rowLockWaitDuration) {
                reachDeadlineFirst = true;
                timeout = timeToDeadline;
            }
        }
        // timeout <= 0 means the deadline has already passed; otherwise block on the
        // actual java.util.concurrent lock for at most `timeout` ms.
        if (timeout <= 0 || !result.getLock().tryLock(timeout, TimeUnit.MILLISECONDS)) {
            if (traceScope != null) {
                traceScope.getSpan().addTimelineAnnotation("Failed to get row lock");
            }
            result = null;
            // Clean up the counts just in case this was the thing keeping the context alive.
            rowLockContext.cleanUp();
            String message = "Timed out waiting for lock for row: " + rowKey + " in region " + getRegionInfo().getEncodedName();
            if (reachDeadlineFirst) {
                // Deadline-bounded failure: TimeoutIOException signals the client may retry.
                throw new TimeoutIOException(message);
            } else {
                // If timeToDeadline is larger than rowLockWaitDuration, we can not drop the request.
                throw new IOException(message);
            }
        }
        rowLockContext.setThreadName(Thread.currentThread().getName());
        return result;
    } catch (InterruptedException ie) {
        // NOTE(review): unlike the timeout path above, this path does not call
        // rowLockContext.cleanUp(), so the context's use count may leak on
        // interrupt — verify whether later HBase versions fixed this before changing.
        LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
        InterruptedIOException iie = new InterruptedIOException();
        iie.initCause(ie);
        if (traceScope != null) {
            traceScope.getSpan().addTimelineAnnotation("Interrupted exception getting row lock");
        }
        // Restore the interrupt flag per the standard InterruptedException contract.
        Thread.currentThread().interrupt();
        throw iie;
    } finally {
        // Always close the trace span, on success and on every throw path.
        if (traceScope != null) {
            traceScope.close();
        }
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) RpcCall(org.apache.hadoop.hbase.ipc.RpcCall) TraceScope(org.apache.htrace.TraceScope) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) TimeoutIOException(org.apache.hadoop.hbase.exceptions.TimeoutIOException) TimeoutIOException(org.apache.hadoop.hbase.exceptions.TimeoutIOException) HashedBytes(org.apache.hadoop.hbase.util.HashedBytes)

Aggregations

IOException (java.io.IOException)1 InterruptedIOException (java.io.InterruptedIOException)1 DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)1 TimeoutIOException (org.apache.hadoop.hbase.exceptions.TimeoutIOException)1 RpcCall (org.apache.hadoop.hbase.ipc.RpcCall)1 HashedBytes (org.apache.hadoop.hbase.util.HashedBytes)1 MultipleIOException (org.apache.hadoop.io.MultipleIOException)1 TraceScope (org.apache.htrace.TraceScope)1