
Example 36 with RegionLocations

use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

the class MetaCache method getCachedLocation.

/**
   * Search the cache for a location that fits our table and row key.
   * Return null if no suitable region is located.
   *
   * @return Null or region location found in cache.
   */
public RegionLocations getCachedLocation(final TableName tableName, final byte[] row) {
    ConcurrentNavigableMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
    Entry<byte[], RegionLocations> e = tableLocations.floorEntry(row);
    if (e == null) {
        if (metrics != null)
            metrics.incrMetaCacheMiss();
        return null;
    }
    RegionLocations possibleRegion = e.getValue();
    // make sure that the end key is greater than the row we're looking
    // for, otherwise the row actually belongs in the next region, not
    // this one. the exception case is when the endkey is
    // HConstants.EMPTY_END_ROW, signifying that the region we're
    // checking is actually the last region in the table.
    byte[] endKey = possibleRegion.getRegionLocation().getRegionInfo().getEndKey();
    // If the end key is HConstants.EMPTY_END_ROW, the equals() check below will pass.
    if (Bytes.equals(endKey, HConstants.EMPTY_END_ROW) || Bytes.compareTo(endKey, 0, endKey.length, row, 0, row.length) > 0) {
        if (metrics != null)
            metrics.incrMetaCacheHit();
        return possibleRegion;
    }
    // Passed all the way through, so we got nothing - complete cache miss
    if (metrics != null)
        metrics.incrMetaCacheMiss();
    return null;
}
Also used : RegionLocations(org.apache.hadoop.hbase.RegionLocations)
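
The floor-entry lookup above can be illustrated without any HBase classes. Below is a minimal, self-contained sketch of the same technique, assuming a plain ConcurrentSkipListMap keyed by region start keys under an unsigned lexicographic comparator; FloorLookupSketch, toyCache and END_OF_TABLE are illustrative names, not HBase API.

import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;

public class FloorLookupSketch {

    // An empty end key marks the last region of the table, mirroring HConstants.EMPTY_END_ROW.
    private static final byte[] END_OF_TABLE = new byte[0];

    // Toy cache: region start key -> region end key, ordered like HBase row keys
    // (unsigned, lexicographic).
    private static final ConcurrentSkipListMap<byte[], byte[]> toyCache =
        new ConcurrentSkipListMap<>(FloorLookupSketch::compareUnsigned);

    private static int compareUnsigned(byte[] a, byte[] b) {
        int len = Math.min(a.length, b.length);
        for (int i = 0; i < len; i++) {
            int diff = (a[i] & 0xff) - (b[i] & 0xff);
            if (diff != 0) {
                return diff;
            }
        }
        return a.length - b.length;
    }

    /** Returns the cached [startKey, endKey) entry covering row, or null on a cache miss. */
    static Map.Entry<byte[], byte[]> lookup(byte[] row) {
        Map.Entry<byte[], byte[]> e = toyCache.floorEntry(row);
        if (e == null) {
            return null; // the row sorts before every cached start key
        }
        byte[] endKey = e.getValue();
        // The candidate covers the row only if its end key is past the row,
        // or if it is the last region of the table.
        if (endKey.length == 0 || compareUnsigned(endKey, row) > 0) {
            return e;
        }
        return null; // the row belongs to the next region; treat as a miss
    }

    public static void main(String[] args) {
        toyCache.put("a".getBytes(), "m".getBytes());
        toyCache.put("m".getBytes(), END_OF_TABLE);
        System.out.println(lookup("c".getBytes()) != null); // true: falls in [a, m)
        System.out.println(lookup("z".getBytes()) != null); // true: last region [m, end)
        System.out.println(lookup("A".getBytes()) != null); // false: sorts before "a"
    }
}

As in getCachedLocation, floorEntry finds the candidate region whose start key is at or before the row, and the end-key comparison rejects it when the row really belongs to the next region.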

Example 37 with RegionLocations

use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

the class ScannerCallable method prepare.

/**
   * @param reload force reload of server location
   * @throws IOException
   */
@Override
public void prepare(boolean reload) throws IOException {
    if (Thread.interrupted()) {
        throw new InterruptedIOException();
    }
    RegionLocations rl = RpcRetryingCallerWithReadReplicas.getRegionLocations(!reload, id, getConnection(), getTableName(), getRow());
    location = id < rl.size() ? rl.getRegionLocation(id) : null;
    if (location == null || location.getServerName() == null) {
        // With this exception there will be a retry; the location can be null for a replica
        // when the table is created or after a split.
        throw new HBaseIOException("There is no location for replica id #" + id);
    }
    ServerName dest = location.getServerName();
    setStub(super.getConnection().getClient(dest));
    if (!instantiated || reload) {
        checkIfRegionServerIsRemote();
        instantiated = true;
    }
    // check how often we retry.
    if (reload && this.scanMetrics != null) {
        this.scanMetrics.countOfRPCRetries.incrementAndGet();
        if (isRegionServerRemote) {
            this.scanMetrics.countOfRemoteRPCRetries.incrementAndGet();
        }
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) RegionLocations(org.apache.hadoop.hbase.RegionLocations) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) ServerName(org.apache.hadoop.hbase.ServerName)
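
The guard logic in prepare can be distilled into a few lines of plain Java. In the sketch below, pickReplica is an illustrative helper (not an HBase method) that keeps the same three steps: honour a pending interrupt, select the location for the requested replica id with a bounds check, and fail when that replica has no assigned location.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.List;

final class ReplicaSelectionSketch {

    private ReplicaSelectionSketch() {
    }

    /**
     * Picks the location for the given replica id, or fails with an IOException
     * (prepare uses HBaseIOException) when the replica is not assigned.
     */
    static <L> L pickReplica(List<L> locations, int replicaId) throws IOException {
        if (Thread.interrupted()) {
            // Surface a pending interrupt as an I/O-layer exception, as prepare(boolean) does.
            throw new InterruptedIOException();
        }
        // Guard the index: a replica id beyond the known locations means "no location".
        L location = replicaId < locations.size() ? locations.get(replicaId) : null;
        if (location == null) {
            // A replica can be briefly unassigned, e.g. right after table creation or a split.
            throw new IOException("There is no location for replica id #" + replicaId);
        }
        return location;
    }
}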

Example 38 with RegionLocations

use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

the class ScannerCallableWithReplicas method call.

@Override
public Result[] call(int timeout) throws IOException {
    // If the active replica callable was already closed, only the closed flag was set;
    // an RPC is still required to actually close the scanner on the server.
    if (currentScannerCallable != null && currentScannerCallable.closed) {
        // For closing we target that exact scanner, since it holds the valid scanner id (as in
        // the case of normal reads).
        if (LOG.isTraceEnabled()) {
            LOG.trace("Closing scanner id=" + currentScannerCallable.scannerId);
        }
        Result[] r = currentScannerCallable.call(timeout);
        currentScannerCallable = null;
        return r;
    }
    // We need to do the following:
    //1. When a scan goes out to a certain replica (default or not), we need to
    //   continue to hit that until there is a failure. So store the last successfully invoked
    //   replica
    //2. We should close the "losing" scanners (scanners other than the ones we hear back
    //   from first)
    //
    RegionLocations rl = RpcRetryingCallerWithReadReplicas.getRegionLocations(true, RegionReplicaUtil.DEFAULT_REPLICA_ID, cConnection, tableName, currentScannerCallable.getRow());
    // Allocate a bounded-completion pool of some multiple of the number of replicas.
    // We want to accommodate some RPCs for redundant replica scans that may still be in progress.
    ResultBoundedCompletionService<Pair<Result[], ScannerCallable>> cs = new ResultBoundedCompletionService<>(RpcRetryingCallerFactory.instantiate(ScannerCallableWithReplicas.this.conf), pool, rl.size() * 5);
    AtomicBoolean done = new AtomicBoolean(false);
    replicaSwitched.set(false);
    // submit call for the primary replica.
    addCallsForCurrentReplica(cs, rl);
    int startIndex = 0;
    try {
        // wait for the timeout to see whether the primary responds back
        Future<Pair<Result[], ScannerCallable>> f =
            cs.poll(timeBeforeReplicas, TimeUnit.MICROSECONDS); // Yes, microseconds
        if (f != null) {
            // After poll, if f is not null, there must be a completed task
            Pair<Result[], ScannerCallable> r = f.get();
            if (r != null && r.getSecond() != null) {
                updateCurrentlyServingReplica(r.getSecond(), r.getFirst(), done, pool);
            }
            // great, we got a response
            return r == null ? null : r.getFirst();
        }
    } catch (ExecutionException e) {
        // We ignore the ExecutionException and continue with the replicas
        if (LOG.isDebugEnabled()) {
            LOG.debug("Scan with primary region returns " + e.getCause());
        }
        // If there is only one replica, or the scan requires strong consistency, throw
        // out the exception from the primary replica
        if ((rl.size() == 1) || (scan.getConsistency() == Consistency.STRONG)) {
            // Rethrow the first exception
            RpcRetryingCallerWithReadReplicas.throwEnrichedException(e, retries);
        }
        startIndex = 1;
    } catch (CancellationException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    }
    // submit calls for all of the secondaries at once
    int endIndex = rl.size();
    if (scan.getConsistency() == Consistency.STRONG) {
        // When scan's consistency is strong, do not send to the secondaries
        endIndex = 1;
    } else {
        // TODO: this may be an overkill for large region replication
        addCallsForOtherReplicas(cs, rl, 0, rl.size() - 1);
    }
    try {
        Future<Pair<Result[], ScannerCallable>> f = cs.pollForFirstSuccessfullyCompletedTask(timeout, TimeUnit.MILLISECONDS, startIndex, endIndex);
        if (f == null) {
            throw new IOException("Failed to get result within timeout, timeout=" + timeout + "ms");
        }
        Pair<Result[], ScannerCallable> r = f.get();
        if (r != null && r.getSecond() != null) {
            updateCurrentlyServingReplica(r.getSecond(), r.getFirst(), done, pool);
        }
        // great we got an answer
        return r == null ? null : r.getFirst();
    } catch (ExecutionException e) {
        RpcRetryingCallerWithReadReplicas.throwEnrichedException(e, retries);
    } catch (CancellationException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } finally {
        // We get here because we were interrupted or because one or more of the
        // calls succeeded or failed. In all cases, we stop all our tasks.
        cs.cancelAll();
    }
    // unreachable
    LOG.error("Imposible? Arrive at an unreachable line...");
    throw new IOException("Imposible? Arrive at an unreachable line...");
}
Also used : RegionLocations(org.apache.hadoop.hbase.RegionLocations) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) InterruptedIOException(java.io.InterruptedIOException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CancellationException(java.util.concurrent.CancellationException) ExecutionException(java.util.concurrent.ExecutionException) Pair(org.apache.hadoop.hbase.util.Pair)
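
The core of call(int) is a hedged-read pattern: ask the primary first, give it a bounded head start, then fan out to the secondaries and keep whichever answer arrives first while cancelling the rest. The following simplified, self-contained sketch shows that pattern with java.util.concurrent; callWithBackups and its parameters are illustrative, and it omits the retry logic, exception enrichment and per-replica bookkeeping of the real callable.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

final class HedgedReadSketch {

    private HedgedReadSketch() {
    }

    /**
     * Submits tasks.get(0) (the "primary") first, waits up to primaryLeadMs for it, and only
     * then submits the remaining tasks (the "secondaries"); the first completed result wins
     * and every outstanding task is cancelled.
     */
    static <R> R callWithBackups(List<Callable<R>> tasks, long primaryLeadMs,
            long overallTimeoutMs, ExecutorService pool) throws Exception {
        CompletionService<R> cs = new ExecutorCompletionService<>(pool);
        List<Future<R>> submitted = new ArrayList<>();
        submitted.add(cs.submit(tasks.get(0)));                 // primary replica goes first
        try {
            Future<R> first = cs.poll(primaryLeadMs, TimeUnit.MILLISECONDS);
            if (first != null) {
                return first.get();                             // primary answered within its lead
            }
            for (int i = 1; i < tasks.size(); i++) {            // hedge out to the secondaries
                submitted.add(cs.submit(tasks.get(i)));
            }
            Future<R> winner = cs.poll(overallTimeoutMs, TimeUnit.MILLISECONDS);
            if (winner == null) {
                throw new TimeoutException("no replica answered within " + overallTimeoutMs + "ms");
            }
            return winner.get();
        } finally {
            for (Future<R> f : submitted) {
                f.cancel(true);                                 // stop the losing calls
            }
        }
    }
}

Unlike the real callable, this sketch simply propagates a fast failure of the primary instead of falling back to the secondaries; that fallback is what throwEnrichedException and the startIndex bookkeeping handle above.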

Example 39 with RegionLocations

use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

the class ReversedScannerCallable method prepare.

/**
   * @param reload force reload of server location
   * @throws IOException
   */
@Override
public void prepare(boolean reload) throws IOException {
    if (Thread.interrupted()) {
        throw new InterruptedIOException();
    }
    if (!instantiated || reload) {
        // We should use a range locate if
        // 1. the start row is not included in the scan, or
        // 2. the start row is empty, which means we need to locate the last region.
        if (scan.includeStartRow() && !isEmptyStartRow(getRow())) {
            // Just locate the region with the row
            RegionLocations rl = RpcRetryingCallerWithReadReplicas.getRegionLocations(reload, id, getConnection(), getTableName(), getRow());
            this.location = id < rl.size() ? rl.getRegionLocation(id) : null;
            if (location == null || location.getServerName() == null) {
                throw new IOException("Failed to find location, tableName=" + getTableName() + ", row=" + Bytes.toStringBinary(getRow()) + ", reload=" + reload);
            }
        } else {
            // Need to locate the regions with the range, and the target location is
            // the last one which is the previous region of last region scanner
            byte[] locateStartRow = createCloseRowBefore(getRow());
            List<HRegionLocation> locatedRegions = locateRegionsInRange(locateStartRow, getRow(), reload);
            if (locatedRegions.isEmpty()) {
                throw new DoNotRetryIOException("Does hbase:meta exist hole? Couldn't get regions for the range from " + Bytes.toStringBinary(locateStartRow) + " to " + Bytes.toStringBinary(getRow()));
            }
            this.location = locatedRegions.get(locatedRegions.size() - 1);
        }
        setStub(getConnection().getClient(getLocation().getServerName()));
        checkIfRegionServerIsRemote();
        instantiated = true;
    }
    // check how often we retry.
    if (reload && this.scanMetrics != null) {
        this.scanMetrics.countOfRPCRetries.incrementAndGet();
        if (isRegionServerRemote) {
            this.scanMetrics.countOfRemoteRPCRetries.incrementAndGet();
        }
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) RegionLocations(org.apache.hadoop.hbase.RegionLocations) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) InterruptedIOException(java.io.InterruptedIOException)
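
The branch in this prepare can be captured in a small, HBase-free sketch. Locator, reverseStartRegion and rowJustBefore below are illustrative stand-ins (the real code derives the lower bound with createCloseRowBefore and works with RegionLocations): locate directly when the start row is part of the scan, otherwise locate the range ending at the start row and take the last region found.

import java.io.IOException;
import java.util.List;

final class ReverseLocateSketch {

    /** Hypothetical locator abstraction: maps a row or a row range to region descriptors. */
    interface Locator<R> {
        R locate(byte[] row) throws IOException;

        List<R> locateRange(byte[] startInclusive, byte[] endExclusive) throws IOException;
    }

    /**
     * Picks the region a reversed scan should start from: a direct locate when the start row
     * itself is scanned, otherwise the last region of the range that ends at the start row.
     */
    static <R> R reverseStartRegion(Locator<R> locator, byte[] startRow, boolean includeStartRow,
            byte[] rowJustBefore) throws IOException {
        if (includeStartRow && startRow.length > 0) {
            return locator.locate(startRow);              // the usual forward-style lookup
        }
        List<R> regions = locator.locateRange(rowJustBefore, startRow);
        if (regions.isEmpty()) {
            throw new IOException("no region found before the start row");
        }
        return regions.get(regions.size() - 1);           // the region immediately "before" it
    }
}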

Example 40 with RegionLocations

use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

the class ReversedScannerCallable method locateRegionsInRange.

/**
   * Get the corresponding regions for an arbitrary range of keys.
   * @param startKey Starting row in range, inclusive
   * @param endKey Ending row in range, exclusive
   * @param reload force reload of server location
   * @return A list of HRegionLocation corresponding to the regions that contain
   *         the specified range
   * @throws IOException
   */
private List<HRegionLocation> locateRegionsInRange(byte[] startKey, byte[] endKey, boolean reload) throws IOException {
    final boolean endKeyIsEndOfTable = Bytes.equals(endKey, HConstants.EMPTY_END_ROW);
    if ((Bytes.compareTo(startKey, endKey) > 0) && !endKeyIsEndOfTable) {
        throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(startKey) + " > " + Bytes.toStringBinary(endKey));
    }
    List<HRegionLocation> regionList = new ArrayList<>();
    byte[] currentKey = startKey;
    do {
        RegionLocations rl = RpcRetryingCallerWithReadReplicas.getRegionLocations(reload, id, getConnection(), getTableName(), currentKey);
        HRegionLocation regionLocation = id < rl.size() ? rl.getRegionLocation(id) : null;
        if (regionLocation != null && regionLocation.getRegionInfo().containsRow(currentKey)) {
            regionList.add(regionLocation);
        } else {
            throw new DoNotRetryIOException("Does hbase:meta exist hole? Locating row " + Bytes.toStringBinary(currentKey) + " returns incorrect region " + (regionLocation == null ? null : regionLocation.getRegionInfo()));
        }
        currentKey = regionLocation.getRegionInfo().getEndKey();
    } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) && (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0));
    return regionList;
}
Also used : RegionLocations(org.apache.hadoop.hbase.RegionLocations) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ArrayList(java.util.ArrayList)
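
The do/while walk in locateRegionsInRange is a general "cover a key range region by region" pattern. Here is a minimal sketch using plain strings and a TreeMap of region start key to end key, with an empty string standing in for HConstants.EMPTY_END_ROW; regionsInRange is illustrative, not HBase API.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

final class RangeWalkSketch {

    /**
     * Walks a sorted table of regions (start key -> end key, "" meaning end of table)
     * and collects every region overlapping [startKey, endKey), failing on a hole.
     */
    static List<Map.Entry<String, String>> regionsInRange(
            NavigableMap<String, String> regions, String startKey, String endKey) {
        boolean endIsEndOfTable = endKey.isEmpty();
        if (!endIsEndOfTable && startKey.compareTo(endKey) > 0) {
            throw new IllegalArgumentException("Invalid range: " + startKey + " > " + endKey);
        }
        List<Map.Entry<String, String>> result = new ArrayList<>();
        String currentKey = startKey;
        do {
            Map.Entry<String, String> region = regions.floorEntry(currentKey);
            boolean covers = region != null
                && (region.getValue().isEmpty() || region.getValue().compareTo(currentKey) > 0);
            if (!covers) {
                throw new IllegalStateException("hole in region table at " + currentKey);
            }
            result.add(region);
            currentKey = region.getValue();          // next region starts at this end key
        } while (!currentKey.isEmpty()
            && (endIsEndOfTable || currentKey.compareTo(endKey) < 0));
        return result;
    }

    public static void main(String[] args) {
        NavigableMap<String, String> regions = new TreeMap<>();
        regions.put("", "g");
        regions.put("g", "p");
        regions.put("p", "");                        // last region of the table
        System.out.println(regionsInRange(regions, "c", "r").size()); // 3
    }
}

The hole check mirrors the DoNotRetryIOException above: if the located region does not actually contain currentKey, the walk stops instead of looping forever.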

Aggregations

RegionLocations (org.apache.hadoop.hbase.RegionLocations): 47
HRegionLocation (org.apache.hadoop.hbase.HRegionLocation): 28
ServerName (org.apache.hadoop.hbase.ServerName): 18
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 13
Test (org.junit.Test): 9
IOException (java.io.IOException): 8
InterruptedIOException (java.io.InterruptedIOException): 7
ArrayList (java.util.ArrayList): 6
Result (org.apache.hadoop.hbase.client.Result): 6
TableName (org.apache.hadoop.hbase.TableName): 5
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 4
MetaTableAccessor (org.apache.hadoop.hbase.MetaTableAccessor): 4
Pair (org.apache.hadoop.hbase.util.Pair): 4
HashMap (java.util.HashMap): 3
HashSet (java.util.HashSet): 2
CancellationException (java.util.concurrent.CancellationException): 2
ExecutionException (java.util.concurrent.ExecutionException): 2
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 2
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 2