Example 1 with RegionLocations

use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

From class ConnectionImplementation, method locateRegionInMeta.

/*
    * Search the hbase:meta table for the HRegionLocation
    * info that contains the table and row we're seeking.
    */
private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, boolean useCache, boolean retry, int replicaId) throws IOException {
    // we already have the region.
    if (useCache) {
        RegionLocations locations = getCachedLocation(tableName, row);
        if (locations != null && locations.getRegionLocation(replicaId) != null) {
            return locations;
        }
    }
    // build the key of the meta region we should be looking for.
    // the extra 9's on the end are necessary to allow "exact" matches
    // without knowing the precise region names.
    byte[] metaKey = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
    Scan s = new Scan();
    s.setReversed(true);
    s.withStartRow(metaKey);
    s.addFamily(HConstants.CATALOG_FAMILY);
    s.setOneRowLimit();
    if (this.useMetaReplicas) {
        s.setConsistency(Consistency.TIMELINE);
    }
    int maxAttempts = (retry ? numTries : 1);
    for (int tries = 0; true; tries++) {
        if (tries >= maxAttempts) {
            throw new NoServerForRegionException("Unable to find region for " + Bytes.toStringBinary(row) + " in " + tableName + " after " + tries + " tries.");
        }
        if (useCache) {
            RegionLocations locations = getCachedLocation(tableName, row);
            if (locations != null && locations.getRegionLocation(replicaId) != null) {
                return locations;
            }
        } else {
            // If we are not supposed to be using the cache, delete any existing cached location
            // so it won't interfere.
            metaCache.clearCache(tableName, row);
        }
        // Query the meta region
        long pauseBase = this.pause;
        try {
            Result regionInfoRow = null;
            s.resetMvccReadPoint();
            try (ReversedClientScanner rcs = new ReversedClientScanner(conf, s, TableName.META_TABLE_NAME, this, rpcCallerFactory, rpcControllerFactory, getMetaLookupPool(), 0)) {
                regionInfoRow = rcs.next();
            }
            if (regionInfoRow == null) {
                throw new TableNotFoundException(tableName);
            }
            // convert the row result into the HRegionLocation we need!
            RegionLocations locations = MetaTableAccessor.getRegionLocations(regionInfoRow);
            if (locations == null || locations.getRegionLocation(replicaId) == null) {
                throw new IOException("HRegionInfo was null in " + tableName + ", row=" + regionInfoRow);
            }
            HRegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegionInfo();
            if (regionInfo == null) {
                throw new IOException("HRegionInfo was null or empty in " + TableName.META_TABLE_NAME + ", row=" + regionInfoRow);
            }
            // possible we got a region of a different table...
            if (!regionInfo.getTable().equals(tableName)) {
                throw new TableNotFoundException("Table '" + tableName + "' was not found, got: " + regionInfo.getTable() + ".");
            }
            if (regionInfo.isSplit()) {
                throw new RegionOfflineException("the only available region for" + " the required row is a split parent," + " the daughters should be online soon: " + regionInfo.getRegionNameAsString());
            }
            if (regionInfo.isOffline()) {
                throw new RegionOfflineException("the region is offline, could" + " be caused by a disable table call: " + regionInfo.getRegionNameAsString());
            }
            ServerName serverName = locations.getRegionLocation(replicaId).getServerName();
            if (serverName == null) {
                throw new NoServerForRegionException("No server address listed " + "in " + TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString() + " containing row " + Bytes.toStringBinary(row));
            }
            if (isDeadServer(serverName)) {
                throw new RegionServerStoppedException("hbase:meta says the region " + regionInfo.getRegionNameAsString() + " is managed by the server " + serverName + ", but it is dead.");
            }
            // Instantiate the location
            cacheLocation(tableName, locations);
            return locations;
        } catch (TableNotFoundException e) {
            // The table most likely does not exist; rethrow immediately
            // (this typically surfaces from the HTable constructor).
            throw e;
        } catch (IOException e) {
            ExceptionUtil.rethrowIfInterrupt(e);
            if (e instanceof RemoteException) {
                e = ((RemoteException) e).unwrapRemoteException();
            }
            if (e instanceof CallQueueTooBigException) {
                // Give a special check on CallQueueTooBigException, see #HBASE-17114
                pauseBase = this.pauseForCQTBE;
            }
            if (tries < maxAttempts - 1) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("locateRegionInMeta parentTable=" + TableName.META_TABLE_NAME + ", metaLocation=" + ", attempt=" + tries + " of " + maxAttempts + " failed; retrying after sleep of " + ConnectionUtils.getPauseTime(pauseBase, tries) + " because: " + e.getMessage());
                }
            } else {
                throw e;
            }
            // Only relocate the parent region if necessary
            if (!(e instanceof RegionOfflineException || e instanceof NoServerForRegionException)) {
                relocateRegion(TableName.META_TABLE_NAME, metaKey, replicaId);
            }
        }
        try {
            Thread.sleep(ConnectionUtils.getPauseTime(pauseBase, tries));
        } catch (InterruptedException e) {
            throw new InterruptedIOException("Giving up trying to location region in " + "meta: thread is interrupted.");
        }
    }
}
Also used : RegionLocations(org.apache.hadoop.hbase.RegionLocations) InterruptedIOException(java.io.InterruptedIOException) CallQueueTooBigException(org.apache.hadoop.hbase.CallQueueTooBigException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) RegionServerStoppedException(org.apache.hadoop.hbase.regionserver.RegionServerStoppedException) ServerName(org.apache.hadoop.hbase.ServerName) RemoteException(org.apache.hadoop.ipc.RemoteException)
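
For orientation, this internal lookup is normally reached through the public client API rather than by calling ConnectionImplementation directly. Below is a minimal, hypothetical sketch using RegionLocator; the table name "my_table" and the row key are placeholders, and it assumes a reachable cluster configured through the usual HBase client configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRowExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Placeholder table name; assumed to exist on the cluster.
        TableName table = TableName.valueOf("my_table");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(table)) {
            // reload=false consults the client-side cache first; reload=true forces a
            // fresh hbase:meta lookup, which goes through locateRegionInMeta shown above.
            HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("some-row"), false);
            System.out.println("Row is served by " + loc.getServerName()
                + " in region " + loc.getRegionInfo().getRegionNameAsString());
        }
    }
}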

Example 2 with RegionLocations

use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

From class ConnectionImplementation, method locateRegions.

@Override
public List<HRegionLocation> locateRegions(final TableName tableName, final boolean useCache, final boolean offlined) throws IOException {
    List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(this, tableName, !offlined);
    final List<HRegionLocation> locations = new ArrayList<>();
    for (HRegionInfo regionInfo : regions) {
        RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
        if (list != null) {
            for (HRegionLocation loc : list.getRegionLocations()) {
                if (loc != null) {
                    locations.add(loc);
                }
            }
        }
    }
    return locations;
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) RegionLocations(org.apache.hadoop.hbase.RegionLocations) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) ArrayList(java.util.ArrayList)
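
The loop above flattens each RegionLocations into a plain list while skipping replica slots that have no assignment (null array entries). A small, hypothetical helper that makes the same filtering explicit:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;

public final class LocationFiltering {
    // Hypothetical helper mirroring the null-filtering done in locateRegions above.
    static List<HRegionLocation> nonNullLocations(RegionLocations rl) {
        List<HRegionLocation> out = new ArrayList<>();
        if (rl == null || rl.getRegionLocations() == null) {
            return out;
        }
        for (HRegionLocation loc : rl.getRegionLocations()) {
            // Replica slots without a known assignment are left as null entries.
            if (loc != null) {
                out.add(loc);
            }
        }
        return out;
    }
}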

Example 3 with RegionLocations

use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

From class AsyncRequestFutureImpl, method getReplicaLocationOrFail.

private HRegionLocation getReplicaLocationOrFail(Action action) {
    // We are going to try to get the location once again. For each action, we try
    // the cache first, because previous calls in the loop might have populated it.
    int replicaId = action.getReplicaId();
    RegionLocations locs = findAllLocationsOrFail(action, true);
    // manageError already called
    if (locs == null)
        return null;
    HRegionLocation loc = locs.getRegionLocation(replicaId);
    if (loc == null || loc.getServerName() == null) {
        locs = findAllLocationsOrFail(action, false);
        // manageError already called
        if (locs == null)
            return null;
        loc = locs.getRegionLocation(replicaId);
    }
    if (loc == null || loc.getServerName() == null) {
        manageLocationError(action, null);
        return null;
    }
    return loc;
}
Also used : RegionLocations(org.apache.hadoop.hbase.RegionLocations) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation)
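
A related pattern when working with RegionLocations directly is to fall back to the primary replica when the requested replica slot has no usable server. The helper below is only an illustration of that pattern (pickReplicaOrPrimary is a made-up name, not an HBase API, and it is not what getReplicaLocationOrFail itself does, which retries the lookup without the cache instead):

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;

public final class ReplicaFallback {
    // Hypothetical helper: return the location for the requested replica, or fall
    // back to the primary (replica 0) when that slot has no usable server.
    static HRegionLocation pickReplicaOrPrimary(RegionLocations locs, int replicaId) {
        if (locs == null) {
            return null;
        }
        HRegionLocation loc = locs.getRegionLocation(replicaId);
        if (loc == null || loc.getServerName() == null) {
            loc = locs.getDefaultRegionLocation();
        }
        return loc;
    }
}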

Example 4 with RegionLocations

use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

From class TestClientScanner, method testExceptionsFromReplicasArePropagated.

/**
   * Tests the case where all replicas of a region throw an exception. This should not cause a
   * hang; the exception should propagate to the client.
   */
@Test(timeout = 30000)
public void testExceptionsFromReplicasArePropagated() throws IOException {
    scan.setConsistency(Consistency.TIMELINE);
    // Mock a caller which calls the callable for ScannerCallableWithReplicas,
    // but throws an exception for the actual scanner calls via callWithRetries.
    rpcFactory = new MockRpcRetryingCallerFactory(conf);
    conf.set(RpcRetryingCallerFactory.CUSTOM_CALLER_CONF_KEY, MockRpcRetryingCallerFactory.class.getName());
    // mock 3 replica locations
    when(clusterConn.locateRegion((TableName) any(), (byte[]) any(), anyBoolean(), anyBoolean(), anyInt())).thenReturn(new RegionLocations(null, null, null));
    try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), clusterConn, rpcFactory, new RpcControllerFactory(conf), pool, Integer.MAX_VALUE)) {
        Iterator<Result> iter = scanner.iterator();
        while (iter.hasNext()) {
            iter.next();
        }
        fail("Should have failed with RetriesExhaustedException");
    } catch (RuntimeException expected) {
        assertThat(expected.getCause(), instanceOf(RetriesExhaustedException.class));
    }
}
Also used : RegionLocations(org.apache.hadoop.hbase.RegionLocations) RpcControllerFactory(org.apache.hadoop.hbase.ipc.RpcControllerFactory) Test(org.junit.Test)
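
The test depends on Consistency.TIMELINE, which is what allows a read to be answered by secondary replicas in the first place. A minimal sketch of how application code would opt into timeline consistency and detect a possibly stale answer; the Table handle and row key are assumed to be supplied by the caller:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

public final class TimelineReadExample {
    static Result timelineGet(Table table, byte[] row) throws IOException {
        Get get = new Get(row);
        // Allow the read to be answered by any replica, not only the primary.
        get.setConsistency(Consistency.TIMELINE);
        Result result = table.get(get);
        if (result.isStale()) {
            // Served by a secondary replica; the data may lag behind the primary.
            System.out.println("Read was served by a secondary replica");
        }
        return result;
    }
}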

Example 5 with RegionLocations

use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

From class AssignmentManager, method rebuildUserRegions.

/**
   * Rebuild the list of user regions and assignment information.
   * Updates region states with findings as we go through the list of regions.
   * @return set of servers not online that hosted some regions according to a scan of hbase:meta
   * @throws IOException
   */
Set<ServerName> rebuildUserRegions() throws IOException, KeeperException {
    Set<TableName> disabledOrEnablingTables = tableStateManager.getTablesInStates(TableState.State.DISABLED, TableState.State.ENABLING);
    Set<TableName> disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates(TableState.State.DISABLED, TableState.State.DISABLING, TableState.State.ENABLING);
    // Region assignment from META
    List<Result> results = MetaTableAccessor.fullScanRegions(server.getConnection());
    // Get any new but slow to checkin region server that joined the cluster
    Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
    // Set of offline servers to be returned
    Set<ServerName> offlineServers = new HashSet<>();
    // Iterate regions in META
    for (Result result : results) {
        if (result == null && LOG.isDebugEnabled()) {
            LOG.debug("null result from meta - ignoring but this is strange.");
            continue;
        }
        // Keep track of the replicas to close. These were the replicas of the originally
        // unmerged regions. The master might have closed them before, but it might not
        // have managed to, perhaps because it crashed.
        PairOfSameType<HRegionInfo> p = MetaTableAccessor.getMergeRegions(result);
        if (p.getFirst() != null && p.getSecond() != null) {
            int numReplicas = getNumReplicas(server, p.getFirst().getTable());
            for (HRegionInfo merge : p) {
                for (int i = 1; i < numReplicas; i++) {
                    replicasToClose.add(RegionReplicaUtil.getRegionInfoForReplica(merge, i));
                }
            }
        }
        RegionLocations rl = MetaTableAccessor.getRegionLocations(result);
        if (rl == null) {
            continue;
        }
        HRegionLocation[] locations = rl.getRegionLocations();
        if (locations == null) {
            continue;
        }
        for (HRegionLocation hrl : locations) {
            if (hrl == null)
                continue;
            HRegionInfo regionInfo = hrl.getRegionInfo();
            if (regionInfo == null)
                continue;
            int replicaId = regionInfo.getReplicaId();
            State state = RegionStateStore.getRegionState(result, replicaId);
            // The master should have closed the split parent's replicas before,
            // but it might not have managed to, perhaps because it crashed.
            if (replicaId == 0 && state.equals(State.SPLIT)) {
                for (HRegionLocation h : locations) {
                    replicasToClose.add(h.getRegionInfo());
                }
            }
            ServerName lastHost = hrl.getServerName();
            ServerName regionLocation = RegionStateStore.getRegionServer(result, replicaId);
            regionStates.createRegionState(regionInfo, state, regionLocation, lastHost);
            if (!regionStates.isRegionInState(regionInfo, State.OPEN)) {
                // Region is not open (either offline or in transition), skip
                continue;
            }
            TableName tableName = regionInfo.getTable();
            if (!onlineServers.contains(regionLocation)) {
                // Region is located on a server that isn't online
                offlineServers.add(regionLocation);
            } else if (!disabledOrEnablingTables.contains(tableName)) {
                // Region is being served and on an active server
                // add only if region not in disabled or enabling table
                regionStates.regionOnline(regionInfo, regionLocation);
                balancer.regionOnline(regionInfo, regionLocation);
            }
            // this will be used in rolling restarts
            if (!disabledOrDisablingOrEnabling.contains(tableName) && !getTableStateManager().isTableState(tableName, TableState.State.ENABLED)) {
                setEnabledTable(tableName);
            }
        }
    }
    return offlineServers;
}
Also used : RegionLocations(org.apache.hadoop.hbase.RegionLocations) Result(org.apache.hadoop.hbase.client.Result) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) State(org.apache.hadoop.hbase.master.RegionState.State) TableState(org.apache.hadoop.hbase.client.TableState) RegionOpeningState(org.apache.hadoop.hbase.regionserver.RegionOpeningState) ServerName(org.apache.hadoop.hbase.ServerName) HashSet(java.util.HashSet)
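
The per-row decoding step, MetaTableAccessor.getRegionLocations(result), can also be used on its own to inspect the replica assignments recorded in a single hbase:meta row. A short, hypothetical sketch:

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.client.Result;

public final class MetaRowInspector {
    // Hypothetical helper: print every assigned replica found in one hbase:meta row.
    static void printReplicaAssignments(Result metaRow) {
        RegionLocations rl = MetaTableAccessor.getRegionLocations(metaRow);
        if (rl == null || rl.getRegionLocations() == null) {
            return;
        }
        for (HRegionLocation hrl : rl.getRegionLocations()) {
            if (hrl == null) {
                continue;  // unassigned replica slot
            }
            HRegionInfo info = hrl.getRegionInfo();
            System.out.println(info.getRegionNameAsString() + " (replica "
                + info.getReplicaId() + ") -> " + hrl.getServerName());
        }
    }
}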

Aggregations

RegionLocations (org.apache.hadoop.hbase.RegionLocations): 47 usages
HRegionLocation (org.apache.hadoop.hbase.HRegionLocation): 28 usages
ServerName (org.apache.hadoop.hbase.ServerName): 18 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 13 usages
Test (org.junit.Test): 9 usages
IOException (java.io.IOException): 8 usages
InterruptedIOException (java.io.InterruptedIOException): 7 usages
ArrayList (java.util.ArrayList): 6 usages
Result (org.apache.hadoop.hbase.client.Result): 6 usages
TableName (org.apache.hadoop.hbase.TableName): 5 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 4 usages
MetaTableAccessor (org.apache.hadoop.hbase.MetaTableAccessor): 4 usages
Pair (org.apache.hadoop.hbase.util.Pair): 4 usages
HashMap (java.util.HashMap): 3 usages
HashSet (java.util.HashSet): 2 usages
CancellationException (java.util.concurrent.CancellationException): 2 usages
ExecutionException (java.util.concurrent.ExecutionException): 2 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2 usages
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 2 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 2 usages