Example 31 with RegionLocations

Use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

From the class MetaCache, method clearCache.

public void clearCache(final HRegionLocation location) {
    if (location == null) {
        return;
    }
    TableName tableName = location.getRegionInfo().getTable();
    ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
    RegionLocations regionLocations = tableLocations.get(location.getRegionInfo().getStartKey());
    if (regionLocations != null) {
        RegionLocations updatedLocations = regionLocations.remove(location);
        boolean removed;
        if (updatedLocations != regionLocations) {
            if (updatedLocations.isEmpty()) {
                removed = tableLocations.remove(location.getRegionInfo().getStartKey(), regionLocations);
            } else {
                removed = tableLocations.replace(location.getRegionInfo().getStartKey(), regionLocations, updatedLocations);
            }
            if (removed) {
                if (metrics != null) {
                    metrics.incrMetaCacheNumClearRegion();
                }
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Removed " + location + " from cache");
                }
            }
        }
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) RegionLocations(org.apache.hadoop.hbase.RegionLocations)
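
The removal above follows a copy-on-write, compare-and-swap idiom: build the updated RegionLocations first, then use the two-argument remove and the three-argument replace so that a concurrent refresh of the same start key is never clobbered. Below is a minimal standalone sketch of the same idiom, with String keys and a plain Set standing in for byte[] start keys and RegionLocations; the names are illustrative, not HBase APIs.

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

final class LocationCacheSketch {

    private final ConcurrentMap<String, Set<String>> cache = new ConcurrentHashMap<>();

    // Remove one host from the cached set for startKey, deleting the entry when it
    // becomes empty. Returns true only if this thread's update was the one applied.
    boolean clear(String startKey, String host) {
        Set<String> current = cache.get(startKey);
        if (current == null || !current.contains(host)) {
            return false;
        }
        Set<String> updated = new HashSet<>(current);
        updated.remove(host);
        // The conditional remove/replace succeed only if the map still holds the value
        // we read, mirroring tableLocations.remove(key, old) / replace(key, old, new).
        return updated.isEmpty()
                ? cache.remove(startKey, current)
                : cache.replace(startKey, current, Collections.unmodifiableSet(updated));
    }
}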

Example 32 with RegionLocations

Use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

From the class MetaCache, method getNumberOfCachedRegionLocations.

/**
   * Return the number of cached regions for a table. It will only be called
   * from a unit test.
   */
public int getNumberOfCachedRegionLocations(final TableName tableName) {
    Map<byte[], RegionLocations> tableLocs = this.cachedRegionLocations.get(tableName);
    if (tableLocs == null) {
        return 0;
    }
    int numRegions = 0;
    for (RegionLocations tableLoc : tableLocs.values()) {
        numRegions += tableLoc.numNonNullElements();
    }
    return numRegions;
}
Also used : RegionLocations(org.apache.hadoop.hbase.RegionLocations)
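
The same tally can be written as a stream reduction over the cached values; this is a sketch assuming the surrounding fields are unchanged, not the actual HBase implementation:

// Hypothetical alternative body for getNumberOfCachedRegionLocations:
Map<byte[], RegionLocations> tableLocs = this.cachedRegionLocations.get(tableName);
return tableLocs == null
        ? 0
        : tableLocs.values().stream()
              .mapToInt(RegionLocations::numNonNullElements)
              .sum();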

Example 33 with RegionLocations

Use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

From the class MultiThreadedAction, method printLocations.

private void printLocations(Result r) {
    RegionLocations rl = null;
    if (r == null) {
        LOG.info("FAILED FOR null Result");
        return;
    }
    LOG.info("FAILED FOR " + resultToString(r) + " Stale " + r.isStale());
    if (r.getRow() == null) {
        return;
    }
    try {
        rl = ((ClusterConnection) connection).locateRegion(tableName, r.getRow(), true, true);
    } catch (IOException e) {
        LOG.warn("Couldn't get locations for row " + Bytes.toString(r.getRow()));
    }
    if (rl == null) {
        // locateRegion failed above; nothing to print, and dereferencing rl would NPE.
        return;
    }
    HRegionLocation[] locations = rl.getRegionLocations();
    for (HRegionLocation h : locations) {
        LOG.info("LOCATION " + h);
    }
}
Also used : RegionLocations(org.apache.hadoop.hbase.RegionLocations) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) IOException(java.io.IOException)
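
Note that, as Example 34 shows, the array returned by getRegionLocations() can contain null slots for replicas whose location is unknown, so a slightly more defensive version of the final loop would skip them (a sketch, not part of the original method):

for (HRegionLocation h : rl.getRegionLocations()) {
    if (h != null) {   // replica location may be absent
        LOG.info("LOCATION " + h);
    }
}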

Example 34 with RegionLocations

Use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

From the class ZKAsyncRegistry, method getMetaRegionLocation.

@Override
public CompletableFuture<RegionLocations> getMetaRegionLocation() {
    CompletableFuture<RegionLocations> future = new CompletableFuture<>();
    HRegionLocation[] locs = new HRegionLocation[znodePaths.metaReplicaZNodes.size()];
    MutableInt remaining = new MutableInt(locs.length);
    znodePaths.metaReplicaZNodes.forEach((replicaId, path) -> {
        if (replicaId == DEFAULT_REPLICA_ID) {
            exec(zk.getData(), path, ZKAsyncRegistry::getMetaProto).whenComplete((proto, error) -> {
                if (error != null) {
                    future.completeExceptionally(error);
                    return;
                }
                if (proto == null) {
                    future.completeExceptionally(new IOException("Meta znode is null"));
                    return;
                }
                Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
                if (stateAndServerName.getFirst() != RegionState.State.OPEN) {
                    future.completeExceptionally(new IOException("Meta region is in state " + stateAndServerName.getFirst()));
                    return;
                }
                locs[DEFAULT_REPLICA_ID] = new HRegionLocation(getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond());
                tryComplete(remaining, locs, future);
            });
        } else {
            exec(zk.getData(), path, ZKAsyncRegistry::getMetaProto).whenComplete((proto, error) -> {
                if (future.isDone()) {
                    return;
                }
                if (error != null) {
                    LOG.warn("Failed to fetch " + path, error);
                    locs[replicaId] = null;
                } else if (proto == null) {
                    LOG.warn("Meta znode for replica " + replicaId + " is null");
                    locs[replicaId] = null;
                } else {
                    Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
                    if (stateAndServerName.getFirst() != RegionState.State.OPEN) {
                        LOG.warn("Meta region for replica " + replicaId + " is in state " + stateAndServerName.getFirst());
                        locs[replicaId] = null;
                    } else {
                        locs[replicaId] = new HRegionLocation(getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId), stateAndServerName.getSecond());
                    }
                }
                tryComplete(remaining, locs, future);
            });
        }
    });
    return future;
}
Also used : RegionLocations(org.apache.hadoop.hbase.RegionLocations) CompletableFuture(java.util.concurrent.CompletableFuture) RegionState(org.apache.hadoop.hbase.master.RegionState) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) RegionState(org.apache.hadoop.hbase.master.RegionState) MutableInt(org.apache.commons.lang.mutable.MutableInt) ServerName(org.apache.hadoop.hbase.ServerName) IOException(java.io.IOException) Pair(org.apache.hadoop.hbase.util.Pair)
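
The helper tryComplete is not shown above. A plausible shape, under the assumption that it simply counts down the shared MutableInt and completes the future once every meta replica znode has been processed (a sketch relying on the imports listed above, not the actual HBase code):

private static void tryComplete(MutableInt remaining, HRegionLocation[] locs,
        CompletableFuture<RegionLocations> future) {
    remaining.decrement();
    if (remaining.intValue() > 0) {
        return; // still waiting on other replica znodes
    }
    // All replicas have answered (possibly with null slots); publish the result.
    future.complete(new RegionLocations(locs));
}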

Example 35 with RegionLocations

Use of org.apache.hadoop.hbase.RegionLocations in project hbase by apache.

From the class RegionAdminServiceCallable, method getLocation.

public HRegionLocation getLocation(boolean useCache) throws IOException {
    RegionLocations rl = getRegionLocations(connection, tableName, row, useCache, replicaId);
    if (rl == null) {
        throw new HBaseIOException(getExceptionMessage());
    }
    HRegionLocation location = rl.getRegionLocation(replicaId);
    if (location == null) {
        throw new HBaseIOException(getExceptionMessage());
    }
    return location;
}
Also used : RegionLocations(org.apache.hadoop.hbase.RegionLocations) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException)
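
A common calling pattern for a method like this is to try the cached location first and force a fresh meta lookup only when the cached one is missing or stale; the following is a sketch of that pattern, not code taken from RegionAdminServiceCallable:

HRegionLocation loc;
try {
    loc = getLocation(true);    // use the cached RegionLocations when available
} catch (HBaseIOException e) {
    loc = getLocation(false);   // nothing usable in the cache: go back to meta
}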

Aggregations

RegionLocations (org.apache.hadoop.hbase.RegionLocations): 47 usages
HRegionLocation (org.apache.hadoop.hbase.HRegionLocation): 28 usages
ServerName (org.apache.hadoop.hbase.ServerName): 18 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 13 usages
Test (org.junit.Test): 9 usages
IOException (java.io.IOException): 8 usages
InterruptedIOException (java.io.InterruptedIOException): 7 usages
ArrayList (java.util.ArrayList): 6 usages
Result (org.apache.hadoop.hbase.client.Result): 6 usages
TableName (org.apache.hadoop.hbase.TableName): 5 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 4 usages
MetaTableAccessor (org.apache.hadoop.hbase.MetaTableAccessor): 4 usages
Pair (org.apache.hadoop.hbase.util.Pair): 4 usages
HashMap (java.util.HashMap): 3 usages
HashSet (java.util.HashSet): 2 usages
CancellationException (java.util.concurrent.CancellationException): 2 usages
ExecutionException (java.util.concurrent.ExecutionException): 2 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2 usages
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 2 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 2 usages