Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
In class ConnectionImplementation, method locateRegionInMeta:
/*
 * Search the hbase:meta table for the HRegionLocation
 * info that contains the table and row we're seeking.
 */
private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, boolean useCache,
    boolean retry, int replicaId) throws IOException {
  // we already have the region.
  if (useCache) {
    RegionLocations locations = getCachedLocation(tableName, row);
    if (locations != null && locations.getRegionLocation(replicaId) != null) {
      return locations;
    }
  }
  // build the key of the meta region we should be looking for.
  // the extra 9's on the end are necessary to allow "exact" matches
  // without knowing the precise region names.
  byte[] metaKey = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
  Scan s = new Scan();
  s.setReversed(true);
  s.withStartRow(metaKey);
  s.addFamily(HConstants.CATALOG_FAMILY);
  s.setOneRowLimit();
  if (this.useMetaReplicas) {
    s.setConsistency(Consistency.TIMELINE);
  }
  int maxAttempts = (retry ? numTries : 1);
  for (int tries = 0; true; tries++) {
    if (tries >= maxAttempts) {
      throw new NoServerForRegionException("Unable to find region for "
          + Bytes.toStringBinary(row) + " in " + tableName + " after " + tries + " tries.");
    }
    if (useCache) {
      RegionLocations locations = getCachedLocation(tableName, row);
      if (locations != null && locations.getRegionLocation(replicaId) != null) {
        return locations;
      }
    } else {
      // If we are not supposed to be using the cache, delete any existing cached location
      // so it won't interfere.
      metaCache.clearCache(tableName, row);
    }
    // Query the meta region
    long pauseBase = this.pause;
    try {
      Result regionInfoRow = null;
      s.resetMvccReadPoint();
      try (ReversedClientScanner rcs = new ReversedClientScanner(conf, s,
          TableName.META_TABLE_NAME, this, rpcCallerFactory, rpcControllerFactory,
          getMetaLookupPool(), 0)) {
        regionInfoRow = rcs.next();
      }
      if (regionInfoRow == null) {
        throw new TableNotFoundException(tableName);
      }
      // convert the row result into the HRegionLocation we need!
      RegionLocations locations = MetaTableAccessor.getRegionLocations(regionInfoRow);
      if (locations == null || locations.getRegionLocation(replicaId) == null) {
        throw new IOException("HRegionInfo was null in " + tableName + ", row=" + regionInfoRow);
      }
      HRegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegionInfo();
      if (regionInfo == null) {
        throw new IOException("HRegionInfo was null or empty in " + TableName.META_TABLE_NAME
            + ", row=" + regionInfoRow);
      }
      // possible we got a region of a different table...
      if (!regionInfo.getTable().equals(tableName)) {
        throw new TableNotFoundException("Table '" + tableName + "' was not found, got: "
            + regionInfo.getTable() + ".");
      }
      if (regionInfo.isSplit()) {
        throw new RegionOfflineException("the only available region for"
            + " the required row is a split parent,"
            + " the daughters should be online soon: " + regionInfo.getRegionNameAsString());
      }
      if (regionInfo.isOffline()) {
        throw new RegionOfflineException("the region is offline, could"
            + " be caused by a disable table call: " + regionInfo.getRegionNameAsString());
      }
      ServerName serverName = locations.getRegionLocation(replicaId).getServerName();
      if (serverName == null) {
        throw new NoServerForRegionException("No server address listed in "
            + TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString()
            + " containing row " + Bytes.toStringBinary(row));
      }
      if (isDeadServer(serverName)) {
        throw new RegionServerStoppedException("hbase:meta says the region "
            + regionInfo.getRegionNameAsString() + " is managed by the server " + serverName
            + ", but it is dead.");
      }
      // Instantiate the location
      cacheLocation(tableName, locations);
      return locations;
    } catch (TableNotFoundException e) {
      // the table just plain doesn't exist; rethrow immediately. This should
      // always be coming from the HTable constructor.
      throw e;
    } catch (IOException e) {
      ExceptionUtil.rethrowIfInterrupt(e);
      if (e instanceof RemoteException) {
        e = ((RemoteException) e).unwrapRemoteException();
      }
      if (e instanceof CallQueueTooBigException) {
        // Special-case CallQueueTooBigException; see HBASE-17114
        pauseBase = this.pauseForCQTBE;
      }
      if (tries < maxAttempts - 1) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("locateRegionInMeta parentTable=" + TableName.META_TABLE_NAME
              + ", metaLocation=" + ", attempt=" + tries + " of " + maxAttempts
              + " failed; retrying after sleep of "
              + ConnectionUtils.getPauseTime(pauseBase, tries) + " because: " + e.getMessage());
        }
      } else {
        throw e;
      }
      // Only relocate the parent region if necessary
      if (!(e instanceof RegionOfflineException || e instanceof NoServerForRegionException)) {
        relocateRegion(TableName.META_TABLE_NAME, metaKey, replicaId);
      }
    }
    try {
      Thread.sleep(ConnectionUtils.getPauseTime(pauseBase, tries));
    } catch (InterruptedException e) {
      throw new InterruptedIOException("Giving up trying to locate region in meta: "
          + "thread is interrupted.");
    }
  }
}
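locateRegionInMeta is private to ConnectionImplementation, but the reverse-scan trick it relies on can be reproduced against hbase:meta with the public client API. The sketch below is illustrative only (no caching or retry loop), assuming a roughly HBase 1.4/2.0-era client on the classpath; MetaReverseScanSketch, my_table and some-row are hypothetical names, and MetaTableAccessor is an internal class.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaReverseScanSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName userTable = TableName.valueOf("my_table"); // hypothetical table
    byte[] row = Bytes.toBytes("some-row");              // hypothetical row

    // Same key construction as locateRegionInMeta: "my_table,some-row,99999999999999"
    byte[] metaKey = HRegionInfo.createRegionName(userTable, row, HConstants.NINES, false);

    Scan scan = new Scan()
        .withStartRow(metaKey)
        .setReversed(true)                     // walk backwards to the covering region
        .addFamily(HConstants.CATALOG_FAMILY)
        .setOneRowLimit();

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(scan)) {
      Result r = scanner.next();
      if (r == null) {
        System.out.println("No meta entry found; the table may not exist");
        return;
      }
      RegionLocations locations = MetaTableAccessor.getRegionLocations(r);
      if (locations != null) {
        System.out.println("Row " + Bytes.toStringBinary(row) + " is served by "
            + locations.getDefaultRegionLocation());
      }
    }
  }
}

The key construction is the point: the meta row key table,row,99999999999999 sorts at or after the meta entry of the region covering the row, so a reverse scan starting from it returns that entry first.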
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
In class ConnectionImplementation, method locateRegions:
@Override
public List<HRegionLocation> locateRegions(final TableName tableName, final boolean useCache,
    final boolean offlined) throws IOException {
  List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(this, tableName, !offlined);
  final List<HRegionLocation> locations = new ArrayList<>();
  for (HRegionInfo regionInfo : regions) {
    RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
    if (list != null) {
      for (HRegionLocation loc : list.getRegionLocations()) {
        if (loc != null) {
          locations.add(loc);
        }
      }
    }
  }
  return locations;
}
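Application code usually does not call locateRegions directly; the public RegionLocator interface exposes the same information. A minimal sketch assuming a running cluster; ListRegionLocationsSketch and my_table are hypothetical names.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListRegionLocationsSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("my_table"); // hypothetical table

    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(tableName)) {
      // Walks hbase:meta much like locateRegions() above and returns one
      // location per region of the table.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegionInfo().getRegionNameAsString()
            + " -> " + loc.getServerName());
      }
    }
  }
}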
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
In class ConnectionImplementation, method isTableAvailable:
@Override
public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys)
    throws IOException {
  if (this.closed) {
    throw new IOException(toString() + " closed");
  }
  try {
    if (!isTableEnabled(tableName)) {
      LOG.debug("Table " + tableName + " not enabled");
      return false;
    }
    List<Pair<HRegionInfo, ServerName>> locations =
        MetaTableAccessor.getTableRegionsAndLocations(this, tableName, true);
    int notDeployed = 0;
    int regionCount = 0;
    for (Pair<HRegionInfo, ServerName> pair : locations) {
      HRegionInfo info = pair.getFirst();
      if (pair.getSecond() == null) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Table " + tableName + " has not deployed region "
              + pair.getFirst().getEncodedName());
        }
        notDeployed++;
      } else if (splitKeys != null
          && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
        for (byte[] splitKey : splitKeys) {
          // Just check if the splitkey is available
          if (Bytes.equals(info.getStartKey(), splitKey)) {
            regionCount++;
            break;
          }
        }
      } else {
        // The region with the empty start row is always counted
        regionCount++;
      }
    }
    if (notDeployed > 0) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Table " + tableName + " has " + notDeployed + " regions");
      }
      return false;
    } else if (splitKeys != null && regionCount != splitKeys.length + 1) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Table " + tableName + " expected to have " + (splitKeys.length + 1)
            + " regions, but only " + regionCount + " available");
      }
      return false;
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Table " + tableName + " should be available");
      }
      return true;
    }
  } catch (TableNotFoundException tnfe) {
    LOG.warn("Table " + tableName + " does not exist");
    return false;
  }
}
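Outside of the connection internals, the same check is exposed through the Admin API. A minimal sketch of both overloads, assuming a running cluster; TableAvailabilitySketch, my_table and the split key "m" are hypothetical, and the splitKeys overload may be deprecated in newer releases.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class TableAvailabilitySketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("my_table");   // hypothetical table
    byte[][] splitKeys = { Bytes.toBytes("m") };            // hypothetical split point

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Plain availability: every region has a server listed in hbase:meta.
      boolean available = admin.isTableAvailable(tableName);
      // Stricter check: regions must also line up with the given split keys,
      // mirroring the splitKeys branch of isTableAvailable above.
      boolean availableWithSplits = admin.isTableAvailable(tableName, splitKeys);
      System.out.println(available + " / " + availableWithSplits);
    }
  }
}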
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
In class TestClientNoCluster, method makeHRegionInfos:
/**
 * @param tableName table the regions belong to
 * @param count number of regions to create
 * @param namespaceSpan total key space to divide evenly across the regions
 * @return <code>count</code> regions
 */
private static HRegionInfo[] makeHRegionInfos(final byte[] tableName, final int count,
    final long namespaceSpan) {
  byte[] startKey = HConstants.EMPTY_BYTE_ARRAY;
  byte[] endKey = HConstants.EMPTY_BYTE_ARRAY;
  long interval = namespaceSpan / count;
  HRegionInfo[] hris = new HRegionInfo[count];
  for (int i = 0; i < count; i++) {
    if (i == 0) {
      endKey = format(interval);
    } else {
      startKey = endKey;
      if (i == count - 1) {
        endKey = HConstants.EMPTY_BYTE_ARRAY;
      } else {
        endKey = format((i + 1) * interval);
      }
    }
    hris[i] = new HRegionInfo(TableName.valueOf(tableName), startKey, endKey);
  }
  return hris;
}
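The helper splits a numeric key space of namespaceSpan into count contiguous regions, with an empty start key on the first region and an empty end key on the last. Below is a standalone sketch of the same boundary arithmetic; RegionBoundarySketch and the table name t are hypothetical, and Bytes.toBytes(long) stands in for the test's format() helper.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionBoundarySketch {
  public static void main(String[] args) {
    TableName table = TableName.valueOf("t"); // hypothetical table
    int count = 4;
    long namespaceSpan = 1000L;
    long interval = namespaceSpan / count;

    byte[] startKey = HConstants.EMPTY_BYTE_ARRAY;
    for (int i = 0; i < count; i++) {
      // The last region gets an open-ended stop key, same as makeHRegionInfos above.
      byte[] endKey = (i == count - 1)
          ? HConstants.EMPTY_BYTE_ARRAY
          : Bytes.toBytes((i + 1) * interval); // stand-in for the test's format()
      HRegionInfo hri = new HRegionInfo(table, startKey, endKey);
      System.out.println(hri.getRegionNameAsString());
      startKey = endKey;
    }
  }
}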
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
In class TestClientNoCluster, method doMetaGetResponse:
static GetResponse doMetaGetResponse(final SortedMap<byte[], Pair<HRegionInfo, ServerName>> meta,
    final GetRequest request) {
  ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder();
  ByteString row = request.getGet().getRow();
  Pair<HRegionInfo, ServerName> p = meta.get(row.toByteArray());
  if (p != null) {
    resultBuilder.addCell(getRegionInfo(row, p.getFirst()));
    resultBuilder.addCell(getServer(row, p.getSecond()));
  }
  resultBuilder.addCell(getStartCode(row));
  GetResponse.Builder builder = GetResponse.newBuilder();
  builder.setResult(resultBuilder.build());
  return builder.build();
}
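The map handed to doMetaGetResponse is keyed on raw region-name bytes, so it has to be built with a byte-wise comparator for meta.get(row.toByteArray()) to find anything. A small sketch of populating such a map; FakeMetaMapSketch and the fake-host server name are hypothetical.

import java.util.SortedMap;
import java.util.concurrent.ConcurrentSkipListMap;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public class FakeMetaMapSketch {
  public static void main(String[] args) {
    // Keys are raw region-name bytes, so a byte-wise comparator is required.
    SortedMap<byte[], Pair<HRegionInfo, ServerName>> meta =
        new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);

    HRegionInfo hri = new HRegionInfo(TableName.valueOf("t"),
        HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
    ServerName sn = ServerName.valueOf("fake-host", 16020, 1L); // hypothetical server

    // doMetaGetResponse() looks entries up by region name, so that is the map key.
    meta.put(hri.getRegionName(), new Pair<>(hri, sn));

    System.out.println(meta.containsKey(hri.getRegionName()));
  }
}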