Use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
The class HBaseFsck, method recordMetaRegion.
/**
* Record the location of the hbase:meta region as found in ZooKeeper.
*/
private boolean recordMetaRegion() throws IOException {
  RegionLocations rl = ((ClusterConnection) connection).locateRegion(TableName.META_TABLE_NAME,
    HConstants.EMPTY_START_ROW, false, false);
  if (rl == null) {
    errors.reportError(ERROR_CODE.NULL_META_REGION, "META region was not found in ZooKeeper");
    return false;
  }
  for (HRegionLocation metaLocation : rl.getRegionLocations()) {
    // Check if Meta region is valid and existing
    if (metaLocation == null) {
      errors.reportError(ERROR_CODE.NULL_META_REGION, "META region location is null");
      return false;
    }
    if (metaLocation.getRegionInfo() == null) {
      errors.reportError(ERROR_CODE.NULL_META_REGION, "META location regionInfo is null");
      return false;
    }
    if (metaLocation.getHostname() == null) {
      errors.reportError(ERROR_CODE.NULL_META_REGION, "META location hostName is null");
      return false;
    }
    ServerName sn = metaLocation.getServerName();
    MetaEntry m = new MetaEntry(metaLocation.getRegionInfo(), sn, EnvironmentEdgeManager.currentTime());
    HbckInfo hbckInfo = regionInfoMap.get(metaLocation.getRegionInfo().getEncodedName());
    if (hbckInfo == null) {
      regionInfoMap.put(metaLocation.getRegionInfo().getEncodedName(), new HbckInfo(m));
    } else {
      hbckInfo.metaEntry = m;
    }
  }
  return true;
}
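Outside of HBaseFsck, the same replica-aware meta location can be obtained through the public client API rather than the internal ClusterConnection. A minimal sketch, assuming a running cluster reachable from the client configuration; the class name is illustrative and not part of HBaseFsck:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Location of the default replica of the hbase:meta region.
      HRegionLocation metaLocation = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("meta region " + metaLocation.getRegionInfo().getEncodedName()
        + " is on " + metaLocation.getServerName());
    }
  }
}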
Use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
The class HBaseFsck, method closeRegion.
/**
 * Attempts to undeploy a region from a region server based on information in
 * META. Any operations that modify the file system should make sure that
 * the corresponding region is not deployed, to prevent data races.
 *
 * A separate call is required to update the master's in-memory region state
 * kept in the AssignmentManager. Because disable uses this state instead of
 * that found in META, we can't seem to cleanly disable/delete tables that
 * have been hbck fixed. When used on a version of HBase that does not have
 * the offline ipc call exposed on the master (<0.90.5, <0.92.0) a master
 * restart or failover may be required.
 */
private void closeRegion(HbckInfo hi) throws IOException, InterruptedException {
  if (hi.metaEntry == null && hi.hdfsEntry == null) {
    undeployRegions(hi);
    return;
  }
  // get assignment info and hregioninfo from meta.
  Get get = new Get(hi.getRegionName());
  get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  get.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
  get.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
  // also get the locations of the replicas to close if the primary region is being closed
  if (hi.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
    int numReplicas = admin.getTableDescriptor(hi.getTableName()).getRegionReplication();
    for (int i = 0; i < numReplicas; i++) {
      get.addColumn(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(i));
      get.addColumn(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(i));
    }
  }
  Result r = meta.get(get);
  RegionLocations rl = MetaTableAccessor.getRegionLocations(r);
  if (rl == null) {
    LOG.warn("Unable to close region " + hi.getRegionNameAsString()
      + " since meta does not have handle to reach it");
    return;
  }
  for (HRegionLocation h : rl.getRegionLocations()) {
    ServerName serverName = h.getServerName();
    if (serverName == null) {
      errors.reportError("Unable to close region " + hi.getRegionNameAsString()
        + " because meta does not have handle to reach it.");
      continue;
    }
    HRegionInfo hri = h.getRegionInfo();
    if (hri == null) {
      LOG.warn("Unable to close region " + hi.getRegionNameAsString()
        + " because hbase:meta had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":"
        + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier value.");
      continue;
    }
    // close the region -- close files and remove assignment
    HBaseFsckRepair.closeRegionSilentlyAndWait(connection, serverName, hri);
  }
}
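The replica server and start-code columns requested by this Get all live in the catalog (info) family of hbase:meta, addressed per replica id through MetaTableAccessor. A minimal sketch of building the same kind of Get outside of hbck; the class name, region name, and replica count are illustrative assumptions:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Get;

public class ReplicaAwareMetaGetSketch {
  // Build a Get that fetches the region info plus the location (server + startcode)
  // of every replica for one hbase:meta row. regionName is the full region name used
  // as the meta row key; numReplicas is the table's configured region replication.
  static Get replicaAwareGet(byte[] regionName, int numReplicas) {
    Get get = new Get(regionName);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    for (int replicaId = 0; replicaId < numReplicas; replicaId++) {
      // replica 0 uses the plain server / startcode qualifiers; MetaTableAccessor
      // appends a replica-id suffix to the qualifier for the other replicas
      get.addColumn(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(replicaId));
      get.addColumn(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(replicaId));
    }
    return get;
  }
}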
Use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
The class HBaseFsck, method loadMetaEntries.
/**
* Scan hbase:meta, adding all regions found to the regionInfo map.
* @throws IOException if an error is encountered
*/
boolean loadMetaEntries() throws IOException {
  MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
    int countRecord = 1;

    // comparator to sort KeyValues with latest modtime
    final Comparator<Cell> comp = new Comparator<Cell>() {
      @Override
      public int compare(Cell k1, Cell k2) {
        return Long.compare(k1.getTimestamp(), k2.getTimestamp());
      }
    };

    @Override
    public boolean visit(Result result) throws IOException {
      try {
        // record the latest modification of this META record
        long ts = Collections.max(result.listCells(), comp).getTimestamp();
        RegionLocations rl = MetaTableAccessor.getRegionLocations(result);
        if (rl == null) {
          emptyRegionInfoQualifiers.add(result);
          errors.reportError(ERROR_CODE.EMPTY_META_CELL,
            "Empty REGIONINFO_QUALIFIER found in hbase:meta");
          return true;
        }
        ServerName sn = null;
        if (rl.getRegionLocation(HRegionInfo.DEFAULT_REPLICA_ID) == null
          || rl.getRegionLocation(HRegionInfo.DEFAULT_REPLICA_ID).getRegionInfo() == null) {
          emptyRegionInfoQualifiers.add(result);
          errors.reportError(ERROR_CODE.EMPTY_META_CELL,
            "Empty REGIONINFO_QUALIFIER found in hbase:meta");
          return true;
        }
        HRegionInfo hri = rl.getRegionLocation(HRegionInfo.DEFAULT_REPLICA_ID).getRegionInfo();
        if (!(isTableIncluded(hri.getTable()) || hri.isMetaRegion())) {
          return true;
        }
        PairOfSameType<HRegionInfo> daughters = MetaTableAccessor.getDaughterRegions(result);
        for (HRegionLocation h : rl.getRegionLocations()) {
          if (h == null || h.getRegionInfo() == null) {
            continue;
          }
          sn = h.getServerName();
          hri = h.getRegionInfo();
          MetaEntry m = null;
          if (hri.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
            m = new MetaEntry(hri, sn, ts, daughters.getFirst(), daughters.getSecond());
          } else {
            m = new MetaEntry(hri, sn, ts, null, null);
          }
          HbckInfo previous = regionInfoMap.get(hri.getEncodedName());
          if (previous == null) {
            regionInfoMap.put(hri.getEncodedName(), new HbckInfo(m));
          } else if (previous.metaEntry == null) {
            previous.metaEntry = m;
          } else {
            throw new IOException("Two entries in hbase:meta are same " + previous);
          }
        }
        PairOfSameType<HRegionInfo> mergeRegions = MetaTableAccessor.getMergeRegions(result);
        for (HRegionInfo mergeRegion : new HRegionInfo[] { mergeRegions.getFirst(),
            mergeRegions.getSecond() }) {
          if (mergeRegion != null) {
            // This region has already been merged
            HbckInfo hbInfo = getOrCreateInfo(mergeRegion.getEncodedName());
            hbInfo.setMerged(true);
          }
        }
        // show proof of progress to the user, once for every 100 records.
        if (countRecord % 100 == 0) {
          errors.progress();
        }
        countRecord++;
        return true;
      } catch (RuntimeException e) {
        LOG.error("Result=" + result);
        throw e;
      }
    }
  };
  if (!checkMetaOnly) {
    // Scan hbase:meta to pick up user regions
    MetaTableAccessor.fullScanRegions(connection, visitor);
  }
  errors.print("");
  return true;
}
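The Visitor above is the standard way to walk every region row in hbase:meta. A stripped-down sketch of the same mechanism, assuming an open Connection and simply counting regions instead of building HbckInfo entries (class and method names are illustrative):

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;

public class MetaScanSketch {
  // Minimal meta scan: visit every region row and count the rows that carry a
  // usable default-replica location. Returning true from visit() keeps scanning.
  static int countMetaRegions(Connection connection) throws IOException {
    final AtomicInteger regions = new AtomicInteger();
    MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
      @Override
      public boolean visit(Result result) throws IOException {
        RegionLocations rl = MetaTableAccessor.getRegionLocations(result);
        if (rl != null && rl.getDefaultRegionLocation() != null) {
          regions.incrementAndGet();
        }
        return true; // keep scanning
      }
    };
    MetaTableAccessor.fullScanRegions(connection, visitor);
    return regions.get();
  }
}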
Use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
The class TestRegionReplicaReplicationEndpoint, method testRegionReplicaReplicationIgnoresDisabledTables.
public void testRegionReplicaReplicationIgnoresDisabledTables(boolean dropTable) throws Exception {
  // tests that edits from a disabled or dropped table are handled correctly by skipping those
  // entries, and that edits arriving after the dropped/disabled table's edits can be replicated
  // without problems.
  final TableName tableName = TableName.valueOf(name.getMethodName() + dropTable);
  HTableDescriptor htd = HTU.createTableDescriptor(tableName);
  int regionReplication = 3;
  htd.setRegionReplication(regionReplication);
  HTU.deleteTableIfAny(tableName);
  HTU.getAdmin().createTable(htd);
  TableName toBeDisabledTable = TableName.valueOf(dropTable ? "droppedTable" : "disabledTable");
  HTU.deleteTableIfAny(toBeDisabledTable);
  htd = HTU.createTableDescriptor(toBeDisabledTable.toString());
  htd.setRegionReplication(regionReplication);
  HTU.getAdmin().createTable(htd);
  // both tables are created, now pause replication
  ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
  admin.disablePeer(ServerRegionReplicaUtil.getReplicationPeerId());
  // now that the replication is disabled, write to the table to be dropped, then drop the table.
  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  Table tableToBeDisabled = connection.getTable(toBeDisabledTable);
  HTU.loadNumericRows(tableToBeDisabled, HBaseTestingUtility.fam1, 6000, 7000);
  AtomicLong skippedEdits = new AtomicLong();
  RegionReplicaReplicationEndpoint.RegionReplicaOutputSink sink =
    mock(RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.class);
  when(sink.getSkippedEditsCounter()).thenReturn(skippedEdits);
  RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter sinkWriter =
    new RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter(sink,
      (ClusterConnection) connection, Executors.newSingleThreadExecutor(), Integer.MAX_VALUE);
  RegionLocator rl = connection.getRegionLocator(toBeDisabledTable);
  HRegionLocation hrl = rl.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY);
  byte[] encodedRegionName = hrl.getRegionInfo().getEncodedNameAsBytes();
  Entry entry = new Entry(new WALKey(encodedRegionName, toBeDisabledTable, 1), new WALEdit());
  // disable the table
  HTU.getAdmin().disableTable(toBeDisabledTable);
  if (dropTable) {
    HTU.getAdmin().deleteTable(toBeDisabledTable);
  }
  sinkWriter.append(toBeDisabledTable, encodedRegionName, HConstants.EMPTY_BYTE_ARRAY,
    Lists.newArrayList(entry, entry));
  assertEquals(2, skippedEdits.get());
  try {
    // load data to the main table
    HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);
    // now enable the replication
    admin.enablePeer(ServerRegionReplicaUtil.getReplicationPeerId());
    verifyReplication(tableName, regionReplication, 0, 1000);
  } finally {
    admin.close();
    table.close();
    rl.close();
    tableToBeDisabled.close();
    HTU.deleteTableIfAny(toBeDisabledTable);
    connection.close();
  }
}
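In this test the HRegionLocation is only needed to recover the encoded region name that keys the fabricated WAL entry. A minimal sketch of that lookup in isolation, assuming an open Connection and an existing table (class and method names are illustrative):

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class EncodedRegionNameSketch {
  // Resolve the region holding the first row of a table and return its encoded name,
  // the identifier used to key WAL entries for that region.
  static byte[] encodedNameOfFirstRegion(Connection connection, TableName table) throws Exception {
    try (RegionLocator locator = connection.getRegionLocator(table)) {
      HRegionLocation location = locator.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY);
      System.out.println("first region "
        + Bytes.toStringBinary(location.getRegionInfo().getRegionName())
        + " on " + location.getServerName());
      return location.getRegionInfo().getEncodedNameAsBytes();
    }
  }
}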
Use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
The class AsyncNonMetaRegionLocator, method locateRowInCache.
private HRegionLocation locateRowInCache(TableCache tableCache, TableName tableName, byte[] row) {
  Map.Entry<byte[], HRegionLocation> entry = tableCache.cache.floorEntry(row);
  if (entry == null) {
    return null;
  }
  HRegionLocation loc = entry.getValue();
  byte[] endKey = loc.getRegionInfo().getEndKey();
  if (isEmptyStopRow(endKey) || Bytes.compareTo(row, endKey) < 0) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Found " + loc + " in cache for '" + tableName + "', row='"
        + Bytes.toStringBinary(row) + "', locateType=" + RegionLocateType.CURRENT);
    }
    return loc;
  } else {
    return null;
  }
}
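The cache here is a sorted map keyed by region start key, so floorEntry(row) returns the candidate region whose start key is the greatest one not after the row; the end-key check then rejects rows that fall past that region. A standalone sketch of the same lookup logic over a ConcurrentSkipListMap, with hypothetical region boundaries (the table name, server, and split key are illustrative):

import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class FloorEntryLookupSketch {
  public static void main(String[] args) {
    TableName table = TableName.valueOf("demo");                          // illustrative table
    ServerName server = ServerName.valueOf("rs1.example.com", 16020, 1L); // illustrative server
    // Cache keyed by region start key, ordered byte-wise like the locator's cache.
    ConcurrentSkipListMap<byte[], HRegionLocation> cache =
      new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
    byte[] splitKey = Bytes.toBytes("m");
    cache.put(HConstants.EMPTY_START_ROW,
      new HRegionLocation(new HRegionInfo(table, HConstants.EMPTY_START_ROW, splitKey), server));
    cache.put(splitKey,
      new HRegionLocation(new HRegionInfo(table, splitKey, HConstants.EMPTY_END_ROW), server));

    byte[] row = Bytes.toBytes("q");
    Map.Entry<byte[], HRegionLocation> entry = cache.floorEntry(row);
    byte[] endKey = entry.getValue().getRegionInfo().getEndKey();
    // A hit only counts if the row is before the region's end key (empty end key = last region).
    boolean hit = endKey.length == 0 || Bytes.compareTo(row, endKey) < 0;
    System.out.println("row 'q' -> " + entry.getValue().getRegionInfo().getRegionNameAsString()
      + " (hit=" + hit + ")");
  }
}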