Usage example of org.apache.hadoop.hbase.master.RegionState in the Apache HBase project: class HBaseFsckRepair, method waitUntilAssigned.
/*
 * Should we check all assignments or just not in RIT?
 */
public static void waitUntilAssigned(Admin admin, HRegionInfo region) throws IOException, InterruptedException {
    // Polling budget before we give up (default: two minutes).
    long timeout = admin.getConfiguration().getLong("hbase.hbck.assign.timeout", 120000);
    long deadline = timeout + EnvironmentEdgeManager.currentTime();
    while (EnvironmentEdgeManager.currentTime() < deadline) {
        try {
            // Scan the cluster's regions-in-transition list for our region.
            RegionState matching = null;
            for (RegionState candidate : admin.getClusterStatus().getRegionsInTransition()) {
                if (candidate.getRegion().equals(region)) {
                    matching = candidate;
                    break;
                }
            }
            if (matching == null) {
                // yay! no longer RIT
                return;
            }
            // still in rit
            LOG.info("Region still in transition, waiting for " + "it to become assigned: " + region);
        } catch (IOException e) {
            // Transient master/RPC failure; retry until the deadline passes.
            LOG.warn("Exception when waiting for region to become assigned," + " retrying", e);
        }
        Thread.sleep(1000);
    }
    throw new IOException("Region " + region + " failed to move out of " + "transition within timeout " + timeout + "ms");
}
Usage example of org.apache.hadoop.hbase.master.RegionState in the Apache HBase project: class MergeTableRegionsProcedure, method openMergedRegions.
/**
 * Assign merged region
 * @param env MasterProcedureEnv
 * @throws IOException
 * @throws InterruptedException
 **/
private void openMergedRegions(final MasterProcedureEnv env) throws IOException, InterruptedException {
    // Check whether the merged region is already opened; if so,
    // this is retry and we should just ignore.
    final RegionState mergedState =
        getAssignmentManager(env).getRegionStates().getRegionState(mergedRegionInfo);
    final boolean alreadyOpened = mergedState != null && mergedState.isOpened();
    if (alreadyOpened) {
        LOG.info("Skip opening merged region " + mergedRegionInfo.getRegionNameAsString() + " as it is already opened.");
        return;
    }
    // TODO: The new AM should provide an API to force assign the merged region to the same RS
    // as daughter regions; if the RS is unavailable, then assign to a different RS.
    env.getMasterServices().getAssignmentManager().assignMergedRegion(mergedRegionInfo, regionsToMerge[0], regionsToMerge[1]);
}
Usage example of org.apache.hadoop.hbase.master.RegionState in the Apache HBase project: class MergeTableRegionsProcedure, method prepareMergeRegion.
/**
 * Prepare merge and do some check
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private void prepareMergeRegion(final MasterProcedureEnv env) throws IOException {
    // Note: the following logic assumes that we only have 2 regions to merge. In the future,
    // if we want to extend to more than 2 regions, the code needs to modify a little bit.
    //
    final CatalogJanitor janitor = env.getMasterServices().getCatalogJanitor();
    // Note the short-circuit: region B's qualifier is only cleaned when region A's clean succeeded.
    final boolean regionAHasMergeQualifier = !janitor.cleanMergeQualifier(regionsToMerge[0]);
    if (regionAHasMergeQualifier || !janitor.cleanMergeQualifier(regionsToMerge[1])) {
        final String offender = regionAHasMergeQualifier
            ? regionsToMerge[0].getEncodedName()
            : regionsToMerge[1].getEncodedName();
        String msg = "Skip merging regions " + getRegionsToMergeListFullNameString() + ", because region " + offender + " has merge qualifier";
        LOG.warn(msg);
        throw new MergeRegionException(msg);
    }
    // Both regions must be known to the assignment manager and currently open.
    final RegionStates regionStates = getAssignmentManager(env).getRegionStates();
    final RegionState stateA = regionStates.getRegionState(regionsToMerge[0].getEncodedName());
    final RegionState stateB = regionStates.getRegionState(regionsToMerge[1].getEncodedName());
    if (stateA == null || stateB == null) {
        throw new UnknownRegionException(stateA == null ? regionsToMerge[0].getEncodedName() : regionsToMerge[1].getEncodedName());
    }
    if (!stateA.isOpened() || !stateB.isOpened()) {
        throw new MergeRegionException("Unable to merge regions not online " + stateA + ", " + stateB);
    }
}
Usage example of org.apache.hadoop.hbase.master.RegionState in the Apache HBase project: class TestHRegionInfo, method testRegionDetailsForDisplay.
@Test
public void testRegionDetailsForDisplay() throws IOException {
    final byte[] startKey = new byte[] { 0x01, 0x01, 0x02, 0x03 };
    final byte[] endKey = new byte[] { 0x01, 0x01, 0x02, 0x04 };
    final Configuration conf = new Configuration();
    // With key display disabled, start/end keys must be hidden in display output.
    conf.setBoolean("hbase.display.keys", false);
    HRegionInfo hri = new HRegionInfo(TableName.valueOf(name.getMethodName()), startKey, endKey);
    checkEquality(hri, conf);
    // check HRIs with non-default replicaId
    hri = new HRegionInfo(TableName.valueOf(name.getMethodName()), startKey, endKey, false, System.currentTimeMillis(), 1);
    checkEquality(hri, conf);
    Assert.assertArrayEquals(HRegionInfo.HIDDEN_END_KEY, HRegionInfo.getEndKeyForDisplay(hri, conf));
    Assert.assertArrayEquals(HRegionInfo.HIDDEN_START_KEY, HRegionInfo.getStartKeyForDisplay(hri, conf));
    final RegionState regionState = new RegionState(hri, RegionState.State.OPEN);
    final String displayName = HRegionInfo.getDescriptiveNameFromRegionStateForDisplay(regionState, conf);
    checkDescriptiveNameEquality(displayName, regionState.toDescriptiveString(), startKey);
    // With key display enabled, the real keys and descriptive string pass through unchanged.
    conf.setBoolean("hbase.display.keys", true);
    Assert.assertArrayEquals(startKey, HRegionInfo.getStartKeyForDisplay(hri, conf));
    Assert.assertArrayEquals(endKey, HRegionInfo.getEndKeyForDisplay(hri, conf));
    Assert.assertEquals(regionState.toDescriptiveString(), HRegionInfo.getDescriptiveNameFromRegionStateForDisplay(regionState, conf));
}
Usage example of org.apache.hadoop.hbase.master.RegionState in the Apache HBase project: class ProtobufUtil, method convert.
/**
 * Convert a protobuf ClusterStatus to a ClusterStatus
 *
 * @param proto the protobuf ClusterStatus
 * @return the converted ClusterStatus
 */
public static ClusterStatus convert(ClusterStatusProtos.ClusterStatus proto) {
    // Live servers: server name -> its reported load.
    // (Each collection is declared and initialized in one statement; the previous
    // "= null" followed by immediate reassignment was a dead store.)
    Map<ServerName, ServerLoad> servers = new HashMap<>(proto.getLiveServersList().size());
    for (LiveServerInfo lsi : proto.getLiveServersList()) {
        servers.put(ProtobufUtil.toServerName(lsi.getServer()), new ServerLoad(lsi.getServerLoad()));
    }
    Collection<ServerName> deadServers = new ArrayList<>(proto.getDeadServersList().size());
    for (HBaseProtos.ServerName sn : proto.getDeadServersList()) {
        deadServers.add(ProtobufUtil.toServerName(sn));
    }
    Collection<ServerName> backupMasters = new ArrayList<>(proto.getBackupMastersList().size());
    for (HBaseProtos.ServerName sn : proto.getBackupMastersList()) {
        backupMasters.add(ProtobufUtil.toServerName(sn));
    }
    // Regions in transition, converted from their protobuf RegionState form.
    Set<RegionState> rit = new HashSet<>(proto.getRegionsInTransitionList().size());
    for (RegionInTransition region : proto.getRegionsInTransitionList()) {
        rit.add(RegionState.convert(region.getRegionState()));
    }
    final int numMasterCoprocessors = proto.getMasterCoprocessorsCount();
    String[] masterCoprocessors = new String[numMasterCoprocessors];
    for (int i = 0; i < numMasterCoprocessors; i++) {
        masterCoprocessors[i] = proto.getMasterCoprocessors(i).getName();
    }
    return new ClusterStatus(proto.getHbaseVersion().getVersion(), ClusterId.convert(proto.getClusterId()).toString(), servers, deadServers, ProtobufUtil.toServerName(proto.getMaster()), backupMasters, rit, masterCoprocessors, proto.getBalancerOn());
}
Aggregations