Use of org.apache.hadoop.hbase.master.AssignmentManager in project hbase by apache: class ServerCrashProcedure, method splitLogs.
private void splitLogs(final MasterProcedureEnv env) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Splitting logs from " + serverName + "; region count="
        + size(this.regionsOnCrashedServer));
  }
  MasterWalManager mwm = env.getMasterServices().getMasterWalManager();
  AssignmentManager am = env.getMasterServices().getAssignmentManager();
  // TODO: For Matteo. Below BLOCKs!!!! Redo so can relinquish executor while it is running.
  mwm.splitLog(this.serverName);
  am.getRegionStates().logSplit(this.serverName);
}
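
The method above shows the lookup pattern used throughout the master procedures: fetch the AssignmentManager from the MasterProcedureEnv via MasterServices, then act on its RegionStates. As a hedged illustration of the same path (the helper name and log message are hypothetical; every call is one that already appears in the snippets on this page):

// Minimal sketch, not part of ServerCrashProcedure. The helper name is hypothetical;
// getAssignmentManager, getRegionStates, getRegionsInTransition and getRegion are
// the calls used by the snippets on this page.
private void logRegionsInTransition(final MasterProcedureEnv env) {
  AssignmentManager am = env.getMasterServices().getAssignmentManager();
  for (RegionState state : am.getRegionStates().getRegionsInTransition()) {
    LOG.debug("Region still in transition: " + state.getRegion().getRegionNameAsString());
  }
}
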
Use of org.apache.hadoop.hbase.master.AssignmentManager in project hbase by apache: class ServerCrashProcedure, method processMeta.
/**
 * @param env the master procedure environment
 * @return False if we fail to assign and split logs on meta ('process').
 * @throws IOException
 */
private boolean processMeta(final MasterProcedureEnv env) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Processing hbase:meta that was on " + this.serverName);
  }
  MasterWalManager mwm = env.getMasterServices().getMasterWalManager();
  AssignmentManager am = env.getMasterServices().getAssignmentManager();
  HRegionInfo metaHRI = HRegionInfo.FIRST_META_REGIONINFO;
  if (this.shouldSplitWal) {
    if (this.distributedLogReplay) {
      prepareLogReplay(env, META_REGION_SET);
    } else {
      // TODO: Matteo. We BLOCK here but most important thing to be doing at this moment.
      mwm.splitMetaLog(serverName);
      am.getRegionStates().logSplit(metaHRI);
    }
  }
  // Assign meta if still carrying it. Check again: region may be assigned because of RIT timeout.
  boolean processed = true;
  if (am.isCarryingMeta(serverName)) {
    // TODO: May block here if hard time figuring state of meta.
    am.regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
    verifyAndAssignMetaWithRetries(env);
    if (this.shouldSplitWal && distributedLogReplay) {
      int timeout = env.getMasterConfiguration().getInt(KEY_WAIT_ON_RIT, DEFAULT_WAIT_ON_RIT);
      if (!waitOnRegionToClearRegionsInTransition(am, metaHRI, timeout)) {
        processed = false;
      } else {
        // TODO: Matteo. We BLOCK here but most important thing to be doing at this moment.
        mwm.splitMetaLog(serverName);
      }
    }
  }
  return processed;
}
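
processMeta returns false only when distributed log replay is enabled and hbase:meta has not cleared the regions-in-transition set within the configured timeout, so a false result means the meta work should be retried. A hedged, illustrative-only driver for that contract (the loop and the retry bound are hypothetical; ServerCrashProcedure really re-enters this step through its procedure state machine):

// Illustrative sketch only, not the procedure's actual control flow; the retry
// bound of 3 is arbitrary.
boolean metaProcessed = false;
for (int attempt = 0; attempt < 3 && !metaProcessed; attempt++) {
  metaProcessed = processMeta(env);
}
if (!metaProcessed) {
  LOG.warn("hbase:meta carried by " + serverName + " was not fully processed; will retry");
}
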
Use of org.apache.hadoop.hbase.master.AssignmentManager in project hbase by apache: class MasterDDLOperationHelper, method reOpenAllRegions.
/**
 * Reopen all regions from a table after a schema change operation.
 */
public static boolean reOpenAllRegions(final MasterProcedureEnv env, final TableName tableName,
    final List<HRegionInfo> regionInfoList) throws IOException {
  boolean done = false;
  LOG.info("Bucketing regions by region server...");
  List<HRegionLocation> regionLocations = null;
  Connection connection = env.getMasterServices().getConnection();
  try (RegionLocator locator = connection.getRegionLocator(tableName)) {
    regionLocations = locator.getAllRegionLocations();
  }
  // Convert List<HRegionLocation> to Map<HRegionInfo, ServerName>.
  NavigableMap<HRegionInfo, ServerName> hri2Sn = new TreeMap<>();
  for (HRegionLocation location : regionLocations) {
    hri2Sn.put(location.getRegionInfo(), location.getServerName());
  }
  TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps.newTreeMap();
  List<HRegionInfo> reRegions = new ArrayList<>();
  for (HRegionInfo hri : regionInfoList) {
    ServerName sn = hri2Sn.get(hri);
    // See HBASE-4578 for more information.
    if (null == sn) {
      LOG.info("Skip " + hri);
      continue;
    }
    if (!serverToRegions.containsKey(sn)) {
      LinkedList<HRegionInfo> hriList = Lists.newLinkedList();
      serverToRegions.put(sn, hriList);
    }
    reRegions.add(hri);
    serverToRegions.get(sn).add(hri);
  }
  LOG.info("Reopening " + reRegions.size() + " regions on " + serverToRegions.size()
      + " region servers.");
  AssignmentManager am = env.getMasterServices().getAssignmentManager();
  am.setRegionsToReopen(reRegions);
  BulkReOpen bulkReopen = new BulkReOpen(env.getMasterServices(), serverToRegions, am);
  while (true) {
    try {
      if (bulkReopen.bulkReOpen()) {
        done = true;
        break;
      } else {
        LOG.warn("Timeout before reopening all regions");
      }
    } catch (InterruptedException e) {
      LOG.warn("Reopen was interrupted");
      // Preserve the interrupt.
      Thread.currentThread().interrupt();
      break;
    }
  }
  return done;
}
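
A typical caller is a DDL procedure that has just altered a table and now needs its regions reopened so the new schema takes effect. A hedged usage sketch, assuming RegionStates#getRegionsOfTable is available on this branch (the remaining calls are taken from the helper above):

// Hedged usage sketch; getRegionsOfTable is an assumption about this RegionStates
// version, the other calls mirror reOpenAllRegions above.
AssignmentManager am = env.getMasterServices().getAssignmentManager();
List<HRegionInfo> regionsOfTable = am.getRegionStates().getRegionsOfTable(tableName);
if (!MasterDDLOperationHelper.reOpenAllRegions(env, tableName, regionsOfTable)) {
  LOG.warn("Failed to reopen all regions of " + tableName + " after the schema change");
}
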
Use of org.apache.hadoop.hbase.master.AssignmentManager in project hbase by apache: class DeleteTableProcedure, method deleteAssignmentState.
protected static void deleteAssignmentState(final MasterProcedureEnv env,
    final TableName tableName) throws IOException {
  final AssignmentManager am = env.getMasterServices().getAssignmentManager();
  // Clean up regions of the table in RegionStates.
  LOG.debug("Removing '" + tableName + "' from region states.");
  am.getRegionStates().tableDeleted(tableName);
  // If there is an entry for this table in the table state manager, mark it deleted.
  LOG.debug("Marking '" + tableName + "' as deleted.");
  env.getMasterServices().getTableStateManager().setDeletedTable(tableName);
}
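
The ordering matters: regions are removed from RegionStates before the table itself is marked deleted in the table state manager. A hedged post-condition check, again assuming RegionStates#getRegionsOfTable exists on this branch:

// Illustration only: after deleteAssignmentState the AssignmentManager should no
// longer track any region of the deleted table.
AssignmentManager am = env.getMasterServices().getAssignmentManager();
if (!am.getRegionStates().getRegionsOfTable(tableName).isEmpty()) {
  LOG.warn("RegionStates still tracks regions of deleted table " + tableName);
}
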
Use of org.apache.hadoop.hbase.master.AssignmentManager in project hbase by apache: class TestHBaseFsckOneRS, method testCleanUpDaughtersNotInMetaAfterFailedSplit.
@Test(timeout = 180000)
public void testCleanUpDaughtersNotInMetaAfterFailedSplit() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  try {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
    createTable(TEST_UTIL, desc, null);
    tbl = connection.getTable(desc.getTableName());
    for (int i = 0; i < 5; i++) {
      Put p1 = new Put(("r" + i).getBytes());
      p1.addColumn(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
      tbl.put(p1);
    }
    admin.flush(desc.getTableName());
    List<HRegion> regions = cluster.getRegions(desc.getTableName());
    int serverWith = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
    HRegionServer regionServer = cluster.getRegionServer(serverWith);
    byte[] parentRegionName = regions.get(0).getRegionInfo().getRegionName();
    cluster.getServerWith(parentRegionName);
    // Create daughters without adding to META table
    MasterProcedureEnv env = cluster.getMaster().getMasterProcedureExecutor().getEnvironment();
    SplitTableRegionProcedure splitR =
        new SplitTableRegionProcedure(env, regions.get(0).getRegionInfo(), Bytes.toBytes("r3"));
    splitR.prepareSplitRegion(env);
    splitR.setRegionStateToSplitting(env);
    splitR.closeParentRegionForSplit(env);
    splitR.createDaughterRegions(env);
    AssignmentManager am = cluster.getMaster().getAssignmentManager();
    for (RegionState state : am.getRegionStates().getRegionsInTransition()) {
      am.regionOffline(state.getRegion());
    }
    Map<HRegionInfo, ServerName> regionsMap = new HashMap<>();
    regionsMap.put(regions.get(0).getRegionInfo(), regionServer.getServerName());
    am.assign(regionsMap);
    am.waitForAssignment(regions.get(0).getRegionInfo());
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
    // holes are separate from overlap groups
    assertEquals(0, hbck.getOverlapGroups(tableName).size());
    // fix hole
    assertErrors(
        doFsck(conf, false, true, false, false, false, false, false, false, false, false, false,
            false, null),
        new HBaseFsck.ErrorReporter.ERROR_CODE[] {
            HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
            HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
    // check that hole fixed
    assertNoErrors(doFsck(conf, false));
    assertEquals(5, countRows());
  } finally {
    if (tbl != null) {
      tbl.close();
      tbl = null;
    }
    cleanupTable(tableName);
  }
}
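
The recovery step at the heart of the test is the offline, assign, wait sequence that puts the parent region back online before hbck runs. Isolated as a hedged helper sketch (the method name is hypothetical; regionOffline, assign(Map) and waitForAssignment are the exact calls the test makes):

// Hedged helper sketch extracted from the test flow above; only the method name
// is new, the AssignmentManager calls are the ones the test itself uses.
private static void forceReassign(AssignmentManager am, HRegionInfo hri, ServerName target)
    throws Exception {
  // Drop any stale in-transition state for the region.
  am.regionOffline(hri);
  // Pin the region to the chosen server and assign it.
  Map<HRegionInfo, ServerName> plan = new HashMap<>();
  plan.put(hri, target);
  am.assign(plan);
  // Block until the region is reported open.
  am.waitForAssignment(hri);
}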