Use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.
From the class HBaseTestingUtil, method waitUntilAllRegionsAssigned.
/**
 * Wait until all regions for a table in hbase:meta have a non-empty info:server, or until
 * timeout. This means all regions have been deployed, and the master has been informed and has
 * updated hbase:meta with each region's deployed server.
 * @param tableName the table name
 * @param timeout timeout, in milliseconds
 */
public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
  throws IOException {
  if (!TableName.isMetaTableName(tableName)) {
    try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
      LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = "
        + timeout + "ms");
      waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {

        @Override
        public String explainFailure() throws IOException {
          return explainTableAvailability(tableName);
        }

        @Override
        public boolean evaluate() throws IOException {
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          boolean tableFound = false;
          try (ResultScanner s = meta.getScanner(scan)) {
            for (Result r; (r = s.next()) != null;) {
              byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
              RegionInfo info = RegionInfo.parseFromOrNull(b);
              if (info != null && info.getTable().equals(tableName)) {
                // Get server hosting this region from catalog family. Return false if no server
                // hosting this region, or if the server hosting this region was recently killed
                // (for fault tolerance testing).
                tableFound = true;
                byte[] server = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                if (server == null) {
                  return false;
                } else {
                  byte[] startCode =
                    r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
                  ServerName serverName = ServerName.valueOf(
                    Bytes.toString(server).replaceFirst(":", ",") + "," + Bytes.toLong(startCode));
                  if (!getHBaseClusterInterface().isDistributedCluster()
                    && getHBaseCluster().isKilledRS(serverName)) {
                    return false;
                  }
                }
                if (RegionStateStore.getRegionState(r, info) != RegionState.State.OPEN) {
                  return false;
                }
              }
            }
          }
          if (!tableFound) {
            LOG.warn(
              "Didn't find the entries for table " + tableName + " in meta, already deleted?");
          }
          return tableFound;
        }
      });
    }
  }
  LOG.info("All regions for table " + tableName + " assigned to meta. Checking AM states.");
  // check from the master state if we are using a mini cluster
  if (!getHBaseClusterInterface().isDistributedCluster()) {
    // So, all regions are in the meta table but make sure master knows of the assignments before
    // returning -- sometimes this can lag.
    HMaster master = getHBaseCluster().getMaster();
    final RegionStates states = master.getAssignmentManager().getRegionStates();
    waitFor(timeout, 200, new ExplainingPredicate<IOException>() {

      @Override
      public String explainFailure() throws IOException {
        return explainTableAvailability(tableName);
      }

      @Override
      public boolean evaluate() throws IOException {
        List<RegionInfo> hris = states.getRegionsOfTable(tableName);
        return hris != null && !hris.isEmpty();
      }
    });
  }
  LOG.info("All regions for table " + tableName + " assigned.");
}
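For context, a test typically calls this helper right after creating or re-enabling a table and before issuing reads against it. The sketch below shows that call pattern; the wrapper method, the table name and the 60-second timeout are illustrative assumptions, not values taken from the snippet above.

// Minimal usage sketch (assumed test setup): start a mini cluster, create a
// table, then block until every region is OPEN in hbase:meta and known to the
// master's AssignmentManager.
void createTableAndWaitForAssignment() throws Exception {
  HBaseTestingUtil util = new HBaseTestingUtil();
  util.startMiniCluster();
  TableName tn = TableName.valueOf("exampleTable"); // hypothetical table name
  util.createTable(tn, HConstants.CATALOG_FAMILY);
  util.waitUntilAllRegionsAssigned(tn, 60_000); // timeout in milliseconds
  util.shutdownMiniCluster();
}

The class also provides a single-argument overload of waitUntilAllRegionsAssigned that falls back to a default timeout; the explicit two-argument form is used above for clarity.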
Use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.
From the class HBaseTestingUtil, method predicateNoRegionsInTransition.
/**
 * Returns a {@link Predicate} for checking that there are no regions in transition in master
 */
public ExplainingPredicate<IOException> predicateNoRegionsInTransition() {
  return new ExplainingPredicate<IOException>() {

    @Override
    public String explainFailure() throws IOException {
      final RegionStates regionStates =
        getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
      return "found in transition: " + regionStates.getRegionsInTransition().toString();
    }

    @Override
    public boolean evaluate() throws IOException {
      HMaster master = getMiniHBaseCluster().getMaster();
      if (master == null) {
        return false;
      }
      AssignmentManager am = master.getAssignmentManager();
      if (am == null) {
        return false;
      }
      return !am.hasRegionsInTransition();
    }
  };
}
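The predicate is designed to be handed to one of the waitFor overloads used throughout this class. A hedged sketch of that pattern follows; the wrapper method name and the 60-second timeout are illustrative, not part of the original code.

// Usage sketch: block until the master reports no regions in transition, or
// fail once the (illustrative) 60-second timeout expires.
void waitForQuiescentAssignment(HBaseTestingUtil util) throws Exception {
  util.waitFor(60_000, util.predicateNoRegionsInTransition());
}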
Use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.
From the class HBaseTestingUtil, method expireMasterSession.
/**
 * Expire the Master's session
 */
public void expireMasterSession() throws Exception {
  HMaster master = getMiniHBaseCluster().getMaster();
  expireSession(master.getZooKeeper(), false);
}
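This helper is normally used in fault-tolerance tests to force a master failover. The sketch below shows that pattern; the wrapper method is hypothetical, and the waitForActiveAndReadyMaster() call on the mini cluster is assumed to be available in the version under test.

// Sketch of a forced master failover (assumed test context).
void forceMasterFailover(HBaseTestingUtil util) throws Exception {
  // Expire the active master's ZooKeeper session so it loses mastership ...
  util.expireMasterSession();
  // ... then wait until an active, initialized master is available again.
  util.getMiniHBaseCluster().waitForActiveAndReadyMaster();
}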
Use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.
From the class TestSplitTransactionOnCluster, method testMasterRestartAtRegionSplitPendingCatalogJanitor.
/**
 * Verifies HBASE-5806. Here the case is that the split has completed, but the master is killed
 * and restarted before the CatalogJanitor can remove the parent region.
 */
@Test
public void testMasterRestartAtRegionSplitPendingCatalogJanitor()
  throws IOException, InterruptedException, NodeExistsException, KeeperException,
  ServiceException, ExecutionException, TimeoutException {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  // Create table then get the single region for our new table.
  try (Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY)) {
    List<HRegion> regions = cluster.getRegions(tableName);
    RegionInfo hri = getAndCheckSingleTableRegion(regions);
    int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);
    // Turn off the balancer so it doesn't cut in and mess up our placements.
    this.admin.balancerSwitch(false, true);
    // Turn off the meta scanner so it doesn't remove the parent on us.
    cluster.getMaster().setCatalogJanitorEnabled(false);
    // Add a bit of load to the table so it is splittable.
    TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY, false);
    // Get region pre-split.
    HRegionServer server = cluster.getRegionServer(tableRegionIndex);
    printOutRegions(server, "Initial regions: ");
    // Call split.
    this.admin.splitRegionAsync(hri.getRegionName()).get(2, TimeUnit.MINUTES);
    List<HRegion> daughters = checkAndGetDaughters(tableName);
    // Before cleanup, get a new master.
    HMaster master = abortAndWaitForMaster();
    // Now call compact on the daughters and clean up any references.
    for (HRegion daughter : daughters) {
      clearReferences(daughter);
      assertFalse(daughter.hasReferences());
    }
    // Run the compacted-file discharger synchronously so the CatalogJanitor
    // doesn't find any references.
    for (RegionServerThread rst : cluster.getRegionServerThreads()) {
      boolean oldSetting = rst.getRegionServer().compactedFileDischarger.setUseExecutor(false);
      rst.getRegionServer().compactedFileDischarger.run();
      rst.getRegionServer().compactedFileDischarger.setUseExecutor(oldSetting);
    }
    cluster.getMaster().setCatalogJanitorEnabled(true);
    ProcedureTestingUtility.waitAllProcedures(cluster.getMaster().getMasterProcedureExecutor());
    LOG.info("Starting run of CatalogJanitor");
    cluster.getMaster().getCatalogJanitor().run();
    ProcedureTestingUtility.waitAllProcedures(cluster.getMaster().getMasterProcedureExecutor());
    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
    ServerName regionServerOfRegion = regionStates.getRegionServerOfRegion(hri);
    assertEquals(null, regionServerOfRegion);
  } finally {
    TESTING_UTIL.getAdmin().balancerSwitch(true, false);
    cluster.getMaster().setCatalogJanitorEnabled(true);
  }
}
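A natural extra post-condition, sketched here as an assumption rather than part of the original test, is to reuse the helpers shown earlier and confirm that the daughter regions end up assigned and nothing is left in transition after the CatalogJanitor run. The 60-second timeout is illustrative.

// Hypothetical additional checks after the CatalogJanitor run, using the
// HBaseTestingUtil helpers shown above.
TESTING_UTIL.waitUntilAllRegionsAssigned(tableName, 60_000);
TESTING_UTIL.waitFor(60_000, TESTING_UTIL.predicateNoRegionsInTransition());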
Use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.
From the class TestFlushSnapshotFromClient, method testAsyncFlushSnapshot.
@Test
public void testAsyncFlushSnapshot() throws Exception {
  SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription.newBuilder()
    .setName("asyncSnapshot").setTable(TABLE_NAME.getNameAsString())
    .setType(SnapshotProtos.SnapshotDescription.Type.FLUSH).build();
  // take the snapshot async
  admin.snapshotAsync(new SnapshotDescription("asyncSnapshot", TABLE_NAME, SnapshotType.FLUSH));
  // constantly loop, looking for the snapshot to complete
  HMaster master = UTIL.getMiniHBaseCluster().getMaster();
  waitForSnapshotToComplete(master, snapshot, TimeUnit.MINUTES.toNanos(1));
  LOG.info(" === Async Snapshot Completed ===");
  UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
  // make sure we get the snapshot
  SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot);
}
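waitForSnapshotToComplete is a helper defined in (or imported by) the test class and is not shown in this excerpt. As an alternative, completion can be polled from the client side; the sketch below assumes that a snapshot only shows up in Admin.listSnapshots() once it has finished, and the helper name is made up for illustration.

// Client-side polling sketch (assumption: only completed snapshots are
// returned by Admin.listSnapshots()).
void waitForSnapshotByName(Admin admin, String snapshotName, long timeoutMs)
  throws IOException, InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (System.currentTimeMillis() < deadline) {
    for (SnapshotDescription sd : admin.listSnapshots()) {
      if (snapshotName.equals(sd.getName())) {
        return; // snapshot is visible, hence complete under the assumption above
      }
    }
    Thread.sleep(200);
  }
  throw new IOException("Snapshot " + snapshotName + " did not complete within " + timeoutMs + "ms");
}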