Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
From the class TestCoprocessorMetrics, the method testRegionObserverAfterRegionClosed.
@Test
public void testRegionObserverAfterRegionClosed() throws IOException {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
      Admin admin = connection.getAdmin()) {
    admin.createTable(
        new HTableDescriptor(tableName)
            .addFamily(new HColumnDescriptor(foo))
            .addCoprocessor(CustomRegionObserver.class.getName()),
        // create with 2 regions
        new byte[][] { foo });
    try (Table table = connection.getTable(tableName)) {
      table.get(new Get(foo));
      // 2 gets
      table.get(new Get(foo));
    }
    assertPreGetRequestsCounter(CustomRegionObserver.class);
    // close one of the regions
    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
      HRegionLocation loc = locator.getRegionLocation(foo);
      admin.closeRegion(loc.getServerName(), loc.getRegionInfo());
      HRegionServer server = UTIL.getMiniHBaseCluster().getRegionServer(loc.getServerName());
      UTIL.waitFor(30000,
          () -> server.getOnlineRegion(loc.getRegionInfo().getRegionName()) == null);
      assertNull(server.getOnlineRegion(loc.getRegionInfo().getRegionName()));
    }
    // with only 1 region remaining, we should still be able to find the Counter
    assertPreGetRequestsCounter(CustomRegionObserver.class);
    // close the table
    admin.disableTable(tableName);
    MetricRegistryInfo info = MetricsCoprocessor.createRegistryInfoForRegionCoprocessor(
        CustomRegionObserver.class.getName());
    // ensure that MetricRegistry is deleted
    Optional<MetricRegistry> registry = MetricRegistries.global().get(info);
    assertFalse(registry.isPresent());
  }
}
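The RegionLocator call driving this test is the single-row lookup. A minimal sketch of that pattern on its own, assuming an already-open Connection, a table name, and the usual org.apache.hadoop.hbase.client imports (the helper name and arguments are illustrative, not part of the test):

static HRegionLocation locateRow(Connection connection, TableName tableName, byte[] row)
    throws IOException {
  try (RegionLocator locator = connection.getRegionLocator(tableName)) {
    // HRegionLocation pairs the region descriptor (HRegionInfo) with the ServerName
    // currently hosting it, which is exactly what the test hands to admin.closeRegion.
    return locator.getRegionLocation(row);
  }
}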
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
From the class TestCoprocessorMetrics, the method testRegionObserverMultiRegion.
@Test
public void testRegionObserverMultiRegion() throws IOException {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
      Admin admin = connection.getAdmin()) {
    admin.createTable(
        new HTableDescriptor(tableName)
            .addFamily(new HColumnDescriptor(foo))
            .addCoprocessor(CustomRegionObserver.class.getName()),
        // create with 2 regions
        new byte[][] { foo });
    try (Table table = connection.getTable(tableName);
        RegionLocator locator = connection.getRegionLocator(tableName)) {
      table.get(new Get(bar));
      // 2 gets to 2 separate regions
      table.get(new Get(foo));
      assertEquals(2, locator.getAllRegionLocations().size());
      assertNotEquals(locator.getRegionLocation(bar).getRegionInfo(),
          locator.getRegionLocation(foo).getRegionInfo());
    }
  }
  assertPreGetRequestsCounter(CustomRegionObserver.class);
}
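Here the locator enumerates regions rather than looking up a single row. A minimal sketch of that usage, assuming an open Connection (the helper name is illustrative):

static void printRegions(Connection connection, TableName tableName) throws IOException {
  try (RegionLocator locator = connection.getRegionLocator(tableName)) {
    // getAllRegionLocations returns one HRegionLocation per region of the table.
    List<HRegionLocation> locations = locator.getAllRegionLocations();
    System.out.println(tableName + " has " + locations.size() + " region(s)");
    for (HRegionLocation location : locations) {
      System.out.println(location.getRegionInfo().getRegionNameAsString()
          + " on " + location.getServerName());
    }
  }
}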
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
From the class TestDistributedLogSplitting, the method installTable.
Table installTable(ZooKeeperWatcher zkw, String tname, String fname, int nrs,
    int existingRegions) throws Exception {
  // Create a table with regions
  TableName table = TableName.valueOf(tname);
  byte[] family = Bytes.toBytes(fname);
  LOG.info("Creating table with " + nrs + " regions");
  Table ht = TEST_UTIL.createMultiRegionTable(table, family, nrs);
  int numRegions = -1;
  try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(table)) {
    numRegions = r.getStartKeys().length;
  }
  assertEquals(nrs, numRegions);
  LOG.info("Waiting for no more RIT\n");
  blockUntilNoRIT(zkw, master);
  // disable-enable cycle to get rid of table's dead regions left behind
  // by createMultiRegions
  LOG.debug("Disabling table\n");
  TEST_UTIL.getAdmin().disableTable(table);
  LOG.debug("Waiting for no more RIT\n");
  blockUntilNoRIT(zkw, master);
  NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
  LOG.debug("Verifying only catalog and namespace regions are assigned\n");
  if (regions.size() != 2) {
    for (String oregion : regions) {
      LOG.debug("Region still online: " + oregion);
    }
  }
  assertEquals(2 + existingRegions, regions.size());
  LOG.debug("Enabling table\n");
  TEST_UTIL.getAdmin().enableTable(table);
  LOG.debug("Waiting for no more RIT\n");
  blockUntilNoRIT(zkw, master);
  LOG.debug("Verifying there are " + numRegions + " assigned on cluster\n");
  regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
  assertEquals(numRegions + 2 + existingRegions, regions.size());
  return ht;
}
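installTable derives the region count from RegionLocator.getStartKeys(), which returns one start key per region. A standalone version of that check might look like this (a sketch; the helper name is illustrative and an open Connection is assumed):

static int countRegions(Connection connection, TableName tableName) throws IOException {
  try (RegionLocator locator = connection.getRegionLocator(tableName)) {
    // Each region contributes exactly one start key; the first region's start key
    // is the empty byte array.
    return locator.getStartKeys().length;
  }
}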
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
From the class BaseTestHBaseFsck, the method deleteRegion.
/**
* Delete a region from assignments, meta, or completely from hdfs.
* @param unassign if true unassign region if assigned
* @param metaRow if true remove region's row from META
* @param hdfs if true remove region's dir in HDFS
* @param regionInfoOnly if true remove a region dir's .regioninfo file
* @param replicaId replica id
*/
protected void deleteRegion(Configuration conf, final HTableDescriptor htd, byte[] startKey,
    byte[] endKey, boolean unassign, boolean metaRow, boolean hdfs, boolean regionInfoOnly,
    int replicaId) throws IOException, InterruptedException {
  LOG.info("** Before delete:");
  dumpMeta(htd.getTableName());
  List<HRegionLocation> locations;
  try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
    locations = rl.getAllRegionLocations();
  }
  for (HRegionLocation location : locations) {
    HRegionInfo hri = location.getRegionInfo();
    ServerName hsa = location.getServerName();
    if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
        && Bytes.compareTo(hri.getEndKey(), endKey) == 0
        && hri.getReplicaId() == replicaId) {
      LOG.info("RegionName: " + hri.getRegionNameAsString());
      byte[] deleteRow = hri.getRegionName();
      if (unassign) {
        LOG.info("Undeploying region " + hri + " from server " + hsa);
        undeployRegion(connection, hsa, hri);
      }
      if (regionInfoOnly) {
        LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString());
        Path rootDir = FSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName());
        Path hriPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
        fs.delete(hriPath, true);
      }
      if (hdfs) {
        LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
        Path rootDir = FSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName());
        HBaseFsck.debugLsr(conf, p);
        boolean success = fs.delete(p, true);
        LOG.info("Deleted " + p + " successfully? " + success);
        HBaseFsck.debugLsr(conf, p);
      }
      if (metaRow) {
        try (Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService)) {
          Delete delete = new Delete(deleteRow);
          meta.delete(delete);
        }
      }
    }
    LOG.info(hri.toString() + hsa.toString());
  }
  TEST_UTIL.getMetaTableRows(htd.getTableName());
  LOG.info("*** After delete:");
  dumpMeta(htd.getTableName());
}
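deleteRegion relies on getAllRegionLocations() to find the one region whose boundaries match the requested start and end keys. That matching step, taken in isolation, might look like the following sketch (the helper name and the null return are illustrative, not part of the test base class):

static HRegionLocation findRegion(Connection connection, TableName tableName, byte[] startKey,
    byte[] endKey) throws IOException {
  try (RegionLocator locator = connection.getRegionLocator(tableName)) {
    for (HRegionLocation location : locator.getAllRegionLocations()) {
      HRegionInfo hri = location.getRegionInfo();
      // A region is identified by its exact key range.
      if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
          && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {
        return location;
      }
    }
    return null;
  }
}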
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
From the class TestRegionObserverInterface, the method bulkLoadHFileTest.
@Test(timeout = 300000)
public void bulkLoadHFileTest() throws Exception {
  final String testName =
      TestRegionObserverInterface.class.getName() + "." + name.getMethodName();
  final TableName tableName =
      TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName());
  Configuration conf = util.getConfiguration();
  Table table = util.createTable(tableName, new byte[][] { A, B, C });
  try (RegionLocator locator = util.getConnection().getRegionLocator(tableName)) {
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] { "hadPreBulkLoadHFile", "hadPostBulkLoadHFile" }, tableName,
        new Boolean[] { false, false });
    FileSystem fs = util.getTestFileSystem();
    final Path dir = util.getDataTestDirOnTestFS(testName).makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(A));
    createHFile(util.getConfiguration(), fs, new Path(familyDir, Bytes.toString(A)), A, A);
    // Bulk load
    new LoadIncrementalHFiles(conf).doBulkLoad(dir, util.getAdmin(), table, locator);
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] { "hadPreBulkLoadHFile", "hadPostBulkLoadHFile" }, tableName,
        new Boolean[] { true, true });
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
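In the bulk-load case the RegionLocator is not queried directly by the test; it is handed to LoadIncrementalHFiles so the loader can map each HFile to the regions covering its key range. Reduced to a sketch, and assuming conf, connection, tableName and an HFile directory hfileDir are already set up:

try (Admin admin = connection.getAdmin();
    Table table = connection.getTable(tableName);
    RegionLocator locator = connection.getRegionLocator(tableName)) {
  // doBulkLoad uses the locator to assign each HFile to its target region, splitting
  // any file whose key range straddles a region boundary.
  new LoadIncrementalHFiles(conf).doBulkLoad(hfileDir, admin, table, locator);
}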