Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
The class TestRemoveRegionMetrics, method testMoveRegion.
@Test
public void testMoveRegion() throws IOException, InterruptedException {
  String tableNameString = name.getMethodName();
  TableName tableName = TableName.valueOf(tableNameString);
  Table t = TEST_UTIL.createTable(tableName, Bytes.toBytes("D"));
  TEST_UTIL.waitUntilAllRegionsAssigned(t.getName());
  Admin admin = TEST_UTIL.getAdmin();
  HRegionInfo regionInfo;
  byte[] row = Bytes.toBytes("r1");
  for (int i = 0; i < 30; i++) {
    boolean moved = false;
    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
      regionInfo = locator.getRegionLocation(row, true).getRegionInfo();
    }
    int currentServerIdx = cluster.getServerWith(regionInfo.getRegionName());
    int destServerIdx = (currentServerIdx + 1) % cluster.getLiveRegionServerThreads().size();
    HRegionServer currentServer = cluster.getRegionServer(currentServerIdx);
    HRegionServer destServer = cluster.getRegionServer(destServerIdx);
    // Do a put. The counters should be non-zero now.
    Put p = new Put(row);
    p.addColumn(Bytes.toBytes("D"), Bytes.toBytes("Zero"), Bytes.toBytes("VALUE"));
    t.put(p);
    MetricsRegionAggregateSource currentAgg =
        currentServer.getRegion(regionInfo.getRegionName()).getMetrics().getSource().getAggregateSource();
    String prefix = "namespace_" + NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR + "_table_"
        + tableNameString + "_region_" + regionInfo.getEncodedName() + "_metric";
    metricsHelper.assertCounter(prefix + "_mutateCount", 1, currentAgg);
    try {
      TEST_UTIL.moveRegionAndWait(regionInfo, destServer.getServerName());
      moved = true;
    } catch (IOException ioe) {
      moved = false;
    }
    if (moved) {
      MetricsRegionAggregateSource destAgg =
          destServer.getRegion(regionInfo.getRegionName()).getMetrics().getSource().getAggregateSource();
      // The region's metrics should have been removed on the move, so the
      // destination server starts the mutate count from zero.
      metricsHelper.assertCounter(prefix + "_mutateCount", 0, destAgg);
    }
  }
  TEST_UTIL.deleteTable(tableName);
}
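The pattern shared by all of these examples: borrow a RegionLocator from the Connection inside try-with-resources so it is closed promptly, then resolve a row to its region. A minimal sketch, assuming an already-open Connection named conn and a TableName named tn (both hypothetical, not from the test above):

try (RegionLocator locator = conn.getRegionLocator(tn)) {
  // Passing true forces a fresh lookup instead of trusting the client-side
  // location cache; the test above needs this because the region is moved
  // between servers on every iteration.
  HRegionInfo info = locator.getRegionLocation(Bytes.toBytes("r1"), true).getRegionInfo();
  System.out.println(info.getEncodedName());
}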
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
The class TestRegionServerNoMaster, method before.
@BeforeClass
public static void before() throws Exception {
  HTU.startMiniCluster(NB_SERVERS);
  final TableName tableName = TableName.valueOf(TestRegionServerNoMaster.class.getSimpleName());
  // Create the table, then get the single region for our new table.
  table = HTU.createTable(tableName, HConstants.CATALOG_FAMILY);
  Put p = new Put(row);
  p.addColumn(HConstants.CATALOG_FAMILY, row, row);
  table.put(p);
  try (RegionLocator locator = HTU.getConnection().getRegionLocator(tableName)) {
    hri = locator.getRegionLocation(row, false).getRegionInfo();
  }
  regionName = hri.getRegionName();
  stopMasterAndAssignMeta(HTU);
}
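Note the second argument to getRegionLocation is false here, so a cached location may be returned; that is acceptable in this setup, presumably because the freshly created single region is not expected to move before the master is stopped.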
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
The class TestAccessController, method testRegionOffline.
@Test(timeout = 180000)
public void testRegionOffline() throws Exception {
  List<HRegionLocation> regions;
  try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE)) {
    regions = locator.getAllRegionLocations();
  }
  HRegionLocation location = regions.get(0);
  final HRegionInfo hri = location.getRegionInfo();
  AccessTestAction action = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      ACCESS_CONTROLLER.preRegionOffline(ObserverContext.createAndPrepare(CP_ENV, null), hri);
      return null;
    }
  };
  verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN);
  verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
      USER_GROUP_WRITE, USER_GROUP_CREATE);
}
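getAllRegionLocations() returns one HRegionLocation per region of the table; the permission check only needs some region to pass to preRegionOffline, so the test simply takes the first entry.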
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
The class TestRegionReplicaReplicationEndpointNoMaster, method beforeClass.
@BeforeClass
public static void beforeClass() throws Exception {
  Configuration conf = HTU.getConfiguration();
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, false);
  // Install the WALObserver coprocessor for tests.
  String walCoprocs = HTU.getConfiguration().get(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY);
  if (walCoprocs == null) {
    walCoprocs = WALEditCopro.class.getName();
  } else {
    walCoprocs += "," + WALEditCopro.class.getName();
  }
  HTU.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, walCoprocs);
  HTU.startMiniCluster(NB_SERVERS);
  // Create the table, then get the single region for our new table.
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.getNameAsString());
  table = HTU.createTable(htd, new byte[][] { f }, null);
  try (RegionLocator locator = HTU.getConnection().getRegionLocator(tableName)) {
    hriPrimary = locator.getRegionLocation(row, false).getRegionInfo();
  }
  // Mock a secondary region info to open.
  hriSecondary = new HRegionInfo(hriPrimary.getTable(), hriPrimary.getStartKey(),
      hriPrimary.getEndKey(), hriPrimary.isSplit(), hriPrimary.getRegionId(), 1);
  // No master.
  TestRegionServerNoMaster.stopMasterAndAssignMeta(HTU);
  rs0 = HTU.getMiniHBaseCluster().getRegionServer(0);
  rs1 = HTU.getMiniHBaseCluster().getRegionServer(1);
}
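The last argument to this HRegionInfo constructor is the replica id, so passing 1 yields a secondary-replica descriptor covering the same table, key range, and region id as hriPrimary.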
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
The class TestReplicationSink, method testReplicateEntriesForHFiles.
/**
 * Test replicateEntries with a bulk load entry for 25 HFiles.
 */
@Test
public void testReplicateEntriesForHFiles() throws Exception {
  Path dir = TEST_UTIL.getDataTestDirOnTestFS("testReplicateEntries");
  Path familyDir = new Path(dir, Bytes.toString(FAM_NAME1));
  int numRows = 10;
  List<Path> p = new ArrayList<>(1);
  // 1. Generate boundaries for 25 hfile ranges (two distinct keys per hfile).
  Random rng = new SecureRandom();
  Set<Integer> numbers = new HashSet<>();
  while (numbers.size() < 50) {
    numbers.add(rng.nextInt(1000));
  }
  List<Integer> numberList = new ArrayList<>(numbers);
  Collections.sort(numberList);
  Map<String, Long> storeFilesSize = new HashMap<>(1);
  // 2. Create 25 hfiles.
  Configuration conf = TEST_UTIL.getConfiguration();
  FileSystem fs = dir.getFileSystem(conf);
  Iterator<Integer> numbersItr = numberList.iterator();
  for (int i = 0; i < 25; i++) {
    Path hfilePath = new Path(familyDir, "hfile_" + i);
    HFileTestUtil.createHFile(conf, fs, hfilePath, FAM_NAME1, FAM_NAME1,
        Bytes.toBytes(numbersItr.next()), Bytes.toBytes(numbersItr.next()), numRows);
    p.add(hfilePath);
    storeFilesSize.put(hfilePath.getName(), fs.getFileStatus(hfilePath).getLen());
  }
  // 3. Create a BulkLoadDescriptor and a WALEdit.
  Map<byte[], List<Path>> storeFiles = new HashMap<>(1);
  storeFiles.put(FAM_NAME1, p);
  WALEdit edit = null;
  WALProtos.BulkLoadDescriptor loadDescriptor = null;
  try (Connection c = ConnectionFactory.createConnection(conf);
      RegionLocator l = c.getRegionLocator(TABLE_NAME1)) {
    HRegionInfo regionInfo = l.getAllRegionLocations().get(0).getRegionInfo();
    loadDescriptor = ProtobufUtil.toBulkLoadDescriptor(TABLE_NAME1,
        UnsafeByteOperations.unsafeWrap(regionInfo.getEncodedNameAsBytes()),
        storeFiles, storeFilesSize, 1);
    edit = WALEdit.createBulkLoadEvent(regionInfo, loadDescriptor);
  }
  List<WALEntry> entries = new ArrayList<>(1);
  // 4. Create a WALEntryBuilder.
  WALEntry.Builder builder = createWALEntryBuilder(TABLE_NAME1);
  // 5. Copy the hfiles to the on-disk layout the sink expects:
  //    namespace/table/encoded-region/family/hfile_i under baseNamespaceDir.
  for (int i = 0; i < 25; i++) {
    String pathToHfileFromNS = new StringBuilder(100)
        .append(TABLE_NAME1.getNamespaceAsString()).append(Path.SEPARATOR)
        .append(Bytes.toString(TABLE_NAME1.getName())).append(Path.SEPARATOR)
        .append(Bytes.toString(loadDescriptor.getEncodedRegionName().toByteArray()))
        .append(Path.SEPARATOR).append(Bytes.toString(FAM_NAME1))
        .append(Path.SEPARATOR).append("hfile_" + i)
        .toString();
    String dst = baseNamespaceDir + Path.SEPARATOR + pathToHfileFromNS;
    FileUtil.copy(fs, p.get(0), fs, new Path(dst), false, conf);
  }
  entries.add(builder.build());
  try (ResultScanner scanner = table1.getScanner(new Scan())) {
    // 6. Assert there is no existing data in the table.
    assertEquals(0, scanner.next(numRows).length);
  }
  // 7. Replicate the bulk loaded entry.
  SINK.replicateEntries(entries, CellUtil.createCellScanner(edit.getCells().iterator()),
      replicationClusterId, baseNamespaceDir, hfileArchiveDir);
  try (ResultScanner scanner = table1.getScanner(new Scan())) {
    // 8. Assert the data was replicated.
    assertEquals(numRows, scanner.next(numRows).length);
  }
}
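The two scans bracket the replicateEntries call: the table is empty beforehand, and afterwards scanner.next(numRows) returns numRows results, which is enough to show the sink resolved the bulk-load descriptor in the WALEdit and loaded the hfiles staged under baseNamespaceDir.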