Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
From class TestRegionServerNoMaster, method before():
@BeforeClass
public static void before() throws Exception {
  HTU.startMiniCluster(NB_SERVERS);
  final TableName tableName =
      TableName.valueOf(TestRegionServerNoMaster.class.getSimpleName());
  // Create table then get the single region for our new table.
  table = HTU.createTable(tableName, HConstants.CATALOG_FAMILY);
  Put p = new Put(row);
  p.addColumn(HConstants.CATALOG_FAMILY, row, row);
  table.put(p);
  try (RegionLocator locator = HTU.getConnection().getRegionLocator(tableName)) {
    hri = locator.getRegionLocation(row, false).getRegionInfo();
  }
  regionName = hri.getRegionName();
  stopMasterAndAssignMeta(HTU);
}
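
The pattern above, opening a RegionLocator in try-with-resources and resolving a single row to its region, works the same outside a test harness. A minimal standalone sketch, assuming a reachable cluster and an existing table named "t1" (the table and row names are illustrative, not from the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRow {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        RegionLocator locator = conn.getRegionLocator(TableName.valueOf("t1"))) {
      // Second argument false accepts a cached location; true forces a fresh
      // meta lookup. The test above passes false as well.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row-1"), false);
      System.out.println("Region: " + loc.getRegionInfo().getRegionNameAsString());
      System.out.println("Server: " + loc.getServerName());
    }
  }
}

Closing the locator releases only the client-side handle; the shared Connection remains usable.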
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
From class TestAccessController, method testRegionOffline():
@Test(timeout = 180000)
public void testRegionOffline() throws Exception {
  List<HRegionLocation> regions;
  try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE)) {
    regions = locator.getAllRegionLocations();
  }
  HRegionLocation location = regions.get(0);
  final HRegionInfo hri = location.getRegionInfo();
  AccessTestAction action = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      ACCESS_CONTROLLER.preRegionOffline(ObserverContext.createAndPrepare(CP_ENV, null), hri);
      return null;
    }
  };
  verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN);
  verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
      USER_GROUP_WRITE, USER_GROUP_CREATE);
}
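
getAllRegionLocations() returns one HRegionLocation per region of the table; the test above only needs the first. A minimal sketch that enumerates every region with its key range and hosting server, again assuming an existing table "t1" (illustrative name):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ListRegions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        RegionLocator locator = conn.getRegionLocator(TableName.valueOf("t1"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // An empty start or end key marks the first or last region of the table.
        System.out.println(loc.getRegionInfo().getEncodedName()
            + " [" + Bytes.toStringBinary(loc.getRegionInfo().getStartKey())
            + ", " + Bytes.toStringBinary(loc.getRegionInfo().getEndKey()) + ")"
            + " on " + loc.getServerName());
      }
    }
  }
}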
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
From class TestRegionReplicaReplicationEndpointNoMaster, method beforeClass():
@BeforeClass
public static void beforeClass() throws Exception {
  Configuration conf = HTU.getConfiguration();
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, false);
  // install WALObserver coprocessor for tests
  String walCoprocs = HTU.getConfiguration().get(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY);
  if (walCoprocs == null) {
    walCoprocs = WALEditCopro.class.getName();
  } else {
    walCoprocs += "," + WALEditCopro.class.getName();
  }
  HTU.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, walCoprocs);
  HTU.startMiniCluster(NB_SERVERS);
  // Create table then get the single region for our new table.
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.getNameAsString());
  table = HTU.createTable(htd, new byte[][] { f }, null);
  try (RegionLocator locator = HTU.getConnection().getRegionLocator(tableName)) {
    hriPrimary = locator.getRegionLocation(row, false).getRegionInfo();
  }
  // mock a secondary region info to open
  hriSecondary = new HRegionInfo(hriPrimary.getTable(), hriPrimary.getStartKey(),
      hriPrimary.getEndKey(), hriPrimary.isSplit(), hriPrimary.getRegionId(), 1);
  // No master
  TestRegionServerNoMaster.stopMasterAndAssignMeta(HTU);
  rs0 = HTU.getMiniHBaseCluster().getRegionServer(0);
  rs1 = HTU.getMiniHBaseCluster().getRegionServer(1);
}
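
The hand-built hriSecondary above copies the primary's table, key range, split flag, and region id, changing only the replica id to 1. The client library also ships a helper for this derivation; a minimal sketch, assuming the HRegionInfo-era RegionReplicaUtil API that matches these snippets:

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;

public class ReplicaInfo {
  // Derive the descriptor for a given replica of a primary region: same key
  // range and region id, only the replica id differs. Equivalent in effect to
  // the manual HRegionInfo constructor call in beforeClass() above.
  static HRegionInfo replicaOf(HRegionInfo primary, int replicaId) {
    return RegionReplicaUtil.getRegionInfoForReplica(primary, replicaId);
  }
}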
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
From class TestReplicationSink, method testReplicateEntriesForHFiles():
/**
* Test replicateEntries with a bulk load entry for 25 HFiles
*/
@Test
public void testReplicateEntriesForHFiles() throws Exception {
  Path dir = TEST_UTIL.getDataTestDirOnTestFS("testReplicateEntries");
  Path familyDir = new Path(dir, Bytes.toString(FAM_NAME1));
  int numRows = 10;
  List<Path> p = new ArrayList<>(1);
  // 1. Generate 25 hfile ranges
  Random rng = new SecureRandom();
  Set<Integer> numbers = new HashSet<>();
  while (numbers.size() < 50) {
    numbers.add(rng.nextInt(1000));
  }
  List<Integer> numberList = new ArrayList<>(numbers);
  Collections.sort(numberList);
  Map<String, Long> storeFilesSize = new HashMap<>(1);
  // 2. Create 25 hfiles
  Configuration conf = TEST_UTIL.getConfiguration();
  FileSystem fs = dir.getFileSystem(conf);
  Iterator<Integer> numbersItr = numberList.iterator();
  for (int i = 0; i < 25; i++) {
    Path hfilePath = new Path(familyDir, "hfile_" + i);
    HFileTestUtil.createHFile(conf, fs, hfilePath, FAM_NAME1, FAM_NAME1,
        Bytes.toBytes(numbersItr.next()), Bytes.toBytes(numbersItr.next()), numRows);
    p.add(hfilePath);
    storeFilesSize.put(hfilePath.getName(), fs.getFileStatus(hfilePath).getLen());
  }
  // 3. Create a BulkLoadDescriptor and a WALEdit
  Map<byte[], List<Path>> storeFiles = new HashMap<>(1);
  storeFiles.put(FAM_NAME1, p);
  WALEdit edit = null;
  WALProtos.BulkLoadDescriptor loadDescriptor = null;
  try (Connection c = ConnectionFactory.createConnection(conf);
      RegionLocator l = c.getRegionLocator(TABLE_NAME1)) {
    HRegionInfo regionInfo = l.getAllRegionLocations().get(0).getRegionInfo();
    loadDescriptor = ProtobufUtil.toBulkLoadDescriptor(TABLE_NAME1,
        UnsafeByteOperations.unsafeWrap(regionInfo.getEncodedNameAsBytes()),
        storeFiles, storeFilesSize, 1);
    edit = WALEdit.createBulkLoadEvent(regionInfo, loadDescriptor);
  }
  List<WALEntry> entries = new ArrayList<>(1);
  // 4. Create a WALEntryBuilder
  WALEntry.Builder builder = createWALEntryBuilder(TABLE_NAME1);
  // 5. Copy the hfile to the path as it is in reality
  for (int i = 0; i < 25; i++) {
    String pathToHfileFromNS = new StringBuilder(100)
        .append(TABLE_NAME1.getNamespaceAsString()).append(Path.SEPARATOR)
        .append(Bytes.toString(TABLE_NAME1.getName())).append(Path.SEPARATOR)
        .append(Bytes.toString(loadDescriptor.getEncodedRegionName().toByteArray()))
        .append(Path.SEPARATOR).append(Bytes.toString(FAM_NAME1)).append(Path.SEPARATOR)
        .append("hfile_" + i).toString();
    String dst = baseNamespaceDir + Path.SEPARATOR + pathToHfileFromNS;
    FileUtil.copy(fs, p.get(0), fs, new Path(dst), false, conf);
  }
  entries.add(builder.build());
  try (ResultScanner scanner = table1.getScanner(new Scan())) {
    // 6. Assert no existing data in table
    assertEquals(0, scanner.next(numRows).length);
  }
  // 7. Replicate the bulk loaded entry
  SINK.replicateEntries(entries, CellUtil.createCellScanner(edit.getCells().iterator()),
      replicationClusterId, baseNamespaceDir, hfileArchiveDir);
  try (ResultScanner scanner = table1.getScanner(new Scan())) {
    // 8. Assert data is replicated
    assertEquals(numRows, scanner.next(numRows).length);
  }
}
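
The StringBuilder in step 5 assembles the directory layout the sink resolves bulk-load entries against: namespace, table, encoded region name, column family, and hfile name, all under baseNamespaceDir. A sketch of the same layout factored into a helper (the class and method names are ours, not from the test):

import org.apache.hadoop.fs.Path;

public class ReplicationHFilePath {
  // Builds <base>/<namespace>/<table>/<encodedRegionName>/<family>/<hfileName>,
  // mirroring the path assembled in step 5 of the test above.
  static Path hfilePath(Path base, String namespace, String table,
      String encodedRegionName, String family, String hfileName) {
    Path p = new Path(base, namespace);
    p = new Path(p, table);
    p = new Path(p, encodedRegionName);
    p = new Path(p, family);
    return new Path(p, hfileName);
  }
}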
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
From class OfflineMetaRebuildTestCore, method deleteRegion():
protected void deleteRegion(Configuration conf, final Table tbl, byte[] startKey,
    byte[] endKey) throws IOException {
  LOG.info("Before delete:");
  HTableDescriptor htd = tbl.getTableDescriptor();
  dumpMeta(htd);
  List<HRegionLocation> regions;
  try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
    regions = rl.getAllRegionLocations();
  }
  for (HRegionLocation e : regions) {
    HRegionInfo hri = e.getRegionInfo();
    ServerName hsa = e.getServerName();
    if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
        && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {
      LOG.info("RegionName: " + hri.getRegionNameAsString());
      byte[] deleteRow = hri.getRegionName();
      TEST_UTIL.getAdmin().unassign(deleteRow, true);
      LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
      Path rootDir = FSUtils.getRootDir(conf);
      FileSystem fs = rootDir.getFileSystem(conf);
      Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName());
      fs.delete(p, true);
      try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
        Delete delete = new Delete(deleteRow);
        meta.delete(delete);
      }
    }
    LOG.info(hri.toString() + hsa.toString());
  }
  TEST_UTIL.getMetaTableRows(htd.getTableName());
  LOG.info("After delete:");
  dumpMeta(htd);
}
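
The start/end key comparison inside the loop is the generic way to single out one region from getAllRegionLocations(). A minimal sketch of that lookup factored into a helper (names are ours, not from the test):

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionByRange {
  // Returns the location whose [startKey, endKey) matches exactly, or null if
  // the table has no region with that range.
  static HRegionLocation findByRange(RegionLocator locator, byte[] startKey,
      byte[] endKey) throws IOException {
    for (HRegionLocation loc : locator.getAllRegionLocations()) {
      if (Bytes.compareTo(loc.getRegionInfo().getStartKey(), startKey) == 0
          && Bytes.compareTo(loc.getRegionInfo().getEndKey(), endKey) == 0) {
        return loc;
      }
    }
    return null;
  }
}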