Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
The class TestHRegionInfo, method testPb.
@Test
public void testPb() throws DeserializationException {
  HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
  byte[] bytes = hri.toByteArray();
  HRegionInfo pbhri = HRegionInfo.parseFrom(bytes);
  assertTrue(hri.equals(pbhri));
}
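The same serialize/parse round trip applies to any region descriptor, not just the meta region. A minimal sketch, assuming a hypothetical table name and key range (the three-argument HRegionInfo constructor is the same one used in createRegion further below):

HRegionInfo hri = new HRegionInfo(TableName.valueOf("ns1:demo"),
    Bytes.toBytes("row-a"), Bytes.toBytes("row-z"));
byte[] serialized = hri.toByteArray();                    // protobuf-backed byte form
HRegionInfo restored = HRegionInfo.parseFrom(serialized); // may throw DeserializationException
assertTrue(hri.equals(restored));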
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
The class TestHRegionInfo, method testConvert.
@Test
public void testConvert() {
  final TableName tableName = TableName.valueOf("ns1:" + name.getMethodName());
  byte[] startKey = Bytes.toBytes("startKey");
  byte[] endKey = Bytes.toBytes("endKey");
  boolean split = false;
  long regionId = System.currentTimeMillis();
  int replicaId = 42;
  HRegionInfo hri = new HRegionInfo(tableName, startKey, endKey, split, regionId, replicaId);
  // convert two times, compare
  HRegionInfo convertedHri = HRegionInfo.convert(HRegionInfo.convert(hri));
  assertEquals(hri, convertedHri);
  // test convert RegionInfo without replicaId
  RegionInfo info = RegionInfo.newBuilder()
      .setTableName(HBaseProtos.TableName.newBuilder()
          .setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier()))
          .setNamespace(UnsafeByteOperations.unsafeWrap(tableName.getNamespace()))
          .build())
      .setStartKey(UnsafeByteOperations.unsafeWrap(startKey))
      .setEndKey(UnsafeByteOperations.unsafeWrap(endKey))
      .setSplit(split)
      .setRegionId(regionId)
      .build();
  convertedHri = HRegionInfo.convert(info);
  // expecting default replicaId
  HRegionInfo expectedHri = new HRegionInfo(tableName, startKey, endKey, split, regionId, 0);
  assertEquals(expectedHri, convertedHri);
}
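For comparison, a hypothetical sketch of the same conversion when the replicaId is set explicitly; it assumes, as the test above implies, that the protobuf RegionInfo message carries the replica id and that an unset value falls back to 0:

HRegionInfo hri = new HRegionInfo(TableName.valueOf("ns1:demo"),
    Bytes.toBytes("row-a"), Bytes.toBytes("row-z"), false, System.currentTimeMillis(), 42);
HBaseProtos.RegionInfo proto = HRegionInfo.convert(hri);  // HRegionInfo -> protobuf
HRegionInfo roundTripped = HRegionInfo.convert(proto);    // protobuf -> HRegionInfo
assertEquals(42, roundTripped.getReplicaId());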
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
The class TestRegionReplicaReplicationEndpointNoMaster, method beforeClass.
@BeforeClass
public static void beforeClass() throws Exception {
  Configuration conf = HTU.getConfiguration();
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, false);
  // install WALObserver coprocessor for tests
  String walCoprocs = HTU.getConfiguration().get(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY);
  if (walCoprocs == null) {
    walCoprocs = WALEditCopro.class.getName();
  } else {
    walCoprocs += "," + WALEditCopro.class.getName();
  }
  HTU.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, walCoprocs);
  HTU.startMiniCluster(NB_SERVERS);
  // Create table then get the single region for our new table.
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.getNameAsString());
  table = HTU.createTable(htd, new byte[][] { f }, null);
  try (RegionLocator locator = HTU.getConnection().getRegionLocator(tableName)) {
    hriPrimary = locator.getRegionLocation(row, false).getRegionInfo();
  }
  // mock a secondary region info to open
  hriSecondary = new HRegionInfo(hriPrimary.getTable(), hriPrimary.getStartKey(),
      hriPrimary.getEndKey(), hriPrimary.isSplit(), hriPrimary.getRegionId(), 1);
  // No master
  TestRegionServerNoMaster.stopMasterAndAssignMeta(HTU);
  rs0 = HTU.getMiniHBaseCluster().getRegionServer(0);
  rs1 = HTU.getMiniHBaseCluster().getRegionServer(1);
}
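Instead of hand-building the secondary HRegionInfo as above, the replica descriptor can also be derived from the primary. A minimal sketch, assuming the RegionReplicaUtil helper from org.apache.hadoop.hbase.client is available and that hriPrimary was obtained from the RegionLocator as shown:

// replicaId 1 designates the first secondary; table, keys and regionId come from the primary
HRegionInfo hriSecondary = RegionReplicaUtil.getRegionInfoForReplica(hriPrimary, 1);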
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
The class OfflineMetaRebuildTestCore, method deleteRegion.
protected void deleteRegion(Configuration conf, final Table tbl, byte[] startKey, byte[] endKey)
    throws IOException {
  LOG.info("Before delete:");
  HTableDescriptor htd = tbl.getTableDescriptor();
  dumpMeta(htd);
  List<HRegionLocation> regions;
  try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
    regions = rl.getAllRegionLocations();
  }
  for (HRegionLocation e : regions) {
    HRegionInfo hri = e.getRegionInfo();
    ServerName hsa = e.getServerName();
    if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
        && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {
      LOG.info("RegionName: " + hri.getRegionNameAsString());
      byte[] deleteRow = hri.getRegionName();
      TEST_UTIL.getAdmin().unassign(deleteRow, true);
      LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
      Path rootDir = FSUtils.getRootDir(conf);
      FileSystem fs = rootDir.getFileSystem(conf);
      Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName());
      fs.delete(p, true);
      try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
        Delete delete = new Delete(deleteRow);
        meta.delete(delete);
      }
    }
    LOG.info(hri.toString() + hsa.toString());
  }
  TEST_UTIL.getMetaTableRows(htd.getTableName());
  LOG.info("After delete:");
  dumpMeta(htd);
}
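The path arithmetic above mirrors the on-disk layout: a region's data lives in a directory named after its encoded name under the table directory. A small standalone sketch of that computation, assuming conf points at the cluster whose root directory should be inspected:

Path rootDir = FSUtils.getRootDir(conf);                       // value of hbase.rootdir
Path tableDir = FSUtils.getTableDir(rootDir, hri.getTable());  // .../data/<namespace>/<table>
Path regionDir = new Path(tableDir, hri.getEncodedName());     // .../<encoded-region-name>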
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
The class OfflineMetaRebuildTestCore, method createRegion.
protected HRegionInfo createRegion(Configuration conf, final Table htbl, byte[] startKey,
    byte[] endKey) throws IOException {
  Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);
  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()), hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(hri.toDelimitedByteArray());
  out.close();
  // add to meta.
  MetaTableAccessor.addRegionToMeta(meta, hri);
  meta.close();
  return hri;
}
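The same write-then-register flow can be expressed with try-with-resources so the meta Table and the output stream are closed even if a step fails. A hedged sketch, reusing only the calls shown above:

HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);
Path regionDir = new Path(FSUtils.getTableDir(FSUtils.getRootDir(conf), htbl.getName()),
    hri.getEncodedName());
FileSystem fs = regionDir.getFileSystem(conf);
fs.mkdirs(regionDir);
try (FSDataOutputStream out = fs.create(new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE))) {
  out.write(hri.toDelimitedByteArray());  // content of the .regioninfo file
}
try (Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
  MetaTableAccessor.addRegionToMeta(meta, hri);  // register the region in hbase:meta
}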