Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
The class TestAssignmentManagerOnCluster, method testMoveRegion.
/**
 * This tests moving a region.
 */
@Test(timeout = 50000)
public void testMoveRegion() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    HRegionInfo hri = createTableAndGetOneRegion(tableName);
    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
    ServerName serverName = regionStates.getRegionServerOfRegion(hri);
    ServerManager serverManager = master.getServerManager();
    // Pick a live region server other than the one currently hosting the region.
    ServerName destServerName = null;
    List<JVMClusterUtil.RegionServerThread> regionServers =
        TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads();
    for (JVMClusterUtil.RegionServerThread regionServer : regionServers) {
      HRegionServer destServer = regionServer.getRegionServer();
      destServerName = destServer.getServerName();
      if (!destServerName.equals(serverName) && serverManager.isServerOnline(destServerName)) {
        break;
      }
    }
    assertTrue(destServerName != null && !destServerName.equals(serverName));
    TEST_UTIL.getAdmin().move(hri.getEncodedNameAsBytes(),
        Bytes.toBytes(destServerName.getServerName()));
    // The move is asynchronous; poll until the region shows up on the
    // destination server, failing if it has not arrived within 30 seconds.
    long timeoutTime = System.currentTimeMillis() + 30000;
    while (true) {
      ServerName sn = regionStates.getRegionServerOfRegion(hri);
      if (sn != null && sn.equals(destServerName)) {
        TEST_UTIL.assertRegionOnServer(hri, sn, 200);
        break;
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) {
        fail("Failed to move the region in time: " + regionStates.getRegionState(hri));
      }
      regionStates.waitForUpdate(50);
    }
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
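Admin#move returns once the request is accepted, not once the region is open on the destination, which is why the test polls with a deadline. Below is a minimal, HBase-independent sketch of that same deadline-poll pattern; the class and method names are illustrative and not part of any HBase API.

import java.util.function.BooleanSupplier;

// Illustrative helper, not HBase code: polls a condition until it holds or a
// deadline passes, mirroring the wait loop in testMoveRegion above.
final class WaitUtil {
  static boolean waitFor(BooleanSupplier condition, long timeoutMs, long pollMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        return false; // timed out before the condition held
      }
      Thread.sleep(pollMs); // back off before sampling again
    }
    return true;
  }
}

A caller in the style of the test above would do something like WaitUtil.waitFor(() -> destServerName.equals(regionStates.getRegionServerOfRegion(hri)), 30000, 50) and fail the test on a false return.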
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
The class TestSplitTransactionOnCluster, method testExistingZnodeBlocksSplitAndWeRollback.
@Test(timeout = 300000)
public void testExistingZnodeBlocksSplitAndWeRollback() throws IOException, InterruptedException {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  // Create the table, then get the single region for our new table.
  Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
  List<HRegion> regions = cluster.getRegions(tableName);
  HRegionInfo hri = getAndCheckSingleTableRegion(regions);
  int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);
  RegionStates regionStates = cluster.getMaster().getAssignmentManager().getRegionStates();
  // Turn off the balancer so it doesn't cut in and mess up our placements.
  this.admin.setBalancerRunning(false, true);
  // Turn off the meta scanner so it doesn't remove the parent on us.
  cluster.getMaster().setCatalogJanitorEnabled(false);
  try {
    // Add a bit of load to the table so it is splittable.
    TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY, false);
    // Get the region count pre-split.
    HRegionServer server = cluster.getRegionServer(tableRegionIndex);
    printOutRegions(server, "Initial regions: ");
    int regionCount = ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size();
    // Mark the region CLOSING so split requests are blocked.
    regionStates.updateRegionState(hri, RegionState.State.CLOSING);
    // Now try splitting.... each attempt should fail and successfully
    // roll back.
    this.admin.splitRegion(hri.getRegionName());
    this.admin.splitRegion(hri.getRegionName());
    this.admin.splitRegion(hri.getRegionName());
    // Wait around a while and assert the count of regions remains constant.
    for (int i = 0; i < 10; i++) {
      Thread.sleep(100);
      assertEquals(regionCount, ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size());
    }
    // Clear the blocking state; now splitting should work.
    regionStates.regionOnline(hri, server.getServerName());
    split(hri, server, regionCount);
    // Get daughters.
    checkAndGetDaughters(tableName);
    // OK, so the split happened after we cleared the blocking state.
  } finally {
    admin.setBalancerRunning(true, false);
    cluster.getMaster().setCatalogJanitorEnabled(true);
    t.close();
  }
}
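The ten-iteration loop asserts a negative: no split took effect while the region was marked CLOSING. That "sample repeatedly and require the value to stay constant" check generalizes; here is a small standalone sketch of it, with illustrative names that are not part of HBase.

import java.util.concurrent.Callable;

// Illustrative helper, not HBase code: samples a value several times and
// throws if it ever differs from the expected constant, as the rollback
// check above does with the online-region count.
final class StabilityCheck {
  static <T> void assertStable(Callable<T> sample, T expected, int rounds, long sleepMs)
      throws Exception {
    for (int i = 0; i < rounds; i++) {
      Thread.sleep(sleepMs); // give background work a chance to (wrongly) change the value
      T actual = sample.call();
      if (!expected.equals(actual)) {
        throw new AssertionError("value changed on round " + i + ": expected "
            + expected + " but got " + actual);
      }
    }
  }
}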
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
The class TestStore, method init.
@SuppressWarnings("deprecation")
private Store init(String methodName, Configuration conf, HTableDescriptor htd, HColumnDescriptor hcd) throws IOException {
  // Set up a Store.
  Path basedir = new Path(DIR + methodName);
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  final Path logdir = new Path(basedir, AbstractFSWALProvider.getWALDirectoryName(methodName));
  FileSystem fs = FileSystem.get(conf);
  fs.delete(logdir, true);
  // Replace the family if it is already present; otherwise add it.
  if (htd.hasFamily(hcd.getName())) {
    htd.modifyFamily(hcd);
  } else {
    htd.addFamily(hcd);
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, basedir);
  final WALFactory wals = new WALFactory(walConf, null, methodName);
  HRegion region = new HRegion(tableDir,
      wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()),
      fs, conf, info, htd, null);
  store = new HStore(region, hcd, conf);
  return store;
}
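A caller only needs table and family descriptors; the WAL, region, and store are derived from them. A hedged usage sketch follows, assuming it runs inside TestStore where DIR, TEST_UTIL, and the init(...) helper above are in scope; the table and family names are illustrative.

// Hypothetical caller inside TestStore (illustrative names).
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("sketchTable"));
HColumnDescriptor hcd = new HColumnDescriptor("cf");
Store s = init("sketchMethod", TEST_UTIL.getConfiguration(), htd, hcd);
// The store is now backed by a fresh HRegion and WAL under DIR + "sketchMethod".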
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
The class TestStoreFile, method testReference.
/**
 * Test that our mechanism of writing store files in one region to reference
 * store files in other regions works.
 * @throws IOException
 */
@Test
public void testReference() throws IOException {
  final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testReferenceTb"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
      new Path(testDir, hri.getTable().getNameAsString()), hri);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
      .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
  writeStoreFile(writer);
  Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE);
  StoreFileReader reader = hsf.createReader();
  // Split on a row, not in the middle of a row. The midkey returned by the
  // reader may be in the middle of a row. Create a new one with an empty
  // column and timestamp.
  Cell kv = reader.midkey();
  byte[] midRow = CellUtil.cloneRow(kv);
  kv = reader.getLastKey();
  byte[] finalRow = CellUtil.cloneRow(kv);
  hsf.closeReader(true);
  // Make a reference.
  HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow);
  Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true);
  StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf, BloomType.NONE);
  // Now confirm that I can read from the reference and that it only gets
  // keys from the top half of the file.
  HFileScanner s = refHsf.createReader().getScanner(false, false);
  for (boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next(); ) {
    ByteBuffer bb = ByteBuffer.wrap(((KeyValue) s.getKey()).getKey());
    kv = KeyValueUtil.createKeyValueFromKey(bb);
    if (first) {
      // The first key served by the reference must be the split row.
      assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
          midRow, 0, midRow.length));
      first = false;
    }
  }
  // The last key served must be the last row of the parent file.
  assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
      finalRow, 0, finalRow.length));
}
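The assertions encode the contract of a "top" reference: the first key it serves is the split row, and iteration still reaches the parent file's last row. Conceptually, the half a reference serves is just a row-range predicate over the parent file. A minimal sketch of that predicate follows; it is illustrative, not the HBase read-path implementation.

import org.apache.hadoop.hbase.util.Bytes;

// Illustrative predicate, not HBase code: a top reference serves only keys
// whose row sorts at or after the split row; a bottom reference serves the
// complement.
final class HalfFilePredicate {
  static boolean inTopHalf(byte[] row, byte[] splitRow) {
    return Bytes.compareTo(row, splitRow) >= 0;
  }

  static boolean inBottomHalf(byte[] row, byte[] splitRow) {
    return Bytes.compareTo(row, splitRow) < 0;
  }
}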
Use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.
The class TestStoreFileRefresherChore, method initHRegion.
private Region initHRegion(HTableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId) throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());
  // replicaId selects which replica of this key range the region represents.
  HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false, 0, replicaId);
  // FailingHRegionFileSystem is the test's stub for simulating file system failures.
  HRegionFileSystem fs = new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, info);
  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, tableDir);
  final WALFactory wals = new WALFactory(walConf, null, "log_" + replicaId);
  HRegion region = new HRegion(fs,
      wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()),
      conf, htd, null);
  region.initialize();
  return region;
}
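Because the helper takes replicaId explicitly, a test can stand up a primary and a secondary over the same key range and then point the refresher chore at the secondary. A hedged usage sketch, with illustrative descriptor names; HConstants.EMPTY_START_ROW and EMPTY_END_ROW are the real open-range markers.

// Hypothetical caller: one primary (replica 0) and one read replica (replica 1)
// covering the whole key space of the same table.
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("sketchTable"));
htd.addFamily(new HColumnDescriptor("cf"));
Region primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0);
Region replica = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1);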