Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
From the class TestServerCrashProcedure, method tearDown.
@After
public void tearDown() throws Exception {
  MiniHBaseCluster cluster = this.util.getHBaseCluster();
  HMaster master = cluster == null ? null : cluster.getMaster();
  if (master != null && master.getMasterProcedureExecutor() != null) {
    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(master.getMasterProcedureExecutor(),
      false);
  }
  this.util.shutdownMiniCluster();
}
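This tearDown resets fault injection that the test presumably enables during setup. A minimal sketch of that enabling side, assuming the same ProcedureTestingUtility API; the setUp below is hypothetical, not the actual test's code, and the cluster size is an assumption.

@Before
public void setup() throws Exception {
  this.util = new HBaseTestingUtility();
  // Cluster size chosen for illustration only.
  this.util.startMiniCluster(3);
  ProcedureExecutor<MasterProcedureEnv> procExec =
    this.util.getHBaseCluster().getMaster().getMasterProcedureExecutor();
  // true = kill the procedure executor before each procedure-store update,
  // forcing the recovery code paths that tearDown later switches off.
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
}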
Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
From the class TestHRegion, method testgetHDFSBlocksDistribution.
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  // Why do we set the block size in this test? If we set it smaller than the kvs, then we'll
  // break up the file into more pieces that can be distributed across the three nodes and we
  // won't be able to have the condition this test asserts: that at least one node has
  // a copy of all replicas -- with a small block size, blocks are spread evenly across
  // the three nodes. hfilev3 with tags seems to put us over the block size. St.Ack.
  // final int DEFAULT_BLOCK_SIZE = 1024;
  // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 2);
  // Set up a cluster with 3 datanodes
  MiniHBaseCluster cluster = null;
  String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;
  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte[][] families = { fam1, fam2 };
    Table ht = htu.createTable(tableName, families);
    // Set up the region
    byte[] row = Bytes.toBytes("row1");
    byte[] col = Bytes.toBytes("col1");
    Put put = new Put(row);
    put.addColumn(fam1, col, 1L, Bytes.toBytes("test1"));
    put.addColumn(fam2, col, 1L, Bytes.toBytes("test2"));
    ht.put(put);
    HRegion firstRegion = htu.getHBaseCluster().getRegions(tableName).get(0);
    firstRegion.flush(true);
    HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();
    // With the replication factor of 2 set above and 2 HFiles, we have a total of
    // 4 block replicas on 3 datanodes; by the pigeonhole principle, at least one host
    // must hold a replica of both HFiles. That host's weight will equal the unique
    // blocks' total weight.
    long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
    StringBuilder sb = new StringBuilder();
    for (String host : blocksDistribution1.getTopHosts()) {
      if (sb.length() > 0) {
        sb.append(", ");
      }
      sb.append(host);
      sb.append("=");
      sb.append(blocksDistribution1.getWeight(host));
    }
    String topHost = blocksDistribution1.getTopHosts().get(0);
    long topHostWeight = blocksDistribution1.getWeight(topHost);
    String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" + topHostWeight
      + ", topHost=" + topHost + "; " + sb.toString();
    LOG.info(msg);
    assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);
    // Use the static method to compute the value; it should be the same. The static
    // method is used by the load balancer and other components.
    HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
      htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo());
    long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();
    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);
    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
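An HDFSBlocksDistribution can also be turned into a per-host locality score, which is how components such as the balancer consume it. A minimal sketch using only the accessors exercised in the test above; localityIndex is a hypothetical helper name, not an HBase API.

// Hypothetical helper: 1.0 means the host holds a replica of every block of
// the region's store files; 0.0 means it holds none.
static float localityIndex(HDFSBlocksDistribution distribution, String host) {
  long total = distribution.getUniqueBlocksTotalWeight();
  return total == 0 ? 0.0f : (float) distribution.getWeight(host) / total;
}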
Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
From the class TestHRegionOnCluster, method testDataCorrectnessReplayingRecoveredEdits.
@Test(timeout = 300000)
public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
  final int NUM_MASTERS = 1;
  final int NUM_RS = 3;
  Admin hbaseAdmin = null;
  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  try {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final byte[] FAMILY = Bytes.toBytes("family");
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    // Create the table
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    hbaseAdmin = master.getConnection().getAdmin();
    hbaseAdmin.createTable(desc);
    assertTrue(hbaseAdmin.isTableAvailable(tableName));
    // Put data: r1->v1
    LOG.info("Loading r1 to v1 into " + tableName);
    Table table = TEST_UTIL.getConnection().getTable(tableName);
    putDataAndVerify(table, "r1", FAMILY, "v1", 1);
    TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
    // Move the region to the target server
    HRegionInfo regionInfo;
    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
      regionInfo = locator.getRegionLocation(Bytes.toBytes("r1")).getRegionInfo();
    }
    int originServerNum = cluster.getServerWith(regionInfo.getRegionName());
    HRegionServer originServer = cluster.getRegionServer(originServerNum);
    int targetServerNum = (originServerNum + 1) % NUM_RS;
    HRegionServer targetServer = cluster.getRegionServer(targetServerNum);
    assertFalse(originServer.equals(targetServer));
    TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
    LOG.info("Moving " + regionInfo.getEncodedName() + " to " + targetServer.getServerName());
    hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
      Bytes.toBytes(targetServer.getServerName().getServerName()));
    do {
      Thread.sleep(1);
    } while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum);
    // Put data: r2->v2
    LOG.info("Loading r2 to v2 into " + tableName);
    putDataAndVerify(table, "r2", FAMILY, "v2", 2);
    TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
    // Move the region back to the origin server
    LOG.info("Moving " + regionInfo.getEncodedName() + " to " + originServer.getServerName());
    hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
      Bytes.toBytes(originServer.getServerName().getServerName()));
    do {
      Thread.sleep(1);
    } while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum);
    // Put data: r3->v3
    LOG.info("Loading r3 to v3 into " + tableName);
    putDataAndVerify(table, "r3", FAMILY, "v3", 3);
    // Kill the target server
    LOG.info("Killing target server " + targetServer.getServerName());
    targetServer.kill();
    cluster.getRegionServerThreads().get(targetServerNum).join();
    // Wait until the dead-server shutdown processing finishes
    while (master.getServerManager().areDeadServersInProgress()) {
      Thread.sleep(5);
    }
    // Kill the origin server
    LOG.info("Killing origin server " + originServer.getServerName());
    originServer.kill();
    cluster.getRegionServerThreads().get(originServerNum).join();
    // Put data: r4->v4
    LOG.info("Loading r4 to v4 into " + tableName);
    putDataAndVerify(table, "r4", FAMILY, "v4", 4);
  } finally {
    if (hbaseAdmin != null) {
      hbaseAdmin.close();
    }
    TEST_UTIL.shutdownMiniCluster();
  }
}
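The putDataAndVerify helper is not reproduced on this page. A plausible minimal sketch, assuming it writes one row and then scans to assert the expected total row count; the qualifier name and scan details are assumptions, not the test's actual code.

// Sketch: write row->value, then count rows to verify nothing was lost
// across the region moves and server kills above.
private void putDataAndVerify(Table table, String row, byte[] family,
    String value, int verifyNum) throws IOException {
  Put put = new Put(Bytes.toBytes(row));
  put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes(value)); // "q1" is assumed
  table.put(put);
  try (ResultScanner scanner = table.getScanner(new Scan())) {
    int rows = 0;
    while (scanner.next() != null) {
      rows++;
    }
    assertEquals(verifyNum, rows);
  }
}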
Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
From the class TestRegionMergeTransactionOnCluster, method testWholesomeMerge.
@Test
public void testWholesomeMerge() throws Exception {
  LOG.info("Starting " + name.getMethodName());
  final TableName tableName = TableName.valueOf(name.getMethodName());
  // Create the table and load data.
  Table table = createTableAndLoadData(MASTER, tableName);
  // Merge the 1st and 2nd regions
  mergeRegionsAndVerifyRegionNum(MASTER, tableName, 0, 1, INITIAL_REGION_NUM - 1);
  // Merge the 2nd and 3rd regions
  PairOfSameType<HRegionInfo> mergedRegions =
    mergeRegionsAndVerifyRegionNum(MASTER, tableName, 1, 2, INITIAL_REGION_NUM - 2);
  verifyRowCount(table, ROWSIZE);
  // Randomly choose one of the two merged regions
  HRegionInfo hri = RandomUtils.nextBoolean() ? mergedRegions.getFirst() : mergedRegions.getSecond();
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  AssignmentManager am = cluster.getMaster().getAssignmentManager();
  RegionStates regionStates = am.getRegionStates();
  long start = EnvironmentEdgeManager.currentTime();
  while (!regionStates.isRegionInState(hri, State.MERGED)) {
    assertFalse("Timed out waiting for the merged region to reach state MERGED",
      EnvironmentEdgeManager.currentTime() - start > 60000);
    Thread.sleep(500);
  }
  // We should not be able to assign it again
  am.assign(hri, true);
  assertFalse("A merged region should not be assignable", regionStates.isRegionInTransition(hri));
  assertTrue(regionStates.isRegionInState(hri, State.MERGED));
  // We should not be able to unassign it either
  am.unassign(hri, null);
  assertFalse("A merged region should not be unassignable", regionStates.isRegionInTransition(hri));
  assertTrue(regionStates.isRegionInState(hri, State.MERGED));
  table.close();
}
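Neither createTableAndLoadData nor mergeRegionsAndVerifyRegionNum is shown on this page. A minimal sketch of the merge-and-wait pattern the latter encapsulates, assuming the 2.0-era Admin.mergeRegionsAsync API and the ADMIN handle created in beforeAllTests below; the real helper in TestRegionMergeTransactionOnCluster differs in detail.

// Sketch: merge two adjacent regions through the Admin API, then wait for the
// region count to drop to the expected number before returning the pair.
private PairOfSameType<HRegionInfo> mergeRegionsAndVerifyRegionNum(HMaster master,
    TableName tableName, int regionA, int regionB, int expectedRegionNum) throws Exception {
  List<HRegionInfo> regions =
    MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tableName);
  HRegionInfo a = regions.get(regionA);
  HRegionInfo b = regions.get(regionB);
  ADMIN.mergeRegionsAsync(a.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), false).get();
  long deadline = System.currentTimeMillis() + 60000;
  while (MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tableName).size()
      != expectedRegionNum) {
    assertTrue("Timed out waiting for the merge to complete",
      System.currentTimeMillis() < deadline);
    Thread.sleep(250);
  }
  return new PairOfSameType<>(a, b);
}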
Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
From the class TestRegionMergeTransactionOnCluster, method beforeAllTests.
@BeforeClass
public static void beforeAllTests() throws Exception {
  // Start a cluster with a custom master class
  TEST_UTIL.startMiniCluster(1, NB_SERVERS, null, MyMaster.class, null);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  MASTER = cluster.getMaster();
  // Disable the load balancer so it does not interfere with the merge assertions
  MASTER.balanceSwitch(false);
  ADMIN = TEST_UTIL.getConnection().getAdmin();
}
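MyMaster, passed as the master class above, is a test-local HMaster subclass defined elsewhere in TestRegionMergeTransactionOnCluster; the real version overrides RPC handling to exercise merge edge cases. A minimal sketch of the subclassing pattern only, assuming the era's HMaster(Configuration, CoordinatedStateManager) constructor signature.

// Illustrative only: the smallest possible custom master is a pass-through
// constructor; behavior overrides would be layered on top of this.
public static class MyMaster extends HMaster {
  public MyMaster(Configuration conf, CoordinatedStateManager csm)
      throws IOException, KeeperException, InterruptedException {
    super(conf, csm);
  }
}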