Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
From the class TestScannersFromClientSide, method testScanOnReopenedRegion.
/**
 * Test from client side for scan while the region is reopened
 * on the same region server.
 */
@Test
public void testScanOnReopenedRegion() throws Exception {
  final TableName tableName = name.getTableName();
  byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 2);
  Table ht = TEST_UTIL.createTable(tableName, FAMILY);
  Put put;
  Scan scan;
  Result result;
  ResultScanner scanner;
  boolean toLog = false;
  List<Cell> kvListExp;

  // table: row, family, c0:0, c1:1
  put = new Put(ROW);
  for (int i = 0; i < QUALIFIERS.length; i++) {
    KeyValue kv = new KeyValue(ROW, FAMILY, QUALIFIERS[i], i, VALUE);
    put.add(kv);
  }
  ht.put(put);

  scan = new Scan().withStartRow(ROW);
  scanner = ht.getScanner(scan);

  HRegionLocation loc;
  try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
    loc = locator.getRegionLocation(ROW);
  }
  RegionInfo hri = loc.getRegion();
  SingleProcessHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
  byte[] regionName = hri.getRegionName();
  int i = cluster.getServerWith(regionName);
  HRegionServer rs = cluster.getRegionServer(i);

  LOG.info("Unassigning " + hri);
  TEST_UTIL.getAdmin().unassign(hri.getRegionName(), true);

  long startTime = EnvironmentEdgeManager.currentTime();
  long timeOut = 10000;
  boolean offline = false;
  while (true) {
    if (rs.getOnlineRegion(regionName) == null) {
      offline = true;
      break;
    }
    assertTrue("Timed out in closing the testing region",
      EnvironmentEdgeManager.currentTime() < startTime + timeOut);
  }
  assertTrue(offline);

  LOG.info("Assigning " + hri);
  TEST_UTIL.getAdmin().assign(hri.getRegionName());
  startTime = EnvironmentEdgeManager.currentTime();
  while (true) {
    rs = cluster.getRegionServer(cluster.getServerWith(regionName));
    if (rs != null && rs.getOnlineRegion(regionName) != null) {
      offline = false;
      break;
    }
    assertTrue("Timed out in open the testing region",
      EnvironmentEdgeManager.currentTime() < startTime + timeOut);
  }
  assertFalse(offline);

  // c0:0, c1:1
  kvListExp = new ArrayList<>();
  kvListExp.add(new KeyValue(ROW, FAMILY, QUALIFIERS[0], 0, VALUE));
  kvListExp.add(new KeyValue(ROW, FAMILY, QUALIFIERS[1], 1, VALUE));

  result = scanner.next();
  verifyResult(result, kvListExp, toLog, "Testing scan on re-opened region");
}
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
From the class TestSeparateClientZKCluster, method testMasterSwitch.
@Test
public void testMasterSwitch() throws Exception {
  // get an admin instance and issue some request first
  Connection conn = TEST_UTIL.getConnection();
  try (Admin admin = conn.getAdmin()) {
    LOG.debug("Tables: " + admin.listTableDescriptors());
    SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    // switch active master
    HMaster master = cluster.getMaster();
    master.stopMaster();
    LOG.info("Stopped master {}", master.getServerName());
    while (master.isAlive()) {
      Thread.sleep(200);
    }
    LOG.info("Shutdown master {}", master.getServerName());
    while (cluster.getMaster() == null || !cluster.getMaster().isInitialized()) {
      LOG.info("Get master {}",
        cluster.getMaster() == null ? "null" : cluster.getMaster().getServerName());
      Thread.sleep(200);
    }
    LOG.info("Got master {}", cluster.getMaster().getServerName());
    // confirm client access still works
    assertTrue(admin.balance(BalanceRequest.defaultInstance()).isBalancerRan());
  }
}
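The hand-rolled sleep loops above can also be expressed with the testing utility's waitFor helper; a minimal sketch, assuming the same master and cluster variables (the 30-second timeout is an arbitrary example value):

// Wait for the old master to die, then for a new, initialized master to show up.
TEST_UTIL.waitFor(30000, () -> !master.isAlive());
TEST_UTIL.waitFor(30000,
  () -> cluster.getMaster() != null && cluster.getMaster().isInitialized());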
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
From the class TestSeparateClientZKCluster, method testMetaMoveDuringClientZkClusterRestart.
@Test
public void testMetaMoveDuringClientZkClusterRestart() throws Exception {
  TableName tn = name.getTableName();
  // create table
  Connection conn = TEST_UTIL.getConnection();
  try (Admin admin = conn.getAdmin(); Table table = conn.getTable(tn)) {
    ColumnFamilyDescriptorBuilder cfDescBuilder =
      ColumnFamilyDescriptorBuilder.newBuilder(family);
    TableDescriptorBuilder tableDescBuilder =
      TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build());
    admin.createTable(tableDescBuilder.build());
    // put some data
    Put put = new Put(row);
    put.addColumn(family, qualifier, value);
    table.put(put);
    // invalidate the cached region locations
    conn.clearRegionLocationCache();
    // stop client zk cluster
    clientZkCluster.shutdown();
    // stop the current meta server and confirm that the server shutdown process
    // is not affected by the client ZK crash
    SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    int metaServerId = cluster.getServerWithMeta();
    HRegionServer metaServer = cluster.getRegionServer(metaServerId);
    metaServer.stop("Stop current RS holding meta region");
    while (metaServer.isAlive()) {
      Thread.sleep(200);
    }
    // wait for meta region online
    AssignmentTestingUtil.waitForAssignment(cluster.getMaster().getAssignmentManager(),
      RegionInfoBuilder.FIRST_META_REGIONINFO);
    // wait long enough that syncing data to the client ZK is retried until it succeeds
    Thread.sleep(10000);
    clientZkCluster.startup(clientZkDir);
    // new request should pass
    Get get = new Get(row);
    Result result = table.get(get);
    LOG.debug("Result: " + Bytes.toString(result.getValue(family, qualifier)));
    assertArrayEquals(value, result.getValue(family, qualifier));
  }
}
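This test only works when the cluster is configured with a ZooKeeper ensemble dedicated to clients, separate from the one the servers use. A sketch of the relevant configuration, set before the mini cluster starts (the quorum and port values are illustrative):

Configuration conf = TEST_UTIL.getConfiguration();
// Point clients at a dedicated ZooKeeper ensemble instead of the server ensemble.
conf.set(HConstants.CLIENT_ZOOKEEPER_QUORUM, "localhost");
conf.setInt(HConstants.CLIENT_ZOOKEEPER_CLIENT_PORT, 21828);
// Observer mode: the client ZK only mirrors data synced from the server ensemble.
conf.setBoolean(HConstants.CLIENT_ZOOKEEPER_OBSERVER_MODE, true);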
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
From the class TestCoprocessorStop, method testStopped.
@Test
public void testStopped() throws Exception {
  // shutdown hbase only. then check flag file.
  SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
  LOG.info("shutdown hbase cluster...");
  cluster.shutdown();
  LOG.info("wait for the hbase cluster shutdown...");
  cluster.waitUntilShutDown();

  Configuration conf = UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  Path resultFile = new Path(UTIL.getDataTestDirOnTestFS(), MASTER_FILE);
  assertTrue("Master flag file should have been created", fs.exists(resultFile));
  resultFile = new Path(UTIL.getDataTestDirOnTestFS(), REGIONSERVER_FILE);
  assertTrue("RegionServer flag file should have been created", fs.exists(resultFile));
}
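The coprocessor that creates MASTER_FILE and REGIONSERVER_FILE is not shown here. A hypothetical sketch of what such a coprocessor could look like, writing a flag file from its stop() hook so the test can verify the hook ran during shutdown (the class name is illustrative, not the actual test coprocessor):

public static class StopFlagCoprocessor implements MasterCoprocessor, RegionServerCoprocessor {
  @Override
  public void stop(CoprocessorEnvironment env) throws IOException {
    // Record which process is stopping by dropping a flag file on the test filesystem.
    String file = env instanceof MasterCoprocessorEnvironment ? MASTER_FILE : REGIONSERVER_FILE;
    Path flag = new Path(UTIL.getDataTestDirOnTestFS(), file);
    FileSystem.get(env.getConfiguration()).createNewFile(flag);
  }
}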
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
From the class TestHBCKSCP, method test.
@Test
public void test() throws Exception {
  // we are about to do one for it?
  SingleProcessHBaseCluster cluster = this.util.getHBaseCluster();
  // Assert that we have three RegionServers. Test depends on there being multiple.
  assertEquals(RS_COUNT, cluster.getLiveRegionServerThreads().size());

  int count;
  try (Table table = createTable(TableName.valueOf(this.name.getMethodName()))) {
    // Load the table with a bit of data so some logs to split and some edits in each region.
    this.util.loadTable(table, HBaseTestingUtil.COLUMNS[0]);
    count = util.countRows(table);
  }
  assertTrue("expected some rows", count > 0);

  // Make the test easier by not working on server hosting meta...
  // Find another RS. Purge it from Master memory w/o running SCP (if
  // SCP runs, it will clear entries from hbase:meta which frustrates
  // our attempt at manufacturing 'Unknown Servers' condition).
  int metaIndex = this.util.getMiniHBaseCluster().getServerWithMeta();
  int rsIndex = (metaIndex + 1) % RS_COUNT;
  ServerName rsServerName = cluster.getRegionServer(rsIndex).getServerName();
  HMaster master = cluster.getMaster();

  // Get a Region that is on the server.
  RegionInfo rsRI = master.getAssignmentManager().getRegionsOnServer(rsServerName).get(0);
  Result r = MetaTableAccessor.getRegionResult(master.getConnection(), rsRI.getRegionName());
  // Assert region is OPEN.
  assertEquals(RegionState.State.OPEN.toString(),
    Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER)));
  ServerName serverName = CatalogFamilyFormat.getServerName(r, 0);
  assertEquals(rsServerName, serverName);

  // moveFrom adds to dead servers and adds it to processing list only we will
  // not be processing this server 'normally'. Remove it from processing by
  // calling 'finish' and then remove it from dead servers so rsServerName
  // becomes an 'Unknown Server' even though it is still around.
  LOG.info("Killing {}", rsServerName);
  cluster.killRegionServer(rsServerName);
  master.getServerManager().moveFromOnlineToDeadServers(rsServerName);
  master.getServerManager().getDeadServers().removeDeadServer(rsServerName);
  master.getAssignmentManager().getRegionStates().removeServer(rsServerName);

  // Kill the server. Nothing should happen since an 'Unknown Server' as far
  // as the Master is concerned; i.e. no SCP.
  HRegionServer hrs = cluster.getRegionServer(rsServerName);
  while (!hrs.isStopped()) {
    Threads.sleep(10);
  }
  LOG.info("Dead {}", rsServerName);

  // Now assert still references in hbase:meta to the 'dead' server -- they haven't been
  // cleaned up by an SCP or by anything else.
  assertTrue(searchMeta(master, rsServerName));
  // Assert region is OPEN on dead server still.
  r = MetaTableAccessor.getRegionResult(master.getConnection(), rsRI.getRegionName());
  assertEquals(RegionState.State.OPEN.toString(),
    Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER)));
  serverName = CatalogFamilyFormat.getServerName(r, 0);
  assertNotNull(cluster.getRegionServer(serverName));
  assertEquals(rsServerName, serverName);

  // I now have 'Unknown Server' references in hbase:meta; i.e. Server references
  // with no corresponding SCP. Queue one.
  long pid = scheduleHBCKSCP(rsServerName, master);
  assertNotEquals(Procedure.NO_PROC_ID, pid);
  while (master.getMasterProcedureExecutor().getActiveProcIds().contains(pid)) {
    Threads.sleep(10);
  }

  // After SCP, assert region is OPEN on new server.
  r = MetaTableAccessor.getRegionResult(master.getConnection(), rsRI.getRegionName());
  assertEquals(RegionState.State.OPEN.toString(),
    Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER)));
  serverName = CatalogFamilyFormat.getServerName(r, 0);
  assertNotNull(cluster.getRegionServer(serverName));
  assertNotEquals(rsServerName, serverName);
  // Make sure no mention of old server post SCP.
  assertFalse(searchMeta(master, rsServerName));
}
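The scheduleHBCKSCP helper is not shown above. As an assumption about what it might do (not the test's actual implementation), scheduling a server crash procedure for an unknown server can go through the public Hbck fixup API:

private long scheduleHBCKSCP(ServerName serverName, HMaster master) throws Exception {
  // master is unused in this variant; the request goes through the Hbck client instead.
  try (Hbck hbck = util.getConnection().getHbck()) {
    // Ask the active master to queue an HBCK ServerCrashProcedure for the given server.
    List<Long> pids = hbck.scheduleServerCrashProcedures(Collections.singletonList(serverName));
    return pids.get(0);
  }
}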