use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
the class TestIncrementalBackup method TestIncBackupRestore.
// implement all test cases in 1 test since incremental backup/restore has dependencies
@Test
public void TestIncBackupRestore() throws Exception {
  int ADD_ROWS = 99;
  // #1 - create full backup for all tables
  LOG.info("create full backup image for all tables");
  List<TableName> tables = Lists.newArrayList(table1, table2);
  final byte[] fam3Name = Bytes.toBytes("f3");
  table1Desc.addFamily(new HColumnDescriptor(fam3Name));
  HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
  Connection conn = ConnectionFactory.createConnection(conf1);
  int NB_ROWS_FAM3 = 6;
  insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
  HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
  BackupAdminImpl client = new BackupAdminImpl(conn);
  BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
  String backupIdFull = client.backupTables(request);
  assertTrue(checkSucceeded(backupIdFull));
  // #2 - insert some data into the tables
  HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
  LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
  Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3, TEST_UTIL.countRows(t1));
  t1.close();
  LOG.debug("written " + ADD_ROWS + " rows to " + table1);
  HTable t2 = (HTable) conn.getTable(table2);
  Put p2;
  for (int i = 0; i < 5; i++) {
    p2 = new Put(Bytes.toBytes("row-t2" + i));
    p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
    t2.put(p2);
  }
  Assert.assertEquals(NB_ROWS_IN_BATCH + 5, TEST_UTIL.countRows(t2));
  t2.close();
  LOG.debug("written 5 rows to " + table2);
  // split table1
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  List<HRegion> regions = cluster.getRegions(table1);
  byte[] name = regions.get(0).getRegionInfo().getRegionName();
  long startSplitTime = EnvironmentEdgeManager.currentTime();
  admin.splitRegion(name);
  while (!admin.isTableAvailable(table1)) {
    Thread.sleep(100);
  }
  long endSplitTime = EnvironmentEdgeManager.currentTime();
  // split finished
  LOG.debug("split finished in " + (endSplitTime - startSplitTime) + " ms");
  // #3 - incremental backup for multiple tables
  tables = Lists.newArrayList(table1, table2);
  request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
  String backupIdIncMultiple = client.backupTables(request);
  assertTrue(checkSucceeded(backupIdIncMultiple));
  // add column family f2 to table1
  final byte[] fam2Name = Bytes.toBytes("f2");
  table1Desc.addFamily(new HColumnDescriptor(fam2Name));
  // drop column family f3
  table1Desc.removeFamily(fam3Name);
  HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
  int NB_ROWS_FAM2 = 7;
  HTable t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
  t3.close();
  // #4 - second incremental backup for the same tables, after the schema change
  request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
  String backupIdIncMultiple2 = client.backupTables(request);
  assertTrue(checkSucceeded(backupIdIncMultiple2));
  // #5 - restore full backup for all tables, without overwrite
  TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
  TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
  LOG.debug("Restoring full " + backupIdFull);
  client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
    tablesRestoreFull, tablesMapFull, false));
  // #6.1 - check that the restored tables exist
  HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
  assertTrue(hAdmin.tableExists(table1_restore));
  assertTrue(hAdmin.tableExists(table2_restore));
  hAdmin.close();
  // #6.2 - check row counts of the restored tables
  HTable hTable = (HTable) conn.getTable(table1_restore);
  Assert.assertEquals(NB_ROWS_IN_BATCH + NB_ROWS_FAM3, TEST_UTIL.countRows(hTable));
  hTable.close();
  hTable = (HTable) conn.getTable(table2_restore);
  Assert.assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(hTable));
  hTable.close();
  // #7 - restore incremental backup for multiple tables, with overwrite
  TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
  TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
  client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
    tablesRestoreIncMultiple, tablesMapIncMultiple, true));
  hTable = (HTable) conn.getTable(table1_restore);
  LOG.debug("After incremental restore: " + hTable.getTableDescriptor());
  LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " rows");
  Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS, TEST_UTIL.countRows(hTable, famName));
  LOG.debug("f2 has " + TEST_UTIL.countRows(hTable, fam2Name) + " rows");
  Assert.assertEquals(NB_ROWS_FAM2, TEST_UTIL.countRows(hTable, fam2Name));
  hTable.close();
  hTable = (HTable) conn.getTable(table2_restore);
  Assert.assertEquals(NB_ROWS_IN_BATCH + 5, TEST_UTIL.countRows(hTable));
  hTable.close();
  admin.close();
  conn.close();
}
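The split-and-wait in step #2 above is the part of this test that exercises MiniHBaseCluster directly. A minimal sketch of that pattern in isolation, assuming the usual HBaseTestingUtility field TEST_UTIL and a hypothetical table name:

MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
TableName table = TableName.valueOf("my_table"); // hypothetical table
// take the first online region of the table and ask the admin to split it
byte[] regionName = cluster.getRegions(table).get(0).getRegionInfo().getRegionName();
try (Admin splitAdmin = TEST_UTIL.getConnection().getAdmin()) {
  splitAdmin.splitRegion(regionName);
  // block until the daughter regions are online and the table is available again
  while (!splitAdmin.isTableAvailable(table)) {
    Thread.sleep(100);
  }
}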
use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
the class TestPerColumnFamilyFlush method getRegionWithName.
// Find the (first) region of the given table, along with the region server hosting it.
private static Pair<Region, HRegionServer> getRegionWithName(TableName tableName) {
  MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
  List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
  for (int i = 0; i < rsts.size(); i++) {
    HRegionServer hrs = rsts.get(i).getRegionServer();
    for (Region region : hrs.getOnlineRegions(tableName)) {
      return Pair.newPair(region, hrs);
    }
  }
  return null;
}
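A hedged usage sketch (the table name is a placeholder): the helper hands back both the region and the server hosting it, so a test can target exactly one region server.

Pair<Region, HRegionServer> loc = getRegionWithName(TableName.valueOf("t1")); // hypothetical table
assertNotNull("expected at least one online region for the table", loc);
Region region = loc.getFirst();
HRegionServer server = loc.getSecond();
LOG.info("Region " + region.getRegionInfo().getRegionNameAsString()
    + " is hosted on " + server.getServerName());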
use of org.apache.hadoop.hbase.MiniHBaseCluster in project phoenix by apache.
the class PhoenixServerRpcIT method ensureTablesOnDifferentRegionServers.
/**
 * Verifies that the given tables each have a single region and are on
 * different region servers. If they are on the same server, moves tableName2's
 * region to the other region server.
 */
private void ensureTablesOnDifferentRegionServers(String tableName1, String tableName2) throws Exception {
  byte[] table1 = Bytes.toBytes(tableName1);
  byte[] table2 = Bytes.toBytes(tableName2);
  HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES).getAdmin();
  HBaseTestingUtility util = getUtility();
  MiniHBaseCluster cluster = util.getHBaseCluster();
  HMaster master = cluster.getMaster();
  AssignmentManager am = master.getAssignmentManager();
  // verify there is only a single region for the data table
  List<HRegionInfo> tableRegions = admin.getTableRegions(table1);
  assertEquals("Expected single region for " + tableName1, 1, tableRegions.size());
  HRegionInfo hri1 = tableRegions.get(0);
  // verify there is only a single region for the index table
  tableRegions = admin.getTableRegions(table2);
  assertEquals("Expected single region for " + tableName2, 1, tableRegions.size());
  HRegionInfo hri2 = tableRegions.get(0);
  ServerName serverName1 = am.getRegionStates().getRegionServerOfRegion(hri1);
  ServerName serverName2 = am.getRegionStates().getRegionServerOfRegion(hri2);
  // if the data table and index table are on the same region server, move the index region to the other server
  if (serverName1.equals(serverName2)) {
    HRegionServer server1 = util.getHBaseCluster().getRegionServer(0);
    HRegionServer server2 = util.getHBaseCluster().getRegionServer(1);
    HRegionServer dstServer = null;
    HRegionServer srcServer = null;
    if (server1.getServerName().equals(serverName2)) {
      dstServer = server2;
      srcServer = server1;
    } else {
      dstServer = server1;
      srcServer = server2;
    }
    byte[] encodedRegionNameInBytes = hri2.getEncodedNameAsBytes();
    admin.move(encodedRegionNameInBytes, Bytes.toBytes(dstServer.getServerName().getServerName()));
    while (dstServer.getOnlineRegion(hri2.getRegionName()) == null
        || dstServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
        || srcServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
        || master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
      // wait for the move to finish
      Thread.sleep(1);
    }
  }
  hri1 = admin.getTableRegions(table1).get(0);
  serverName1 = am.getRegionStates().getRegionServerOfRegion(hri1);
  hri2 = admin.getTableRegions(table2).get(0);
  serverName2 = am.getRegionStates().getRegionServerOfRegion(hri2);
  // verify index and data tables are on different servers
  assertNotEquals("Tables " + tableName1 + " and " + tableName2 + " should be on different region servers",
    serverName1, serverName2);
}
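A sketch of a hypothetical call site; the table names are placeholders, and the mini cluster must have been started with at least two region servers, otherwise there is no second server to move the index region to.

// e.g. a Phoenix data table and its index table, created earlier in the test
ensureTablesOnDifferentRegionServers("T_DATA", "T_DATA_IDX");
// from here on, RPCs to the two tables are guaranteed to hit different region servers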
use of org.apache.hadoop.hbase.MiniHBaseCluster in project cdap by caskdata.
the class HBase98Test method forEachRegion.
@Override
public <T> Map<byte[], T> forEachRegion(byte[] tableName, Function<HRegion, T> function) {
  MiniHBaseCluster hbaseCluster = getHBaseCluster();
  Map<byte[], T> results = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  // visit every online region of the table on each region server in the mini cluster
  for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) {
    List<HRegion> serverRegions = t.getRegionServer().getOnlineRegions(TableName.valueOf(tableName));
    for (HRegion region : serverRegions) {
      results.put(region.getRegionName(), function.apply(region));
    }
  }
  return results;
}
use of org.apache.hadoop.hbase.MiniHBaseCluster in project cdap by caskdata.
the class HBase96Test method forEachRegion.
@Override
public <T> Map<byte[], T> forEachRegion(byte[] tableName, Function<HRegion, T> function) {
  MiniHBaseCluster hbaseCluster = getHBaseCluster();
  Map<byte[], T> results = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  // visit every online region of the table on each region server in the mini cluster
  for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) {
    List<HRegion> serverRegions = t.getRegionServer().getOnlineRegions(TableName.valueOf(tableName));
    for (HRegion region : serverRegions) {
      results.put(region.getRegionName(), function.apply(region));
    }
  }
  return results;
}
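A hedged usage sketch for either variant (the table name is hypothetical): because forEachRegion walks every region server thread in the MiniHBaseCluster, the Guava Function sees all online regions of the table regardless of where they are hosted.

Map<byte[], String> regionNames = forEachRegion(
    Bytes.toBytes("my.queue.table"), // hypothetical table
    new Function<HRegion, String>() {
      @Override
      public String apply(HRegion region) {
        // any per-region computation can go here; we just collect names
        return region.getRegionNameAsString();
      }
    });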