use of org.apache.hadoop.hbase.client.HBaseAdmin in project hbase by apache.
the class TestIncrementalBackup method TestIncBackupRestore.
// implement all test cases in one test, since the incremental backup/restore steps depend on each other
@Test
public void TestIncBackupRestore() throws Exception {
int ADD_ROWS = 99;
// #1 - create full backup for all tables
LOG.info("create full backup image for all tables");
List<TableName> tables = Lists.newArrayList(table1, table2);
final byte[] fam3Name = Bytes.toBytes("f3");
table1Desc.addFamily(new HColumnDescriptor(fam3Name));
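// modifyTableSync applies the schema change and waits for it to complete, so family f3 exists before rows are written to it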
HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
Connection conn = ConnectionFactory.createConnection(conf1);
int NB_ROWS_FAM3 = 6;
insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
BackupAdminImpl client = new BackupAdminImpl(conn);
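// request a FULL backup of table1 and table2 under BACKUP_ROOT_DIR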
BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
String backupIdFull = client.backupTables(request);
assertTrue(checkSucceeded(backupIdFull));
// #2 - insert some data to table
HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
t1.close();
LOG.debug("written " + ADD_ROWS + " rows to " + table1);
HTable t2 = (HTable) conn.getTable(table2);
Put p2;
for (int i = 0; i < 5; i++) {
p2 = new Put(Bytes.toBytes("row-t2" + i));
p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
t2.put(p2);
}
Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + 5);
t2.close();
LOG.debug("written " + 5 + " rows to " + table2);
// split table1
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
List<HRegion> regions = cluster.getRegions(table1);
byte[] name = regions.get(0).getRegionInfo().getRegionName();
long startSplitTime = EnvironmentEdgeManager.currentTime();
admin.splitRegion(name);
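// splitRegion is asynchronous; poll until every region of table1 is online again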
while (!admin.isTableAvailable(table1)) {
Thread.sleep(100);
}
long endSplitTime = EnvironmentEdgeManager.currentTime();
// split finished
LOG.debug("split finished in =" + (endSplitTime - startSplitTime));
// #3 - incremental backup for multiple tables
tables = Lists.newArrayList(table1, table2);
request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
String backupIdIncMultiple = client.backupTables(request);
assertTrue(checkSucceeded(backupIdIncMultiple));
// add column family f2 to table1
final byte[] fam2Name = Bytes.toBytes("f2");
table1Desc.addFamily(new HColumnDescriptor(fam2Name));
// drop column family f3
table1Desc.removeFamily(fam3Name);
HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
int NB_ROWS_FAM2 = 7;
HTable t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
t3.close();
// #3.1 - second incremental backup for multiple tables, after the schema change
request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
String backupIdIncMultiple2 = client.backupTables(request);
assertTrue(checkSucceeded(backupIdIncMultiple2));
// #4 - restore full backup for all tables, without overwrite
TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
LOG.debug("Restoring full " + backupIdFull);
client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull, tablesMapFull, false));
// #5.1 - check tables for full restore
HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
assertTrue(hAdmin.tableExists(table1_restore));
assertTrue(hAdmin.tableExists(table2_restore));
hAdmin.close();
// #5.2 - checking row count of tables for full restore
HTable hTable = (HTable) conn.getTable(table1_restore);
Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
hTable.close();
hTable = (HTable) conn.getTable(table2_restore);
Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
hTable.close();
// #6 - restore incremental backup for multiple tables, with overwrite
TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
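// an incremental restore replays the full image plus the incremental images up to backupIdIncMultiple2; isOverwrite=true lets it replace the tables created by the full restore in #4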
client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
hTable = (HTable) conn.getTable(table1_restore);
LOG.debug("After incremental restore: " + hTable.getTableDescriptor());
LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " rows");
Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH + ADD_ROWS);
LOG.debug("f2 has " + TEST_UTIL.countRows(hTable, fam2Name) + " rows");
Assert.assertEquals(TEST_UTIL.countRows(hTable, fam2Name), NB_ROWS_FAM2);
hTable.close();
hTable = (HTable) conn.getTable(table2_restore);
Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + 5);
hTable.close();
admin.close();
conn.close();
}
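Note that the test closes admin and conn only on the success path, so a failed assertion leaks both. A minimal sketch of the same setup with try-with-resources, using the public Admin interface instead of the HBaseAdmin cast (an editorial illustration, not code from the test; conf1, tables and BACKUP_ROOT_DIR are fields of the test fixture):
try (Connection conn = ConnectionFactory.createConnection(conf1);
     Admin admin = conn.getAdmin()) {
  BackupAdminImpl client = new BackupAdminImpl(conn);
  BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
  String backupIdFull = client.backupTables(request);
  assertTrue(checkSucceeded(backupIdFull));
  // ... remaining backup, split and restore steps ...
} // conn and admin are closed even if an assertion fails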
use of org.apache.hadoop.hbase.client.HBaseAdmin in project hbase by apache.
the class TestRestoreBoundaryTests method testFullRestoreSingleEmpty.
/**
* Verify that a single empty table is restored to a new table
* @throws Exception
*/
@Test
public void testFullRestoreSingleEmpty() throws Exception {
LOG.info("test full restore on a single table empty table");
String backupId = fullTableBackup(toList(table1.getNameAsString()));
LOG.info("backup complete");
TableName[] tableset = new TableName[] { table1 };
TableName[] tablemap = new TableName[] { table1_restore };
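// restore into table1_restore rather than over the original table (isOverwrite=false)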
getBackupAdmin().restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, false));
HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
assertTrue(hba.tableExists(table1_restore));
TEST_UTIL.deleteTable(table1_restore);
}
use of org.apache.hadoop.hbase.client.HBaseAdmin in project hbase by apache.
the class TestSystemTableSnapshot method _testBackupRestoreSystemTable.
/**
* Verify that a single table is restored to a new table
* @throws Exception
*/
//@Test - Disabled until we get resolution on system table snapshots
public void _testBackupRestoreSystemTable() throws Exception {
LOG.info("test snapshot system table");
TableName backupSystem = BackupSystemTable.getTableName(conf1);
HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
String snapshotName = "sysTable";
hba.snapshot(snapshotName, backupSystem);
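// a snapshot can only be restored onto a disabled table, hence the disable/restore/enable sequence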
hba.disableTable(backupSystem);
hba.restoreSnapshot(snapshotName);
hba.enableTable(backupSystem);
hba.close();
}
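The snapshot itself is left behind when this method finishes. If cleanup were wanted, a sketch of the same cycle with an explicit delete at the end (a hypothetical addition to the test; deleteSnapshot is the standard HBaseAdmin call):
hba.snapshot(snapshotName, backupSystem);
hba.disableTable(backupSystem);
hba.restoreSnapshot(snapshotName);
hba.enableTable(backupSystem);
hba.deleteSnapshot(snapshotName); // drop the snapshot once it has been restored
hba.close();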
use of org.apache.hadoop.hbase.client.HBaseAdmin in project hbase by apache.
the class TestBackupMultipleDeletes method testBackupMultipleDeletes.
@Test
public void testBackupMultipleDeletes() throws Exception {
// #1 - create full backup for all tables
LOG.info("create full backup image for all tables");
List<TableName> tables = Lists.newArrayList(table1, table2);
Connection conn = ConnectionFactory.createConnection(conf1);
HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
BackupAdmin client = new BackupAdminImpl(conn);
BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
String backupIdFull = client.backupTables(request);
assertTrue(checkSucceeded(backupIdFull));
// #2 - insert some data to table table1
HTable t1 = (HTable) conn.getTable(table1);
Put p1;
for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
p1 = new Put(Bytes.toBytes("row-t1" + i));
p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
t1.put(p1);
}
Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH * 2);
t1.close();
// #3 - incremental backup for table1
tables = Lists.newArrayList(table1);
request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
String backupIdInc1 = client.backupTables(request);
assertTrue(checkSucceeded(backupIdInc1));
// #4 - insert some data to table table2
HTable t2 = (HTable) conn.getTable(table2);
Put p2 = null;
for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
p2 = new Put(Bytes.toBytes("row-t2" + i));
p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
t2.put(p2);
}
// #5 - incremental backup for table1, table2
tables = Lists.newArrayList(table1, table2);
request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
String backupIdInc2 = client.backupTables(request);
assertTrue(checkSucceeded(backupIdInc2));
// #6 - insert some data to table table1
t1 = (HTable) conn.getTable(table1);
for (int i = NB_ROWS_IN_BATCH; i < 2 * NB_ROWS_IN_BATCH; i++) {
p1 = new Put(Bytes.toBytes("row-t1" + i));
p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
t1.put(p1);
}
// #7 - incremental backup for table1
tables = Lists.newArrayList(table1);
request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
String backupIdInc3 = client.backupTables(request);
assertTrue(checkSucceeded(backupIdInc3));
// #8 - insert some data to table table2
t2 = (HTable) conn.getTable(table2);
for (int i = NB_ROWS_IN_BATCH; i < 2 * NB_ROWS_IN_BATCH; i++) {
p2 = new Put(Bytes.toBytes("row-t1" + i));
p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
t2.put(p2);
}
// #9 - incremental backup for table1, table2
tables = Lists.newArrayList(table1, table2);
request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
String backupIdInc4 = client.backupTables(request);
assertTrue(checkSucceeded(backupIdInc4));
// #10 full backup for table3
tables = Lists.newArrayList(table3);
request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
String backupIdFull2 = client.backupTables(request);
assertTrue(checkSucceeded(backupIdFull2));
// #11 - incremental backup for table3
tables = Lists.newArrayList(table3);
request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
String backupIdInc5 = client.backupTables(request);
assertTrue(checkSucceeded(backupIdInc5));
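// deleting backupIdInc2 must cascade to the incremental images that depend on it (backupIdInc3 and backupIdInc4) while leaving backupIdFull, backupIdInc1, backupIdFull2 and backupIdInc5 intact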
LOG.error("Delete backupIdInc2");
client.deleteBackups(new String[] { backupIdInc2 });
LOG.error("Delete backupIdInc2 done");
List<BackupInfo> list = client.getHistory(100);
// first check the number of backup images remaining after the delete (7 were created, 3 were removed)
assertEquals(4, list.size());
// then verify that backupIdInc2, backupIdInc3 and backupIdInc4 are no longer present
Set<String> ids = new HashSet<String>();
ids.add(backupIdInc2);
ids.add(backupIdInc3);
ids.add(backupIdInc4);
for (BackupInfo info : list) {
String backupId = info.getBackupId();
if (ids.contains(backupId)) {
Assert.fail("deleted backup " + backupId + " still appears in the history");
}
}
// Verify that backupInc5 contains only table3
boolean found = false;
for (BackupInfo info : list) {
String backupId = info.getBackupId();
if (backupId.equals(backupIdInc5)) {
assertTrue(info.getTables().size() == 1);
assertEquals(table3, info.getTableNames().get(0));
found = true;
}
}
assertTrue(found);
admin.close();
conn.close();
}
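The two verification loops at the end can be expressed more compactly. A sketch using Java 8 streams (an editorial rewrite of the checks above, assuming java.util.Arrays and the other java.util imports are available):
List<BackupInfo> list = client.getHistory(100);
assertEquals(4, list.size());
// none of the cascade-deleted ids may remain in the history
Set<String> deleted = new HashSet<>(Arrays.asList(backupIdInc2, backupIdInc3, backupIdInc4));
assertTrue(list.stream().noneMatch(info -> deleted.contains(info.getBackupId())));
// backupIdInc5 must still exist and must cover only table3
BackupInfo inc5 = list.stream()
    .filter(info -> info.getBackupId().equals(backupIdInc5))
    .findFirst().orElseThrow(AssertionError::new);
assertEquals(1, inc5.getTableNames().size());
assertEquals(table3, inc5.getTableNames().get(0));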
use of org.apache.hadoop.hbase.client.HBaseAdmin in project hbase by apache.
the class TestFullBackupSet method testFullBackupSetExist.
/**
* Verify that a full backup of a single table containing data is created correctly.
* @throws Exception
*/
@Test
public void testFullBackupSetExist() throws Exception {
LOG.info("Test full backup, backup set exists");
// Create set
try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
String name = "name";
table.addToBackupSet(name, new String[] { table1.getNameAsString() });
List<TableName> names = table.describeBackupSet(name);
assertNotNull(names);
assertTrue(names.size() == 1);
assertTrue(names.get(0).equals(table1));
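// same as the command line: backup create full <BACKUP_ROOT_DIR> -s name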
String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", name };
// Run backup
int ret = ToolRunner.run(conf1, new BackupDriver(), args);
assertTrue(ret == 0);
List<BackupInfo> backups = table.getBackupHistory();
assertTrue(backups.size() == 1);
String backupId = backups.get(0).getBackupId();
assertTrue(checkSucceeded(backupId));
LOG.info("backup complete");
// Restore from set into other table
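// -s selects the backup set, -m maps the original table to table1_restore, -o allows overwriting an existing table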
args = new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m", table1_restore.getNameAsString(), "-o" };
// Run restore
ret = ToolRunner.run(conf1, new RestoreDriver(), args);
assertTrue(ret == 0);
HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
assertTrue(hba.tableExists(table1_restore));
// Verify number of rows in both tables
assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore));
TEST_UTIL.deleteTable(table1_restore);
LOG.info("restore into other table is complete");
hba.close();
}
}