Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
The class CreateSnapshot, method doWork:
@Override
protected int doWork() throws Exception {
  Connection connection = null;
  Admin admin = null;
  try {
    connection = ConnectionFactory.createConnection(getConf());
    admin = connection.getAdmin();
    admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType));
  } catch (Exception e) {
    System.err.println("failed to take the snapshot: " + e.getMessage());
    return -1;
  } finally {
    if (admin != null) {
      admin.close();
    }
    if (connection != null) {
      connection.close();
    }
  }
  return 0;
}
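Connection and Admin both implement Closeable, so the same cleanup can be expressed more compactly with try-with-resources. A minimal sketch of that variant, assuming the surrounding snapshotName, tableName and snapshotType fields are available as in the original:

@Override
protected int doWork() throws Exception {
  // try-with-resources closes admin, then connection, even when snapshot() throws
  try (Connection connection = ConnectionFactory.createConnection(getConf());
       Admin admin = connection.getAdmin()) {
    admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType));
  } catch (Exception e) {
    System.err.println("failed to take the snapshot: " + e.getMessage());
    return -1;
  }
  return 0;
}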
Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
The class TestBackupLogCleaner, method testBackupLogCleaner:
// implements all test cases in one test, since incremental backup
// depends on a preceding full backup
@Test
public void testBackupLogCleaner() throws Exception {
  // #1 - create full backup for all tables
  LOG.info("create full backup image for all tables");
  List<TableName> tableSetFullList = Lists.newArrayList(table1, table2, table3, table4);
  try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
    // Verify that we have no backup sessions yet
    assertFalse(systemTable.hasBackupSessions());
    List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
    List<String> swalFiles = convert(walFiles);
    BackupLogCleaner cleaner = new BackupLogCleaner();
    cleaner.setConf(TEST_UTIL.getConfiguration());
    cleaner.init(null);
    Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
    int size = Iterables.size(deletable);
    // All files can be deleted because no backup sessions have been recorded yet
    assertEquals(walFiles.size(), size);
    systemTable.addWALFiles(swalFiles, "backup", "root");
    String backupIdFull = fullTableBackup(tableSetFullList);
    assertTrue(checkSucceeded(backupIdFull));
    // Check one more time
    deletable = cleaner.getDeletableFiles(walFiles);
    // The WAL files can still be deleted because they were recorded in the backup system table
    size = Iterables.size(deletable);
    assertEquals(walFiles.size(), size);
    List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
    LOG.debug("WAL list after full backup");
    convert(newWalFiles);
    // The new WAL list is longer than the previous one, because a new WAL was
    // rolled on each region server after the full backup
    assertTrue(walFiles.size() < newWalFiles.size());
    Connection conn = ConnectionFactory.createConnection(conf1);
    // #2 - insert some data into the tables
    HTable t1 = (HTable) conn.getTable(table1);
    Put p1;
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
      p1 = new Put(Bytes.toBytes("row-t1" + i));
      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
      t1.put(p1);
    }
    t1.close();
    HTable t2 = (HTable) conn.getTable(table2);
    Put p2;
    for (int i = 0; i < 5; i++) {
      p2 = new Put(Bytes.toBytes("row-t2" + i));
      p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
      t2.put(p2);
    }
    t2.close();
    // #3 - incremental backup for multiple tables
    List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
    String backupIdIncMultiple =
        backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR);
    assertTrue(checkSucceeded(backupIdIncMultiple));
    deletable = cleaner.getDeletableFiles(newWalFiles);
    assertEquals(newWalFiles.size(), Iterables.size(deletable));
    conn.close();
  }
}
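Here the test drives the cleaner by hand; in a running cluster, BackupLogCleaner is one delegate in the master's log-cleaner chore, registered through the hbase.master.logcleaner.plugins property. As a rough illustration of that contract (not HBase's own code), a custom delegate only has to decide per file whether a WAL may go; the class name and retention policy below are made up for the example, and the sketch assumes the BaseLogCleanerDelegate/isFileDeletable shape of this HBase version:

// Hypothetical delegate: keep WALs for one hour, then let the chore delete them.
public class RecentWalKeepingCleaner extends BaseLogCleanerDelegate {
  private static final long MAX_AGE_MS = 60 * 60 * 1000L;

  @Override
  public boolean isFileDeletable(FileStatus fStat) {
    // Deletable once the WAL is older than the retention window.
    long age = System.currentTimeMillis() - fStat.getModificationTime();
    return age > MAX_AGE_MS;
  }
}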
Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
The class TestIncrementalBackupDeleteTable, method TestIncBackupDeleteTable:
// implements all test cases in one test, since incremental backup/restore has dependencies
@Test
public void TestIncBackupDeleteTable() throws Exception {
  // #1 - create full backup for all tables
  LOG.info("create full backup image for all tables");
  List<TableName> tables = Lists.newArrayList(table1, table2);
  Connection conn = ConnectionFactory.createConnection(conf1);
  HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
  BackupAdminImpl client = new BackupAdminImpl(conn);
  BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
  String backupIdFull = client.backupTables(request);
  assertTrue(checkSucceeded(backupIdFull));
  // #2 - insert some data into table1
  HTable t1 = (HTable) conn.getTable(table1);
  Put p1;
  for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
    p1 = new Put(Bytes.toBytes("row-t1" + i));
    p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
    t1.put(p1);
  }
  Assert.assertEquals(NB_ROWS_IN_BATCH * 2, TEST_UTIL.countRows(t1));
  t1.close();
  // Delete table2
  admin.disableTable(table2);
  admin.deleteTable(table2);
  // #3 - incremental backup for table1
  tables = Lists.newArrayList(table1);
  request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
  String backupIdIncMultiple = client.backupTables(request);
  assertTrue(checkSucceeded(backupIdIncMultiple));
  // #4 - restore full backup for all tables, without overwrite
  TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
  TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
  client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
    tablesRestoreFull, tablesMapFull, false));
  // #5.1 - check that the restored tables exist
  HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
  assertTrue(hAdmin.tableExists(table1_restore));
  assertTrue(hAdmin.tableExists(table2_restore));
  // #5.2 - check the row count of the restored tables
  HTable hTable = (HTable) conn.getTable(table1_restore);
  Assert.assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(hTable));
  hTable.close();
  hTable = (HTable) conn.getTable(table2_restore);
  Assert.assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(hTable));
  hTable.close();
  // #6 - restore incremental backup for table1
  TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
  TableName[] tablesMapIncMultiple = new TableName[] { table1_restore };
  client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false,
    tablesRestoreIncMultiple, tablesMapIncMultiple, true));
  hTable = (HTable) conn.getTable(table1_restore);
  Assert.assertEquals(NB_ROWS_IN_BATCH * 2, TEST_UTIL.countRows(hTable));
  hTable.close();
  admin.close();
  conn.close();
}
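createBackupRequest here comes from the TestBackupBase superclass. In this version of the backup API the request object is assembled with a builder, so the helper is presumably a thin wrapper along these lines; a hedged sketch, assuming BackupRequest.Builder from the hbase-backup module:

// Sketch of a createBackupRequest-style helper using BackupRequest.Builder.
private BackupRequest createBackupRequest(BackupType type, List<TableName> tables,
    String rootDir) {
  return new BackupRequest.Builder()
      .withBackupType(type)       // FULL or INCREMENTAL
      .withTableList(tables)      // tables to include in the image
      .withTargetRootDir(rootDir) // where the backup image is written
      .build();
}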
Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
The class TestRemoteBackup, method testFullBackupRemote:
/**
 * Verify that a remote full backup of a single table is created correctly
 * while data is being written concurrently.
 * @throws Exception if the backup or restore fails
 */
@Test
public void testFullBackupRemote() throws Exception {
  LOG.info("test remote full backup on a single table");
  final CountDownLatch latch = new CountDownLatch(1);
  final int NB_ROWS_IN_FAM3 = 6;
  final byte[] fam3Name = Bytes.toBytes("f3");
  final byte[] fam2Name = Bytes.toBytes("f2");
  final Connection conn = ConnectionFactory.createConnection(conf1);
  Thread t = new Thread() {
    @Override
    public void run() {
      try {
        latch.await();
      } catch (InterruptedException ie) {
        // Restore the interrupt flag; the writes below still proceed.
        Thread.currentThread().interrupt();
      }
      try {
        HTable t1 = (HTable) conn.getTable(table1);
        Put p1;
        for (int i = 0; i < NB_ROWS_IN_FAM3; i++) {
          p1 = new Put(Bytes.toBytes("row-t1" + i));
          p1.addColumn(fam3Name, qualName, Bytes.toBytes("val" + i));
          t1.put(p1);
        }
        LOG.debug("Wrote " + NB_ROWS_IN_FAM3 + " rows into family3");
        t1.close();
      } catch (IOException ioe) {
        throw new RuntimeException(ioe);
      }
    }
  };
  t.start();
  table1Desc.addFamily(new HColumnDescriptor(fam3Name));
  // family 2 is MOB enabled
  HColumnDescriptor hcd = new HColumnDescriptor(fam2Name);
  hcd.setMobEnabled(true);
  hcd.setMobThreshold(0L);
  table1Desc.addFamily(hcd);
  HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
  SnapshotTestingUtils.loadData(TEST_UTIL, table1, 50, fam2Name);
  HTable t1 = (HTable) conn.getTable(table1);
  int rows0 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
  latch.countDown();
  String backupId =
      backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
  assertTrue(checkSucceeded(backupId));
  LOG.info("backup complete " + backupId);
  Assert.assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(t1, famName));
  t.join();
  Assert.assertEquals(NB_ROWS_IN_FAM3, TEST_UTIL.countRows(t1, fam3Name));
  TableName[] tablesRestoreFull = new TableName[] { table1 };
  TableName[] tablesMapFull = new TableName[] { table1_restore };
  BackupAdmin client = getBackupAdmin();
  client.restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false,
    tablesRestoreFull, tablesMapFull, false));
  // check that the restored table exists
  HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
  assertTrue(hAdmin.tableExists(table1_restore));
  // check the row count of the restored table
  HTable hTable = (HTable) conn.getTable(table1_restore);
  Assert.assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(hTable, famName));
  int cnt3 = TEST_UTIL.countRows(hTable, fam3Name);
  Assert.assertTrue(cnt3 >= 0 && cnt3 <= NB_ROWS_IN_FAM3);
  int rows1 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
  Assert.assertEquals(rows0, rows1);
  t1.close();
  hTable.close();
  hAdmin.close();
  conn.close();
}
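The interesting pattern here is the CountDownLatch handshake: the writer thread blocks on latch.await() until the main thread has set up the MOB family and counted the baseline rows, so the fam3 writes deliberately race with the backup (hence the cnt3 range assertion above). The handshake in isolation, as a self-contained sketch with illustrative names:

import java.util.concurrent.CountDownLatch;

public class LatchHandshake {
  public static void main(String[] args) throws InterruptedException {
    CountDownLatch startSignal = new CountDownLatch(1);
    Thread writer = new Thread(() -> {
      try {
        startSignal.await(); // block until the main thread releases us
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        return;
      }
      System.out.println("writing rows concurrently with the long-running operation");
    });
    writer.start();
    // ... start the long-running operation, then release the writer:
    startSignal.countDown();
    writer.join(); // wait for the writer before asserting on its effects
  }
}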
Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
The class TestBackupBase, method backupTables:
protected String backupTables(BackupType type, List<TableName> tables, String path)
    throws IOException {
  Connection conn = null;
  BackupAdmin badmin = null;
  String backupId;
  try {
    conn = ConnectionFactory.createConnection(conf1);
    badmin = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(type, tables, path);
    backupId = badmin.backupTables(request);
  } finally {
    if (badmin != null) {
      badmin.close();
    }
    if (conn != null) {
      conn.close();
    }
  }
  return backupId;
}
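Since Connection is Closeable and BackupAdmin extends Closeable (the finally block above relies on close() for both), the null checks can be dropped with try-with-resources. A minimal sketch of the equivalent helper, assuming the same conf1 and createBackupRequest as above:

protected String backupTables(BackupType type, List<TableName> tables, String path)
    throws IOException {
  // Both resources are closed in reverse order, even if backupTables(request) throws.
  try (Connection conn = ConnectionFactory.createConnection(conf1);
       BackupAdmin badmin = new BackupAdminImpl(conn)) {
    BackupRequest request = createBackupRequest(type, tables, path);
    return badmin.backupTables(request);
  }
}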