
Example 1 with BackupCopyJob

Use of org.apache.hadoop.hbase.backup.BackupCopyJob in project hbase by apache.

From the class IncrementalTableBackupClient, method incrementalCopy:

/**
   * Do incremental copy.
   * @param backupInfo backup info
   */
private void incrementalCopy(BackupInfo backupInfo) throws Exception {
    LOG.info("Incremental copy is starting.");
    // set overall backup phase: incremental_copy
    backupInfo.setPhase(BackupPhase.INCREMENTAL_COPY);
    // get the incremental backup file list and prepare the params for DistCp
    List<String> incrBackupFileList = backupInfo.getIncrBackupFileList();
    // filter missing files out (they have been copied by previous backups)
    incrBackupFileList = filterMissingFiles(incrBackupFileList);
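    // NOTE: the extra slot in the array below is intentional; List.toArray
    // pads the unfilled tail of an oversized array with null, and that last
    // slot is then set to the copy target directory.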
    String[] strArr = incrBackupFileList.toArray(new String[incrBackupFileList.size() + 1]);
    strArr[strArr.length - 1] = backupInfo.getHLogTargetDir();
    BackupCopyJob copyService = BackupRestoreFactory.getBackupCopyJob(conf);
    int counter = 0;
    final int MAX_ITERATIONS = 2;
    while (counter++ < MAX_ITERATIONS) {
        // Run DistCp at most twice: the first pass can miss files that were
        // archived from WALs to oldWALs while the copy was in flight.
        // If files are still missing after the second pass, we throw.
        int res = copyService.copy(backupInfo, backupManager, conf, BackupType.INCREMENTAL, strArr);
        if (res != 0) {
            LOG.error("Copy incremental log files failed with return code: " + res + ".");
            throw new IOException("Failed of Hadoop Distributed Copy from " + StringUtils.join(incrBackupFileList, ",") + " to " + backupInfo.getHLogTargetDir());
        }
        List<String> missingFiles = getMissingFiles(incrBackupFileList);
        if (missingFiles.isEmpty()) {
            break;
        } else {
            // update backupInfo and strArr
            if (counter == MAX_ITERATIONS) {
                String msg = "DistCp could not finish the following files: " + StringUtils.join(missingFiles, ",");
                LOG.error(msg);
                throw new IOException(msg);
            }
            List<String> converted = convertFilesFromWALtoOldWAL(missingFiles);
            incrBackupFileList.removeAll(missingFiles);
            incrBackupFileList.addAll(converted);
            backupInfo.setIncrBackupFileList(incrBackupFileList);
            // Run DistCp only for missing files (which have been moved from WALs to oldWALs
            // during previous run)
            strArr = converted.toArray(new String[converted.size() + 1]);
            strArr[strArr.length - 1] = backupInfo.getHLogTargetDir();
        }
    }
    LOG.info("Incremental copy from " + StringUtils.join(incrBackupFileList, ",") + " to " + backupInfo.getHLogTargetDir() + " finished.");
}
Also used: BackupCopyJob (org.apache.hadoop.hbase.backup.BackupCopyJob), IOException (java.io.IOException)
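
The retry branch hinges on convertFilesFromWALtoOldWAL, a private helper not shown on this page: files that DistCp reported missing have typically been archived from the WALs directory to oldWALs between runs. Below is a minimal, hypothetical reconstruction of that path translation, for illustration only; the real helper may resolve the oldWALs location from the configuration instead.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class WalPathSketch {
    // Hypothetical reconstruction: a WAL keeps its file name when the region
    // server archives it, i.e. <root>/WALs/<server>/<file> moves to <root>/oldWALs/<file>.
    static List<String> convertFilesFromWALtoOldWAL(List<String> missing) {
        List<String> converted = new ArrayList<>(missing.size());
        for (String p : missing) {
            String root = p.substring(0, p.indexOf("/WALs/"));  // e.g. hdfs://nn:8020/hbase
            String name = p.substring(p.lastIndexOf('/') + 1);  // the WAL file name
            converted.add(root + "/oldWALs/" + name);
        }
        return converted;
    }

    public static void main(String[] ignored) {
        // Prints [hdfs://nn:8020/hbase/oldWALs/wal.1]
        System.out.println(convertFilesFromWALtoOldWAL(
                Arrays.asList("hdfs://nn:8020/hbase/WALs/server1/wal.1")));
    }
}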

Example 2 with BackupCopyJob

Use of org.apache.hadoop.hbase.backup.BackupCopyJob in project hbase by apache.

From the class FullTableBackupClient, method snapshotCopy:

/**
   * Do snapshot copy.
   * @param backupInfo backup info
   * @throws Exception exception
   */
private void snapshotCopy(BackupInfo backupInfo) throws Exception {
    LOG.info("Snapshot copy is starting.");
    // set overall backup phase: snapshot_copy
    backupInfo.setPhase(BackupPhase.SNAPSHOTCOPY);
    // call ExportSnapshot to copy files based on the hbase snapshot for backup
    // ExportSnapshot only supports exporting a single snapshot, so we loop over the tables
    BackupCopyJob copyService = BackupRestoreFactory.getBackupCopyJob(conf);
    // number of snapshots matches number of tables
    float numOfSnapshots = backupInfo.getSnapshotNames().size();
    LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied.");
    for (TableName table : backupInfo.getTables()) {
        // Currently we simply set the sub copy tasks by counting the table snapshot number, we can
        // calculate the real files' size for the percentage in the future.
        // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots);
        int res = 0;
        String[] args = new String[4];
        args[0] = "-snapshot";
        args[1] = backupInfo.getSnapshotName(table);
        args[2] = "-copy-to";
        args[3] = backupInfo.getTableBackupDir(table);
        LOG.debug("Copy snapshot " + args[1] + " to " + args[3]);
        res = copyService.copy(backupInfo, backupManager, conf, BackupType.FULL, args);
        // if one snapshot export fails, do not continue with the remaining snapshots
        if (res != 0) {
            LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + ".");
            throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3] + " with reason code " + res);
        }
        LOG.info("Snapshot copy " + args[1] + " finished.");
    }
}
Also used: TableName (org.apache.hadoop.hbase.TableName), BackupCopyJob (org.apache.hadoop.hbase.backup.BackupCopyJob), IOException (java.io.IOException)
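
The four-element args array mirrors the command-line flags of HBase's ExportSnapshot tool (-snapshot <name> -copy-to <dir>), which the default MapReduce-based copy job ultimately passes these arguments to for full backups. Below is a minimal sketch of an equivalent standalone invocation through ToolRunner; the snapshot name and destination path are hypothetical stand-ins for backupInfo.getSnapshotName(table) and backupInfo.getTableBackupDir(table).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
    public static void main(String[] ignored) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical snapshot name and target directory.
        String[] args = { "-snapshot", "snapshot_t1", "-copy-to", "hdfs://nn:8020/backup/full/t1" };
        // ExportSnapshot implements Hadoop's Tool, so ToolRunner drives it
        // the same way the hbase command-line entry point does.
        int res = ToolRunner.run(conf, new ExportSnapshot(), args);
        if (res != 0) {
            throw new RuntimeException("ExportSnapshot failed with return code " + res);
        }
    }
}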

Aggregations

IOException (java.io.IOException): 2 usages
BackupCopyJob (org.apache.hadoop.hbase.backup.BackupCopyJob): 2 usages
TableName (org.apache.hadoop.hbase.TableName): 1 usage