Example 21 with SnapshotDescription

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription in project hbase by apache.

From class MasterRpcServices, method snapshot.

/**
 * Triggers an asynchronous attempt to take a snapshot.
 * {@inheritDoc}
 */
@Override
public SnapshotResponse snapshot(RpcController controller, SnapshotRequest request) throws ServiceException {
    try {
        server.checkInitialized();
        server.snapshotManager.checkSnapshotSupport();
        LOG.info(server.getClientIdAuditPrefix() + " snapshot request for: " + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()));
        // get the snapshot information
        SnapshotDescription snapshot = SnapshotDescriptionUtils.validate(request.getSnapshot(), server.getConfiguration());
        server.snapshotManager.takeSnapshot(snapshot);
        // send back the max amount of time the client should wait for the snapshot to complete
        long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(server.getConfiguration(), snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
        return SnapshotResponse.newBuilder().setExpectedTimeout(waitTime).build();
    } catch (ForeignException e) {
        throw new ServiceException(e.getCause());
    } catch (IOException e) {
        throw new ServiceException(e);
    }
}
Also used: ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) ForeignException(org.apache.hadoop.hbase.errorhandling.ForeignException) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException)
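This RPC is normally reached through the client Admin API rather than invoked directly. A minimal client-side sketch, assuming a reachable cluster; the table and snapshot names are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotClientSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Blocks until the master reports the snapshot complete; the client's
            // polling is bounded by the expectedTimeout the RPC above returns.
            admin.snapshot("mySnapshot", TableName.valueOf("myTable"));
        }
    }
}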

Example 22 with SnapshotDescription

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription in project hbase by apache.

From class SnapshotReferenceUtil, method verifySnapshot.

/**
 * Verify the validity of the snapshot.
 *
 * @param conf the current {@link Configuration} instance
 * @param fs the {@link FileSystem} containing the snapshot
 * @param manifest snapshot manifest to inspect
 * @throws CorruptedSnapshotException if the snapshot is corrupted
 * @throws IOException if an error occurred while scanning the directory
 */
public static void verifySnapshot(final Configuration conf, final FileSystem fs, final SnapshotManifest manifest) throws IOException {
    final SnapshotDescription snapshotDesc = manifest.getSnapshotDescription();
    final Path snapshotDir = manifest.getSnapshotDir();
    concurrentVisitReferencedFiles(conf, fs, manifest, "VerifySnapshot", new StoreFileVisitor() {

        @Override
        public void storeFile(final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
            verifyStoreFile(conf, fs, snapshotDir, snapshotDesc, regionInfo, family, storeFile);
        }
    });
}
Also used: Path(org.apache.hadoop.fs.Path) SnapshotRegionManifest(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException)
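A hedged sketch of driving verifySnapshot end to end, assembled from the same utility calls used elsewhere on this page; the snapshot name "mySnapshot" is an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class VerifySnapshotSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // hbase.rootdir from the configuration
        Path rootDir = CommonFSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        // Locate the completed snapshot, read its descriptor, open its manifest.
        Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir("mySnapshot", rootDir);
        SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
        SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
        // Throws CorruptedSnapshotException if a referenced store file is missing or invalid.
        SnapshotReferenceUtil.verifySnapshot(conf, fs, manifest);
    }
}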

Example 23 with SnapshotDescription

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription in project hbase by apache.

From class SnapshotReferenceUtil, method concurrentVisitReferencedFiles.

public static void concurrentVisitReferencedFiles(final Configuration conf, final FileSystem fs, final SnapshotManifest manifest, final ExecutorService exec, final StoreFileVisitor visitor) throws IOException {
    final SnapshotDescription snapshotDesc = manifest.getSnapshotDescription();
    final Path snapshotDir = manifest.getSnapshotDir();
    List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
    if (regionManifests == null || regionManifests.isEmpty()) {
        LOG.debug("No manifest files present: " + snapshotDir);
        return;
    }
    final ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<>(exec);
    for (final SnapshotRegionManifest regionManifest : regionManifests) {
        completionService.submit(new Callable<Void>() {

            @Override
            public Void call() throws IOException {
                visitRegionStoreFiles(regionManifest, visitor);
                return null;
            }
        });
    }
    try {
        for (int i = 0; i < regionManifests.size(); ++i) {
            completionService.take().get();
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        if (e.getCause() instanceof CorruptedSnapshotException) {
            throw new CorruptedSnapshotException(e.getCause().getMessage(), ProtobufUtil.createSnapshotDesc(snapshotDesc));
        } else {
            throw new IOException(e.getCause());
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) InterruptedIOException(java.io.InterruptedIOException) SnapshotRegionManifest(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException)
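Callers of this overload supply their own ExecutorService along with the visitor. A minimal sketch, assuming conf, fs, and manifest are obtained as in the previous sketch; the pool size is arbitrary:

import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;

public class VisitSnapshotFilesSketch {
    static void listReferencedFiles(Configuration conf, FileSystem fs, SnapshotManifest manifest)
            throws IOException {
        ExecutorService exec = Executors.newFixedThreadPool(4);
        try {
            // StoreFileVisitor declares a single storeFile(...) method, so a lambda fits.
            SnapshotReferenceUtil.concurrentVisitReferencedFiles(conf, fs, manifest, exec,
                (regionInfo, family, storeFile) -> System.out.println(
                    regionInfo.getEncodedName() + "/" + family + "/" + storeFile.getName()));
        } finally {
            // The utility submits work to the pool but does not own it; the caller shuts it down.
            exec.shutdown();
        }
    }
}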

Example 24 with SnapshotDescription

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription in project hbase by apache.

From class RestoreTool, method createAndRestoreTable.

private void createAndRestoreTable(Connection conn, TableName tableName, TableName newTableName, Path tableBackupPath, boolean truncateIfExists, String lastIncrBackupId) throws IOException {
    if (newTableName == null) {
        newTableName = tableName;
    }
    FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
    // get table descriptor first
    TableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, lastIncrBackupId);
    if (tableDescriptor != null) {
        LOG.debug("Retrieved descriptor: " + tableDescriptor + " thru " + lastIncrBackupId);
    }
    if (tableDescriptor == null) {
        Path tableSnapshotPath = getTableSnapshotPath(backupRootPath, tableName, backupId);
        if (fileSys.exists(tableSnapshotPath)) {
            // check whether snapshot dir already recorded for target table
            if (snapshotMap.get(tableName) != null) {
                SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath);
                SnapshotManifest manifest = SnapshotManifest.open(conf, fileSys, tableSnapshotPath, desc);
                tableDescriptor = manifest.getTableDescriptor();
            } else {
                tableDescriptor = getTableDesc(tableName);
                snapshotMap.put(tableName, getTableInfoPath(tableName));
            }
            if (tableDescriptor == null) {
                LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost");
            }
        } else {
            throw new IOException("Table snapshot directory: " + tableSnapshotPath + " does not exist.");
        }
    }
    Path tableArchivePath = getTableArchivePath(tableName);
    if (tableArchivePath == null) {
        if (tableDescriptor != null) {
            // a table descriptor without an archive dir means the table is empty; create the table and exit
            if (LOG.isDebugEnabled()) {
                LOG.debug("Found a table descriptor but no archive dir for table " + tableName + ", will only create the table");
            }
            tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor);
            checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor, truncateIfExists);
            return;
        } else {
            throw new IllegalStateException("Cannot restore hbase table because directory '" + " tableArchivePath is null.");
        }
    }
    if (tableDescriptor == null) {
        tableDescriptor = TableDescriptorBuilder.newBuilder(newTableName).build();
    } else {
        tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor);
    }
    // load all files in dir
    try {
        ArrayList<Path> regionPathList = getRegionList(tableName);
        // create the table with all region information so we can pre-split
        // the regions at fine granularity
        checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, regionPathList, tableDescriptor, truncateIfExists);
        RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
        Path[] paths = new Path[regionPathList.size()];
        regionPathList.toArray(paths);
        restoreService.run(paths, new TableName[] { tableName }, new TableName[] { newTableName }, true);
    } catch (Exception e) {
        LOG.error(e.toString(), e);
        throw new IllegalStateException("Cannot restore hbase table", e);
    }
}
Also used: Path(org.apache.hadoop.fs.Path) RestoreJob(org.apache.hadoop.hbase.backup.RestoreJob) SnapshotManifest(org.apache.hadoop.hbase.snapshot.SnapshotManifest) FileSystem(org.apache.hadoop.fs.FileSystem) HBackupFileSystem(org.apache.hadoop.hbase.backup.HBackupFileSystem) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription) IOException(java.io.IOException) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) NamespaceNotFoundException(org.apache.hadoop.hbase.NamespaceNotFoundException) FileNotFoundException(java.io.FileNotFoundException)
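The descriptor-recovery step in the middle of this method (readSnapshotInfo, then SnapshotManifest.open, then getTableDescriptor) can be exercised on its own. A minimal sketch, assuming the table snapshot directory is already known:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;

public class DescriptorFromSnapshotSketch {
    static TableDescriptor descriptorFor(Configuration conf, FileSystem fs,
            Path tableSnapshotPath, TableName newTableName) throws IOException {
        // Same three calls used by createAndRestoreTable above.
        SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableSnapshotPath);
        SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableSnapshotPath, desc);
        TableDescriptor fromSnapshot = manifest.getTableDescriptor();
        // Rename the descriptor to the restore target, as the restore path does.
        return TableDescriptorBuilder.copy(newTableName, fromSnapshot);
    }
}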

Example 25 with SnapshotDescription

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription in project hbase by apache.

From class TestSnapshotStoreFileSize, method testIsStoreFileSizeMatchFilesystemAndManifest.

@Test
public void testIsStoreFileSizeMatchFilesystemAndManifest() throws IOException {
    admin = UTIL.getAdmin();
    fs = UTIL.getTestFileSystem();
    UTIL.createTable(TABLE_NAME, FAMILY_NAME.getBytes());
    Table table = admin.getConnection().getTable(TABLE_NAME);
    UTIL.loadRandomRows(table, FAMILY_NAME.getBytes(), 3, 1000);
    admin.snapshot(SNAPSHOT_NAME, TABLE_NAME);
    Map<String, Long> storeFileInfoFromManifest = new HashMap<String, Long>();
    Map<String, Long> storeFileInfoFromFS = new HashMap<String, Long>();
    String storeFileName = "";
    long storeFilesize = 0L;
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(SNAPSHOT_NAME, UTIL.getDefaultRootDirPath());
    SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
    SnapshotManifest snapshotManifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
    List<SnapshotRegionManifest> regionManifest = snapshotManifest.getRegionManifests();
    for (int i = 0; i < regionManifest.size(); i++) {
        SnapshotRegionManifest.FamilyFiles family = regionManifest.get(i).getFamilyFiles(0);
        List<SnapshotRegionManifest.StoreFile> storeFiles = family.getStoreFilesList();
        for (int j = 0; j < storeFiles.size(); j++) {
            storeFileName = storeFiles.get(j).getName();
            storeFilesize = storeFiles.get(j).getFileSize();
            storeFileInfoFromManifest.put(storeFileName, storeFilesize);
        }
    }
    List<RegionInfo> regionsInfo = admin.getRegions(TABLE_NAME);
    Path path = CommonFSUtils.getTableDir(UTIL.getDefaultRootDirPath(), TABLE_NAME);
    for (RegionInfo regionInfo : regionsInfo) {
        HRegionFileSystem hRegionFileSystem = HRegionFileSystem.openRegionFromFileSystem(conf, fs, path, regionInfo, true);
        Collection<StoreFileInfo> storeFilesFS = hRegionFileSystem.getStoreFiles(FAMILY_NAME);
        Iterator<StoreFileInfo> sfIterator = storeFilesFS.iterator();
        while (sfIterator.hasNext()) {
            StoreFileInfo sfi = sfIterator.next();
            FileStatus[] fileStatus = CommonFSUtils.listStatus(fs, sfi.getPath());
            storeFileName = fileStatus[0].getPath().getName();
            storeFilesize = fileStatus[0].getLen();
            storeFileInfoFromFS.put(storeFileName, storeFilesize);
        }
    }
    Assert.assertEquals(storeFileInfoFromManifest, storeFileInfoFromFS);
}
Also used: Path(org.apache.hadoop.fs.Path) Table(org.apache.hadoop.hbase.client.Table) FileStatus(org.apache.hadoop.fs.FileStatus) HashMap(java.util.HashMap) SnapshotRegionManifest(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) StoreFileInfo(org.apache.hadoop.hbase.regionserver.StoreFileInfo) Test(org.junit.Test)

Aggregations

SnapshotDescription (org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription): 26 uses
Path (org.apache.hadoop.fs.Path): 16 uses
IOException (java.io.IOException): 12 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 6 uses
SnapshotRegionManifest (org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest): 6 uses
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 5 uses
SnapshotManifest (org.apache.hadoop.hbase.snapshot.SnapshotManifest): 5 uses
Test (org.junit.Test): 5 uses
FileNotFoundException (java.io.FileNotFoundException): 4 uses
ArrayList (java.util.ArrayList): 4 uses
TableName (org.apache.hadoop.hbase.TableName): 4 uses
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 4 uses
HashSet (java.util.HashSet): 3 uses
Configuration (org.apache.hadoop.conf.Configuration): 3 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 3 uses
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 3 uses
MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem): 3 uses
InterruptedIOException (java.io.InterruptedIOException): 2 uses
ExecutionException (java.util.concurrent.ExecutionException): 2 uses
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 2 uses