Example 11 with SnapshotDescription

use of org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription in project hbase by apache.

the class TestSnapshotFromMaster method testDeleteSnapshot.

@Test(timeout = 300000)
public void testDeleteSnapshot() throws Exception {
    String snapshotName = "completed";
    SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName(snapshotName).build();
    DeleteSnapshotRequest request = DeleteSnapshotRequest.newBuilder().setSnapshot(snapshot).build();
    try {
        master.getMasterRpcServices().deleteSnapshot(null, request);
        fail("Master didn't throw exception when attempting to delete snapshot that doesn't exist");
    } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException e) {
    // Expected
    }
    // write one snapshot to the fs
    createSnapshot(snapshotName);
    // then delete the existing snapshot, which shouldn't cause an exception to be thrown
    master.getMasterRpcServices().deleteSnapshot(null, request);
}
Also used : SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription) DeleteSnapshotRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest) Test(org.junit.Test)
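
For comparison, the same delete flow can be driven through the public Admin API instead of the master RPC stub. A minimal sketch, assuming a running cluster; the snapshot and table names are placeholders, and the exact exception surfaced for a missing snapshot may differ by version:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteSnapshotSketch {
    public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // take a snapshot of a (placeholder) table, then delete it by name
            admin.snapshot("completed", TableName.valueOf("testtable"));
            admin.deleteSnapshot("completed");
            try {
                // deleting it again should fail, since the snapshot no longer exists
                admin.deleteSnapshot("completed");
            } catch (IOException expected) {
                // expected: the master rejects deletes of unknown snapshots
            }
        }
    }
}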

Example 12 with SnapshotDescription

use of org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription in project hbase by apache.

the class TestSnapshotFromMaster method testIsDoneContract.

/**
   * Test that the contract from the master for checking on a snapshot is valid.
   * <p>
   * <ol>
   * <li>If a snapshot fails with an error, we expect to get the source error.</li>
   * <li>If there is no snapshot name supplied, we should get an error.</li>
   * <li>If asking about a snapshot that hasn't occurred, you should get an error.</li>
   * </ol>
   */
@Test(timeout = 300000)
public void testIsDoneContract() throws Exception {
    IsSnapshotDoneRequest.Builder builder = IsSnapshotDoneRequest.newBuilder();
    String snapshotName = "asyncExpectedFailureTest";
    // check that we get an exception when looking up snapshot where one hasn't happened
    SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(), UnknownSnapshotException.class);
    // and that we get the same issue, even if we specify a name
    SnapshotDescription desc = SnapshotDescription.newBuilder().setName(snapshotName).setTable(TABLE_NAME.getNameAsString()).build();
    builder.setSnapshot(desc);
    SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(), UnknownSnapshotException.class);
    // set a mock handler to simulate a snapshot
    DisabledTableSnapshotHandler mockHandler = Mockito.mock(DisabledTableSnapshotHandler.class);
    Mockito.when(mockHandler.getException()).thenReturn(null);
    Mockito.when(mockHandler.getSnapshot()).thenReturn(desc);
    Mockito.when(mockHandler.isFinished()).thenReturn(Boolean.TRUE);
    Mockito.when(mockHandler.getCompletionTimestamp()).thenReturn(EnvironmentEdgeManager.currentTime());
    master.getSnapshotManager().setSnapshotHandlerForTesting(TABLE_NAME, mockHandler);
    // if we do a lookup without a snapshot name, we should fail - you should always know your name
    builder = IsSnapshotDoneRequest.newBuilder();
    SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(), UnknownSnapshotException.class);
    // then do the lookup for the snapshot and verify that it is done
    builder.setSnapshot(desc);
    IsSnapshotDoneResponse response = master.getMasterRpcServices().isSnapshotDone(null, builder.build());
    assertTrue("Snapshot didn't complete when it should have.", response.getDone());
    // now try the case where we are looking for a snapshot we didn't take
    builder.setSnapshot(SnapshotDescription.newBuilder().setName("Not A Snapshot").build());
    SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(), UnknownSnapshotException.class);
    // then create a snapshot to the fs and make sure that we can find it when checking done
    snapshotName = "completed";
    desc = createSnapshot(snapshotName);
    builder.setSnapshot(desc);
    response = master.getMasterRpcServices().isSnapshotDone(null, builder.build());
    assertTrue("Completed, on-disk snapshot not found", response.getDone());
}
Also used : IsSnapshotDoneResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription) IsSnapshotDoneRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest) DisabledTableSnapshotHandler(org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler) Test(org.junit.Test)
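
The done-polling side of this contract can be wrapped in a small helper built from the same request/response types; a minimal sketch, assuming (as in the test) that master is an HMaster handle and desc describes a snapshot that has already been requested:

    // A minimal polling sketch using the same RPC exercised above. Assumes, as in the
    // test, that `master` is an HMaster handle and `desc` names a requested snapshot.
    static boolean waitForSnapshotDone(HMaster master, SnapshotDescription desc, long timeoutMs)
            throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(desc).build();
        while (System.currentTimeMillis() < deadline) {
            IsSnapshotDoneResponse response = master.getMasterRpcServices().isSnapshotDone(null, request);
            if (response.getDone()) {
                return true;
            }
            // back off briefly before asking the master again
            Thread.sleep(100);
        }
        return false;
    }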

Example 13 with SnapshotDescription

use of org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription in project hbase by apache.

the class RestoreTool method getTableDesc.

/**
   * Get the table descriptor
   * @param tableName the table that was backed up
   * @return the {@link HTableDescriptor} saved in the backup image of the table
   */
HTableDescriptor getTableDesc(TableName tableName) throws FileNotFoundException, IOException {
    Path tableInfoPath = this.getTableInfoPath(tableName);
    SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath);
    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc);
    HTableDescriptor tableDescriptor = manifest.getTableDescriptor();
    if (!tableDescriptor.getTableName().equals(tableName)) {
        LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " + tableInfoPath.toString());
        LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString());
        throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " + tableInfoPath.toString());
    }
    return tableDescriptor;
}
Also used : Path(org.apache.hadoop.fs.Path) SnapshotManifest(org.apache.hadoop.hbase.snapshot.SnapshotManifest) FileNotFoundException(java.io.FileNotFoundException) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
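
The same manifest lookup can be done standalone against any completed snapshot directory; a minimal sketch, assuming conf, fs, and snapshotDir are already set up (none of them come from RestoreTool itself):

    // Standalone sketch of the lookup above: read the snapshot's own description,
    // open its manifest, and pull the table schema out of it.
    static HTableDescriptor readDescriptorFromSnapshot(Configuration conf, FileSystem fs, Path snapshotDir)
            throws IOException {
        SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
        SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
        return manifest.getTableDescriptor();
    }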

Example 14 with SnapshotDescription

use of org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription in project hbase by apache.

the class RestoreTool method restoreTableAndCreate.

private void restoreTableAndCreate(Connection conn, TableName tableName, TableName newTableName, Path tableBackupPath, boolean truncateIfExists, String lastIncrBackupId) throws IOException {
    if (newTableName == null) {
        newTableName = tableName;
    }
    FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
    // get table descriptor first
    HTableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, lastIncrBackupId);
    if (tableDescriptor != null) {
        LOG.debug("Retrieved descriptor: " + tableDescriptor + " thru " + lastIncrBackupId);
    }
    if (tableDescriptor == null) {
        Path tableSnapshotPath = getTableSnapshotPath(backupRootPath, tableName, backupId);
        if (fileSys.exists(tableSnapshotPath)) {
            // check whether snapshot dir already recorded for target table
            if (snapshotMap.get(tableName) != null) {
                SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath);
                SnapshotManifest manifest = SnapshotManifest.open(conf, fileSys, tableSnapshotPath, desc);
                tableDescriptor = manifest.getTableDescriptor();
            } else {
                tableDescriptor = getTableDesc(tableName);
                snapshotMap.put(tableName, getTableInfoPath(tableName));
            }
            if (tableDescriptor == null) {
                LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost");
            }
        } else {
            throw new IOException("Table snapshot directory: " + tableSnapshotPath + " does not exist.");
        }
    }
    Path tableArchivePath = getTableArchivePath(tableName);
    if (tableArchivePath == null) {
        if (tableDescriptor != null) {
            // found a table descriptor but no archive dir, meaning the table is empty; create the table and exit
            if (LOG.isDebugEnabled()) {
                LOG.debug("find table descriptor but no archive dir for table " + tableName + ", will only create table");
            }
            tableDescriptor.setName(newTableName);
            checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor, truncateIfExists);
            return;
        } else {
            throw new IllegalStateException("Cannot restore hbase table because directory '" + " tableArchivePath is null.");
        }
    }
    if (tableDescriptor == null) {
        tableDescriptor = new HTableDescriptor(newTableName);
    } else {
        tableDescriptor.setName(newTableName);
    }
    // load all files in dir
    try {
        ArrayList<Path> regionPathList = getRegionList(tableName);
        // only try to create the table with all region information, so we can pre-split
        // the regions at a fine grain
        checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, regionPathList, tableDescriptor, truncateIfExists);
        if (tableArchivePath != null) {
            // start real restore through bulkload
            // if the backup target is on local cluster, special action needed
            Path tempTableArchivePath = checkLocalAndBackup(tableArchivePath);
            if (tempTableArchivePath.equals(tableArchivePath)) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("TableArchivePath for bulkload using existPath: " + tableArchivePath);
                }
            } else {
                // point to the tempDir
                regionPathList = getRegionList(tempTableArchivePath);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("TableArchivePath for bulkload using tempPath: " + tempTableArchivePath);
                }
            }
            LoadIncrementalHFiles loader = createLoader(tempTableArchivePath, false);
            for (Path regionPath : regionPathList) {
                String regionName = regionPath.toString();
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Restoring HFiles from directory " + regionName);
                }
                String[] args = { regionName, newTableName.getNameAsString() };
                loader.run(args);
            }
        }
    // we do not restore recovered edits
    } catch (Exception e) {
        throw new IllegalStateException("Cannot restore hbase table", e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) SnapshotManifest(org.apache.hadoop.hbase.snapshot.SnapshotManifest) LoadIncrementalHFiles(org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles) FileSystem(org.apache.hadoop.fs.FileSystem) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) HBackupFileSystem(org.apache.hadoop.hbase.backup.HBackupFileSystem) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription) IOException(java.io.IOException) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
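
The bulk load at the end of the method follows the same argument convention as the standalone LoadIncrementalHFiles tool; a minimal sketch of that call in isolation (the HFile directory and table name are placeholders):

    // Isolated sketch of the bulk-load step; the directory and table name are placeholders.
    static void bulkLoadSketch(Configuration conf) throws Exception {
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
        // same convention RestoreTool passes to loader.run(): <dir of HFiles> <target table>
        String[] args = { "/backup/restore/region-dir", "restored_table" };
        if (loader.run(args) != 0) {
            throw new IllegalStateException("Bulk load of " + args[0] + " failed");
        }
    }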

Example 15 with SnapshotDescription

use of org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription in project hbase by apache.

the class MasterRpcServices method snapshot.

/**
   * Triggers an asynchronous attempt to take a snapshot.
   * {@inheritDoc}
   */
@Override
public SnapshotResponse snapshot(RpcController controller, SnapshotRequest request) throws ServiceException {
    try {
        master.checkInitialized();
        master.snapshotManager.checkSnapshotSupport();
        LOG.info(master.getClientIdAuditPrefix() + " snapshot request for:" + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()));
        // get the snapshot information
        SnapshotDescription snapshot = SnapshotDescriptionUtils.validate(request.getSnapshot(), master.getConfiguration());
        master.snapshotManager.takeSnapshot(snapshot);
        // send back the max amount of time the client should wait for the snapshot to complete
        long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(master.getConfiguration(), snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
        return SnapshotResponse.newBuilder().setExpectedTimeout(waitTime).build();
    } catch (ForeignException e) {
        throw new ServiceException(e.getCause());
    } catch (IOException e) {
        throw new ServiceException(e);
    }
}
Also used : ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) ForeignException(org.apache.hadoop.hbase.errorhandling.ForeignException) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException)
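
On the caller's side, the returned expectedTimeout is the longest the client should wait before polling for completion; a minimal sketch using the same protobuf types, assuming (as in the tests above) that master is an HMaster handle and the snapshot/table names are placeholders:

    // Client-side sketch of this RPC; `master` is assumed to be an HMaster handle
    // as in the tests above, and the snapshot/table names are placeholders.
    static long requestSnapshot(HMaster master)
            throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
        SnapshotDescription desc = SnapshotDescription.newBuilder()
            .setName("nightly_backup")
            .setTable("testtable")
            .build();
        SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(desc).build();
        SnapshotResponse response = master.getMasterRpcServices().snapshot(null, request);
        // the maximum time the caller should wait before checking isSnapshotDone
        return response.getExpectedTimeout();
    }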

Aggregations

SnapshotDescription (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription) 27
Path (org.apache.hadoop.fs.Path) 15
IOException (java.io.IOException) 13
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor) 8
Test (org.junit.Test) 7
FileSystem (org.apache.hadoop.fs.FileSystem) 5
FileNotFoundException (java.io.FileNotFoundException) 4
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) 4
TableName (org.apache.hadoop.hbase.TableName) 4
SnapshotRegionManifest (org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest) 4
MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem) 3
InterruptedIOException (java.io.InterruptedIOException) 2
ArrayList (java.util.ArrayList) 2
HashSet (java.util.HashSet) 2
Configuration (org.apache.hadoop.conf.Configuration) 2
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream) 2
FileStatus (org.apache.hadoop.fs.FileStatus) 2
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) 2
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration) 2
Admin (org.apache.hadoop.hbase.client.Admin) 2