Search in sources :

Example 11 with SnapshotException

use of org.apache.hadoop.hdfs.protocol.SnapshotException in project hadoop by apache.

The class FSDirSnapshotOp, method checkSnapshot.

/**
   * Check if the given INode (or one of its descendants) is snapshottable and
   * already has snapshots.
   *
   * @param target The given INode
   * @param snapshottableDirs The list of directories that are snapshottable
   *                          but do not have snapshots yet
   */
private static void checkSnapshot(INode target, List<INodeDirectory> snapshottableDirs) throws SnapshotException {
    if (target.isDirectory()) {
        INodeDirectory targetDir = target.asDirectory();
        DirectorySnapshottableFeature sf = targetDir.getDirectorySnapshottableFeature();
        if (sf != null) {
            if (sf.getNumSnapshots() > 0) {
                String fullPath = targetDir.getFullPathName();
                throw new SnapshotException("The directory " + fullPath + " cannot be deleted since " + fullPath + " is snapshottable and already has snapshots");
            } else {
                if (snapshottableDirs != null) {
                    snapshottableDirs.add(targetDir);
                }
            }
        }
        for (INode child : targetDir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
            checkSnapshot(child, snapshottableDirs);
        }
    }
}
Also used : DirectorySnapshottableFeature(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature) SnapshotException(org.apache.hadoop.hdfs.protocol.SnapshotException)

Example 12 with SnapshotException

use of org.apache.hadoop.hdfs.protocol.SnapshotException in project hadoop by apache.

The class FSDirStatAndListingOp, method getSnapshotsListing.

/**
   * Get a listing of all the snapshots of a snapshottable directory
   */
private static DirectoryListing getSnapshotsListing(FSDirectory fsd, INodesInPath iip, byte[] startAfter) throws IOException {
    Preconditions.checkState(fsd.hasReadLock());
    Preconditions.checkArgument(iip.isDotSnapshotDir(), "%s does not end with %s", iip.getPath(), HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
    // drop off the null .snapshot component
    iip = iip.getParentINodesInPath();
    final String dirPath = iip.getPath();
    final INode node = iip.getLastINode();
    final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
    final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
    if (sf == null) {
        throw new SnapshotException("Directory is not a snapshottable directory: " + dirPath);
    }
    final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
    int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
    skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
    int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
    final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
    for (int i = 0; i < numOfListing; i++) {
        Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
        listing[i] = createFileStatus(fsd, iip, sRoot, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
    }
    return new DirectoryListing(listing, snapshots.size() - skipSize - numOfListing);
}
Also used : DirectoryListing(org.apache.hadoop.hdfs.protocol.DirectoryListing) Snapshot(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) DirectorySnapshottableFeature(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature) SnapshotException(org.apache.hadoop.hdfs.protocol.SnapshotException)

Example 13 with SnapshotException

use of org.apache.hadoop.hdfs.protocol.SnapshotException in project hadoop by apache.

The class FSNamesystem, method listCorruptFileBlocksWithSnapshot.

/**
   * Get the list of corrupt blocks and corresponding full file path
   * including snapshots in given snapshottable directories.
   * @param path Restrict corrupt files to this portion of namespace.
   * @param snapshottableDirs Snapshottable directories. Passing in null
   *                          will only return corrupt blocks in non-snapshots.
   * @param cookieTab Support for continuation; cookieTab tells where
   *                  to start from.
   * @return a list in which each entry describes a corrupt file/block
   * @throws IOException
   */
List<String> listCorruptFileBlocksWithSnapshot(String path, List<String> snapshottableDirs, String[] cookieTab) throws IOException {
    final Collection<CorruptFileBlockInfo> corruptFileBlocks = listCorruptFileBlocks(path, cookieTab);
    List<String> list = new ArrayList<String>();
    // Precalculate snapshottableFeature list
    List<DirectorySnapshottableFeature> lsf = new ArrayList<>();
    if (snapshottableDirs != null) {
        for (String snap : snapshottableDirs) {
            final INode isnap = getFSDirectory().getINode(snap, DirOp.READ_LINK);
            final DirectorySnapshottableFeature sf = isnap.asDirectory().getDirectorySnapshottableFeature();
            if (sf == null) {
                throw new SnapshotException("Directory is not a snapshottable directory: " + snap);
            }
            lsf.add(sf);
        }
    }
    for (CorruptFileBlockInfo c : corruptFileBlocks) {
        if (getFileInfo(c.path, true) != null) {
            list.add(c.toString());
        }
        final Collection<String> snaps = FSDirSnapshotOp.getSnapshotFiles(getFSDirectory(), lsf, c.path);
        if (snaps != null) {
            for (String snap : snaps) {
                // follow the syntax of CorruptFileBlockInfo#toString()
                list.add(c.block.getBlockName() + "\t" + snap);
            }
        }
    }
    return list;
}
Also used : ArrayList(java.util.ArrayList) DirectorySnapshottableFeature(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature) SnapshotException(org.apache.hadoop.hdfs.protocol.SnapshotException)

Example 14 with SnapshotException

use of org.apache.hadoop.hdfs.protocol.SnapshotException in project hadoop by apache.

The class DirectorySnapshottableFeature, method renameSnapshot.

/**
   * Rename a snapshot
   * @param path
   *          The directory path where the snapshot was taken. Used for
   *          generating exception message.
   * @param oldName
   *          Old name of the snapshot
   * @param newName
   *          New name the snapshot will be renamed to
   * @throws SnapshotException
   *           Throw SnapshotException when either the snapshot with the old
   *           name does not exist or a snapshot with the new name already
   *           exists
   */
public void renameSnapshot(String path, String oldName, String newName) throws SnapshotException {
    if (newName.equals(oldName)) {
        return;
    }
    final int indexOfOld = searchSnapshot(DFSUtil.string2Bytes(oldName));
    if (indexOfOld < 0) {
        throw new SnapshotException("The snapshot " + oldName + " does not exist for directory " + path);
    } else {
        final byte[] newNameBytes = DFSUtil.string2Bytes(newName);
        int indexOfNew = searchSnapshot(newNameBytes);
        if (indexOfNew >= 0) {
            throw new SnapshotException("The snapshot " + newName + " already exists for directory " + path);
        }
        // remove the one with old name from snapshotsByNames
        Snapshot snapshot = snapshotsByNames.remove(indexOfOld);
        final INodeDirectory ssRoot = snapshot.getRoot();
        ssRoot.setLocalName(newNameBytes);
        indexOfNew = -indexOfNew - 1;
        if (indexOfNew <= indexOfOld) {
            snapshotsByNames.add(indexOfNew, snapshot);
        } else {
            // indexOfNew > indexOfOld
            snapshotsByNames.add(indexOfNew - 1, snapshot);
        }
    }
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) SnapshotException(org.apache.hadoop.hdfs.protocol.SnapshotException)

Example 15 with SnapshotException

use of org.apache.hadoop.hdfs.protocol.SnapshotException in project hive by apache.

The class SnapshotUtils, method isSnapshotAvailable.

/**
 *  Checks whether a given snapshot exists or not.
 * @param dfs DistributedFileSystem.
 * @param path path of snapshot.
 * @param snapshotPrefix snapshot name prefix.
 * @param snapshotName name of snapshot.
 * @param conf Hive configuration.
 * @return true if the snapshot exists.
 * @throws IOException in case of any error.
 */
public static boolean isSnapshotAvailable(DistributedFileSystem dfs, Path path, String snapshotPrefix, String snapshotName, HiveConf conf) throws IOException {
    AtomicBoolean isSnapAvlb = new AtomicBoolean(false);
    Retryable retryable = Retryable.builder().withHiveConf(conf).withRetryOnException(IOException.class).withFailOnException(SnapshotException.class).build();
    try {
        retryable.executeCallable(() -> {
            isSnapAvlb.set(dfs.exists(new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR + "/" + snapshotPrefix + snapshotName)));
            LOG.debug("Snapshot for path {} is {}", path, isSnapAvlb.get() ? "available" : "unavailable");
            return null;
        });
    } catch (Exception e) {
        throw new SnapshotException("Failed to check if snapshot is available on " + path, e);
    }
    return isSnapAvlb.get();
}
Also used : Path(org.apache.hadoop.fs.Path) ReplExternalTables.externalTableDataPath(org.apache.hadoop.hive.ql.exec.repl.ReplExternalTables.externalTableDataPath) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Retryable(org.apache.hadoop.hive.ql.exec.util.Retryable) SnapshotException(org.apache.hadoop.hdfs.protocol.SnapshotException) IOException(java.io.IOException) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) FileNotFoundException(java.io.FileNotFoundException) SnapshotException(org.apache.hadoop.hdfs.protocol.SnapshotException)

Aggregations

SnapshotException (org.apache.hadoop.hdfs.protocol.SnapshotException)17 Path (org.apache.hadoop.fs.Path)9 INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory)5 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)4 Test (org.junit.Test)4 IOException (java.io.IOException)3 DirectorySnapshottableFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature)3 SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)3 FileNotFoundException (java.io.FileNotFoundException)2 INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath)2 ReplExternalTables.externalTableDataPath (org.apache.hadoop.hive.ql.exec.repl.ReplExternalTables.externalTableDataPath)2 HiveException (org.apache.hadoop.hive.ql.metadata.HiveException)2 RemoteException (org.apache.hadoop.ipc.RemoteException)2 TException (org.apache.thrift.TException)2 UnsupportedEncodingException (java.io.UnsupportedEncodingException)1 ArrayList (java.util.ArrayList)1 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)1 FsPermission (org.apache.hadoop.fs.permission.FsPermission)1 DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing)1 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)1