Example 46 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class DFSClient method callAppend.

/** Method to get stream returned by append call */
private DFSOutputStream callAppend(String src, EnumSet<CreateFlag> flag, Progressable progress, String[] favoredNodes) throws IOException {
    CreateFlag.validateForAppend(flag);
    try {
        final LastBlockWithStatus blkWithStatus = callAppend(src, new EnumSetWritable<>(flag, CreateFlag.class));
        HdfsFileStatus status = blkWithStatus.getFileStatus();
        if (status == null) {
            LOG.debug("NameNode is on an older version, request file " + "info with additional RPC call for file: {}", src);
            status = getFileInfo(src);
        }
        return DFSOutputStream.newStreamForAppend(this, src, flag, progress, blkWithStatus.getLastBlock(), status, dfsClientConf.createChecksum(null), favoredNodes);
    } catch (RemoteException re) {
        throw re.unwrapRemoteException(AccessControlException.class, FileNotFoundException.class, SafeModeException.class, DSQuotaExceededException.class, QuotaByStorageTypeExceededException.class, UnsupportedOperationException.class, UnresolvedPathException.class, SnapshotAccessControlException.class);
    }
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) QuotaByStorageTypeExceededException(org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException) LastBlockWithStatus(org.apache.hadoop.hdfs.protocol.LastBlockWithStatus) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) DSQuotaExceededException(org.apache.hadoop.hdfs.protocol.DSQuotaExceededException) SafeModeException(org.apache.hadoop.hdfs.server.namenode.SafeModeException) RemoteException(org.apache.hadoop.ipc.RemoteException) UnresolvedPathException(org.apache.hadoop.hdfs.protocol.UnresolvedPathException)
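
Though callAppend itself is private, applications reach it through the public API: FileSystem#append on an hdfs:// URI resolves to DistributedFileSystem, which delegates to DFSClient and ultimately to this method. A minimal usage sketch, assuming a reachable cluster with append enabled (the path is illustrative, not from the source above):

import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at an HDFS cluster with append enabled.
        try (FileSystem fs = FileSystem.get(conf);
             OutputStream out = fs.append(new Path("/tmp/append-demo.txt"))) {
            out.write("appended line\n".getBytes(StandardCharsets.UTF_8));
        }
    }
}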

Example 47 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class DistributedFileSystem method getFileStatus.

/**
   * Returns the stat information about the file.
   * @throws FileNotFoundException if the file does not exist.
   */
@Override
public FileStatus getFileStatus(Path f) throws IOException {
    statistics.incrementReadOps(1);
    storageStatistics.incrementOpCounter(OpType.GET_FILE_STATUS);
    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<FileStatus>() {

        @Override
        public FileStatus doCall(final Path p) throws IOException {
            HdfsFileStatus fi = dfs.getFileInfo(getPathName(p));
            if (fi != null) {
                return fi.makeQualified(getUri(), p);
            } else {
                throw new FileNotFoundException("File does not exist: " + p);
            }
        }

        @Override
        public FileStatus next(final FileSystem fs, final Path p) throws IOException {
            return fs.getFileStatus(p);
        }
    }.resolve(this, absF);
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) HdfsLocatedFileStatus(org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus) FileSystem(org.apache.hadoop.fs.FileSystem) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException)
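
From the caller's perspective this is plain FileSystem#getFileStatus; the conversion from HdfsFileStatus via makeQualified happens inside doCall. A short sketch, assuming a reachable cluster (the path is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StatSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            // Resolves through DistributedFileSystem#getFileStatus when
            // fs.defaultFS is an hdfs:// URI; throws FileNotFoundException
            // if the path does not exist.
            FileStatus st = fs.getFileStatus(new Path("/tmp/stat-demo.txt"));
            System.out.println("len=" + st.getLen()
                + " replication=" + st.getReplication()
                + " isDirectory=" + st.isDirectory());
        }
    }
}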

Example 48 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class NamenodeFsck method fsck.

/**
   * Check files on DFS, starting from the indicated path.
   */
public void fsck() {
    final long startTime = Time.monotonicNow();
    try {
        if (blockIds != null) {
            String[] blocks = blockIds.split(" ");
            StringBuilder sb = new StringBuilder();
            sb.append("FSCK started by " + UserGroupInformation.getCurrentUser() + " from " + remoteAddress + " at " + new Date());
            out.println(sb);
            sb.append(" for blockIds: \n");
            for (String blk : blocks) {
                if (blk == null || !blk.contains(Block.BLOCK_FILE_PREFIX)) {
                    out.println("Incorrect blockId format: " + blk);
                    continue;
                }
                out.print("\n");
                blockIdCK(blk);
                sb.append(blk + "\n");
            }
            LOG.info(sb);
            namenode.getNamesystem().logFsckEvent("/", remoteAddress);
            out.flush();
            return;
        }
        String msg = "FSCK started by " + UserGroupInformation.getCurrentUser() + " from " + remoteAddress + " for path " + path + " at " + new Date();
        LOG.info(msg);
        out.println(msg);
        namenode.getNamesystem().logFsckEvent(path, remoteAddress);
        if (snapshottableDirs != null) {
            SnapshottableDirectoryStatus[] snapshotDirs = namenode.getRpcServer().getSnapshottableDirListing();
            if (snapshotDirs != null) {
                for (SnapshottableDirectoryStatus dir : snapshotDirs) {
                    snapshottableDirs.add(dir.getFullPath().toString());
                }
            }
        }
        final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(path);
        if (file != null) {
            if (showCorruptFileBlocks) {
                listCorruptFileBlocks();
                return;
            }
            if (this.showStoragePolcies) {
                storageTypeSummary = new StoragePolicySummary(namenode.getNamesystem().getBlockManager().getStoragePolicies());
            }
            Result replRes = new ReplicationResult(conf);
            Result ecRes = new ErasureCodingResult(conf);
            check(path, file, replRes, ecRes);
            out.print("\nStatus: ");
            out.println(replRes.isHealthy() && ecRes.isHealthy() ? "HEALTHY" : "CORRUPT");
            out.println(" Number of data-nodes:\t" + totalDatanodes);
            out.println(" Number of racks:\t\t" + networktopology.getNumOfRacks());
            out.println(" Total dirs:\t\t\t" + totalDirs);
            out.println(" Total symlinks:\t\t" + totalSymlinks);
            out.println("\nReplicated Blocks:");
            out.println(replRes);
            out.println("\nErasure Coded Block Groups:");
            out.println(ecRes);
            if (this.showStoragePolcies) {
                out.print(storageTypeSummary);
            }
            out.println("FSCK ended at " + new Date() + " in " + (Time.monotonicNow() - startTime + " milliseconds"));
            // If there were internal errors during the fsck operation, we want
            // to return a failure status, even if those errors were not
            // fatal. Otherwise many unit tests will pass even when there are bugs.
            if (internalError) {
                throw new IOException("fsck encountered internal errors!");
            }
            // The DFSck client scans for the string HEALTHY/CORRUPT to check
            // the status of the file system and return the appropriate code.
            // Changing the output string might break testcases. Also note this
            // must be the last line of the report.
            if (replRes.isHealthy() && ecRes.isHealthy()) {
                out.print("\n\nThe filesystem under path '" + path + "' " + HEALTHY_STATUS);
            } else {
                out.print("\n\nThe filesystem under path '" + path + "' " + CORRUPT_STATUS);
            }
        } else {
            out.print("\n\nPath '" + path + "' " + NONEXISTENT_STATUS);
        }
    } catch (Exception e) {
        String errMsg = "Fsck on path '" + path + "' " + FAILURE_STATUS;
        LOG.warn(errMsg, e);
        out.println("FSCK ended at " + new Date() + " in " + (Time.monotonicNow() - startTime + " milliseconds"));
        out.println(e.getMessage());
        out.print("\n\n" + errMsg);
    } finally {
        out.close();
    }
}
Also used : IOException(java.io.IOException) Date(java.util.Date) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) SnapshottableDirectoryStatus(org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus)
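
NamenodeFsck runs inside the NameNode; clients normally trigger it with the `hdfs fsck <path>` shell command, which is backed by the DFSck tool. A minimal programmatic sketch (the path is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class FsckSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Equivalent to `hdfs fsck /some/path` on the command line; a zero
        // exit code corresponds to a HEALTHY result.
        int rc = ToolRunner.run(new DFSck(conf), new String[] {"/some/path"});
        System.exit(rc);
    }
}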

Example 49 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class NamenodeFsck method lostFoundInit.

private void lostFoundInit(DFSClient dfs) {
    lfInited = true;
    try {
        String lfName = "/lost+found";
        final HdfsFileStatus lfStatus = dfs.getFileInfo(lfName);
        if (lfStatus == null) {
            // not exists
            lfInitedOk = dfs.mkdirs(lfName, null, true);
            lostFound = lfName;
        } else if (!lfStatus.isDir()) {
            // exists but not a directory
            LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
            lfInitedOk = false;
        } else {
            // exists and is a directory
            lostFound = lfName;
            lfInitedOk = true;
        }
    } catch (Exception e) {
        e.printStackTrace();
        lfInitedOk = false;
    }
    if (lostFound == null) {
        LOG.warn("Cannot initialize /lost+found .");
        lfInitedOk = false;
        internalError = true;
    }
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) AccessControlException(org.apache.hadoop.security.AccessControlException)
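
The three branches above (missing, exists-but-a-file, exists-and-a-directory) form a common ensure-directory pattern. A generic sketch of the same logic against the public FileSystem API; note that unlike DFSClient#getFileInfo, which returns null for a missing path, FileSystem#getFileStatus signals absence with FileNotFoundException (ensureDir is a hypothetical helper):

import java.io.FileNotFoundException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class EnsureDirSketch {
    // Returns true if dir exists as a directory afterwards, false if the
    // name is taken by a regular file or creation failed.
    static boolean ensureDir(FileSystem fs, Path dir) throws Exception {
        try {
            FileStatus st = fs.getFileStatus(dir);
            return st.isDirectory();   // exists: usable only if a directory
        } catch (FileNotFoundException e) {
            return fs.mkdirs(dir);     // missing: try to create it
        }
    }
}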

Example 50 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class TestErasureCodingPolicies method verifyErasureCodingInfo.

private void verifyErasureCodingInfo(String src, ErasureCodingPolicy usingECPolicy) throws IOException {
    HdfsFileStatus hdfsFileStatus = fs.getClient().getFileInfo(src);
    ErasureCodingPolicy ecPolicy = hdfsFileStatus.getErasureCodingPolicy();
    assertNotNull(ecPolicy);
    assertEquals("Actually used ecPolicy should be equal with target ecPolicy", usingECPolicy, ecPolicy);
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy)
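
To exercise this check end to end, a test would typically set a policy on a directory and read it back. A hedged sketch: RS-6-3-1024k is one of the built-in policy names, but the setErasureCodingPolicy signature varies across Hadoop 3 releases (early alphas took an ErasureCodingPolicy object, later releases take the policy name), so treat this as illustrative:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcSketch {
    // Assumes `dfs` is an open DistributedFileSystem handle and `dir` an
    // existing directory on a cluster where the policy is enabled.
    static void applyAndCheck(DistributedFileSystem dfs, Path dir) throws Exception {
        dfs.setErasureCodingPolicy(dir, "RS-6-3-1024k");
        ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(dir);
        System.out.println("Effective policy: " + ecPolicy.getName());
    }
}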

Aggregations

HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 124
Test (org.junit.Test): 51
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 34
IOException (java.io.IOException): 28
InetSocketAddress (java.net.InetSocketAddress): 28
XDR (org.apache.hadoop.oncrpc.XDR): 28
AccessControlException (org.apache.hadoop.security.AccessControlException): 26
Path (org.apache.hadoop.fs.Path): 23
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException): 23
FileNotFoundException (java.io.FileNotFoundException): 16
DFSClient (org.apache.hadoop.hdfs.DFSClient): 11
DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing): 11
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3): 8
FileStatus (org.apache.hadoop.fs.FileStatus): 7
Matchers.anyString (org.mockito.Matchers.anyString): 7
Configuration (org.apache.hadoop.conf.Configuration): 6