Example 16 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From class FSNamesystem, method startFile:

/**
   * Create a new file entry in the namespace.
   * 
   * For description of parameters and exceptions thrown see
   * {@link ClientProtocol#create}, except it returns valid file status upon
   * success
   */
HdfsFileStatus startFile(String src, PermissionStatus permissions,
        String holder, String clientMachine, EnumSet<CreateFlag> flag,
        boolean createParent, short replication, long blockSize,
        CryptoProtocolVersion[] supportedVersions, boolean logRetryCache)
        throws IOException {
    HdfsFileStatus status;
    try {
        status = startFileInt(src, permissions, holder, clientMachine, flag, createParent, replication, blockSize, supportedVersions, logRetryCache);
    } catch (AccessControlException e) {
        logAuditEvent(false, "create", src);
        throw e;
    }
    logAuditEvent(true, "create", src, null, status);
    return status;
}
Also used: HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), AccessControlException (org.apache.hadoop.security.AccessControlException), SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
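
On the client side, FSNamesystem#startFile is reached through ClientProtocol#create, which the public FileSystem API wraps. A minimal client-side sketch, assuming a running HDFS cluster reachable through fs.defaultFS; the path /tmp/example.txt is hypothetical:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateFileSketch {
    public static void main(String[] args) throws IOException {
        // Assumes fs.defaultFS in the Configuration points at a running NameNode.
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/tmp/example.txt"); // hypothetical path
        // FileSystem#create -> ClientProtocol#create -> FSNamesystem#startFile
        try (FSDataOutputStream out = fs.create(file, false /* overwrite */)) {
            out.writeUTF("hello");
        }
        System.out.println("created: " + fs.getFileStatus(file));
    }
}

The "create" audit events logged in startFile above, on success or failure, correspond to this single client call.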

Example 17 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From class FSNamesystem, method removeXAttr:

void removeXAttr(String src, XAttr xAttr, boolean logRetryCache) throws IOException {
    final String operationName = "removeXAttr";
    HdfsFileStatus auditStat = null;
    writeLock();
    try {
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot remove XAttr entry on " + src);
        auditStat = FSDirXAttrOp.removeXAttr(dir, src, xAttr, logRetryCache);
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        writeUnlock(operationName);
    }
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, auditStat);
}
Also used: HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), AccessControlException (org.apache.hadoop.security.AccessControlException), SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
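
The NameNode-side removeXAttr above is driven by the extended-attribute methods on the public FileSystem API. A minimal sketch, again assuming a running cluster; the path and attribute name are made up for illustration:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class XAttrSketch {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/tmp/xattr-demo"); // hypothetical path
        fs.mkdirs(p);
        // User-namespace extended attributes must carry the "user." prefix.
        fs.setXAttr(p, "user.owner-team", "infra".getBytes(StandardCharsets.UTF_8));
        System.out.println(fs.getXAttrs(p).keySet());
        // Server side, this call lands in FSNamesystem#removeXAttr.
        fs.removeXAttr(p, "user.owner-team");
    }
}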

Example 18 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From class TestFileStatus, method testGetFileInfo:

/** Test calling getFileInfo directly on the client */
@Test
public void testGetFileInfo() throws IOException {
    // Check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory", fs.getFileStatus(path).isDirectory());
    // Make sure getFileInfo returns null for files which do not exist
    HdfsFileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
    assertEquals("Non-existant file should result in null", null, fileInfo);
    Path path1 = new Path("/name1");
    Path path2 = new Path("/name1/name2");
    assertTrue(fs.mkdirs(path1));
    FSDataOutputStream out = fs.create(path2, false);
    out.close();
    fileInfo = dfsClient.getFileInfo(path1.toString());
    assertEquals(1, fileInfo.getChildrenNum());
    fileInfo = dfsClient.getFileInfo(path2.toString());
    assertEquals(0, fileInfo.getChildrenNum());
    // Test getFileInfo throws the right exception given a non-absolute path.
    try {
        dfsClient.getFileInfo("non-absolute");
        fail("getFileInfo for a non-absolute path did not throw IOException");
    } catch (RemoteException re) {
        assertTrue("Wrong exception for invalid file name: " + re, re.toString().contains("Absolute path required"));
    }
}
Also used: Path (org.apache.hadoop.fs.Path), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test)
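
Note the contrast the test relies on: DFSClient#getFileInfo returns null for a missing path, while the public FileSystem#getFileStatus signals absence with a FileNotFoundException. A small sketch of the public-API side, assuming a running cluster:

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetFileInfoSketch {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        try {
            fs.getFileStatus(new Path("/noSuchFile"));
        } catch (FileNotFoundException e) {
            // Unlike DFSClient#getFileInfo, the public API throws instead of returning null.
            System.out.println("missing path reported as: " + e.getMessage());
        }
    }
}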

Example 19 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From class TestFsck, method testECFsck:

@Test
public void testECFsck() throws Exception {
    FileSystem fs = null;
    final long precision = 1L;
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
    int parityBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
    int totalSize = dataBlocks + parityBlocks;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
    fs = cluster.getFileSystem();
    // create a contiguous file
    Path replDirPath = new Path("/replicated");
    Path replFilePath = new Path(replDirPath, "replfile");
    final short factor = 3;
    DFSTestUtil.createFile(fs, replFilePath, 1024, factor, 0);
    DFSTestUtil.waitReplication(fs, replFilePath, factor);
    // create a large striped file
    Path ecDirPath = new Path("/striped");
    Path largeFilePath = new Path(ecDirPath, "largeFile");
    DFSTestUtil.createStripedFile(cluster, largeFilePath, ecDirPath, 1, 2, true);
    // create a small striped file
    Path smallFilePath = new Path(ecDirPath, "smallFile");
    DFSTestUtil.writeFile(fs, smallFilePath, "hello world!");
    long replTime = fs.getFileStatus(replFilePath).getAccessTime();
    long ecTime = fs.getFileStatus(largeFilePath).getAccessTime();
    Thread.sleep(precision);
    setupAuditLogs();
    String outStr = runFsck(conf, 0, true, "/");
    verifyAuditLogs();
    assertEquals(replTime, fs.getFileStatus(replFilePath).getAccessTime());
    assertEquals(ecTime, fs.getFileStatus(largeFilePath).getAccessTime());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    shutdownCluster();
    // restart the cluster; bring up namenode but not the data nodes
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
    outStr = runFsck(conf, 1, true, "/", "-files", "-blocks");
    // expect the result to be corrupt
    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
    String[] outLines = outStr.split("\\r?\\n");
    for (String line : outLines) {
        if (line.contains(largeFilePath.toString())) {
            final HdfsFileStatus file = cluster.getNameNode().getRpcServer().getFileInfo(largeFilePath.toString());
            assertTrue(line.contains("policy=" + file.getErasureCodingPolicy().getName()));
        } else if (line.contains(replFilePath.toString())) {
            assertTrue(line.contains("replication=" + cluster.getFileSystem().getFileStatus(replFilePath).getReplication()));
        }
    }
    System.out.println(outStr);
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), Matchers.anyString (org.mockito.Matchers.anyString), Test (org.junit.Test)
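
The striped files in this test live under an erasure-coded directory. A hedged sketch of marking a directory erasure-coded through DistributedFileSystem, assuming a Hadoop 3.x client; the directory and the policy name RS-6-3-64k are assumptions, and the policy must already be enabled on the cluster:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class EcPolicySketch {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            Path ecDir = new Path("/striped"); // hypothetical directory
            dfs.mkdirs(ecDir);
            // Policy name is an assumption; new files under ecDir will be striped.
            dfs.setErasureCodingPolicy(ecDir, "RS-6-3-64k");
            System.out.println(dfs.getErasureCodingPolicy(ecDir));
        }
    }
}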

Example 20 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From class TestFsck, method testFsckFileNotFound:

/** Test fsck with FileNotFound. */
@Test
public void testFsckFileNotFound() throws Exception {
    // Number of replicas to actually start
    final short numReplicas = 1;
    NameNode namenode = mock(NameNode.class);
    NetworkTopology nettop = mock(NetworkTopology.class);
    Map<String, String[]> pmap = new HashMap<>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    FSNamesystem fsName = mock(FSNamesystem.class);
    FSDirectory fsd = mock(FSDirectory.class);
    BlockManager blockManager = mock(BlockManager.class);
    DatanodeManager dnManager = mock(DatanodeManager.class);
    INodesInPath iip = mock(INodesInPath.class);
    when(namenode.getNamesystem()).thenReturn(fsName);
    when(fsName.getBlockManager()).thenReturn(blockManager);
    when(fsName.getFSDirectory()).thenReturn(fsd);
    when(fsd.getFSNamesystem()).thenReturn(fsName);
    when(fsd.resolvePath(anyObject(), anyString(), any(DirOp.class))).thenReturn(iip);
    when(blockManager.getDatanodeManager()).thenReturn(dnManager);
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, numReplicas, remoteAddress);
    String pathString = "/tmp/testFile";
    long length = 123L;
    boolean isDir = false;
    int blockReplication = 1;
    long blockSize = 128 * 1024L;
    long modTime = 123123123L;
    long accessTime = 123123120L;
    FsPermission perms = FsPermission.getDefault();
    String owner = "foo";
    String group = "bar";
    byte[] symlink = null;
    byte[] path = DFSUtil.string2Bytes(pathString);
    long fileId = 312321L;
    int numChildren = 1;
    byte storagePolicy = 0;
    HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication, blockSize, modTime, accessTime, perms, owner, group, symlink, path, fileId, numChildren, null, storagePolicy, null);
    Result replRes = new ReplicationResult(conf);
    Result ecRes = new ErasureCodingResult(conf);
    try {
        fsck.check(pathString, file, replRes, ecRes);
    } catch (Exception e) {
        fail("Unexpected exception " + e.getMessage());
    }
    assertTrue(replRes.isHealthy());
}
Also used: DirOp (org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp), HashMap (java.util.HashMap), Matchers.anyString (org.mockito.Matchers.anyString), Result (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result), ReplicationResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult), ErasureCodingResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult), StringWriter (java.io.StringWriter), PrintWriter (java.io.PrintWriter), Writer (java.io.Writer), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), ChecksumException (org.apache.hadoop.fs.ChecksumException), TimeoutException (java.util.concurrent.TimeoutException), UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException), FileNotFoundException (java.io.FileNotFoundException), AccessControlException (org.apache.hadoop.security.AccessControlException), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), NetworkTopology (org.apache.hadoop.net.NetworkTopology), InetAddress (java.net.InetAddress), Test (org.junit.Test)
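
This test exercises NamenodeFsck entirely against Mockito mocks, with no MiniDFSCluster. The when(...).thenReturn(...) stubbing pattern it leans on is shown below in a minimal, self-contained form; the List here is just a stand-in for the mocked NameNode, FSNamesystem, and BlockManager collaborators above:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.List;

public class MockingSketch {
    public static void main(String[] args) {
        // A mocked collaborator returns canned answers, so the code under
        // test never needs a live cluster behind it.
        @SuppressWarnings("unchecked")
        List<String> names = (List<String>) mock(List.class);
        when(names.get(0)).thenReturn("foo");
        System.out.println(names.get(0)); // prints "foo" with no real list backing it
    }
}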

Aggregations

HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 124
Test (org.junit.Test): 51
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 34
IOException (java.io.IOException): 28
InetSocketAddress (java.net.InetSocketAddress): 28
XDR (org.apache.hadoop.oncrpc.XDR): 28
AccessControlException (org.apache.hadoop.security.AccessControlException): 26
Path (org.apache.hadoop.fs.Path): 23
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException): 23
FileNotFoundException (java.io.FileNotFoundException): 16
DFSClient (org.apache.hadoop.hdfs.DFSClient): 11
DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing): 11
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3): 8
FileStatus (org.apache.hadoop.fs.FileStatus): 7
Matchers.anyString (org.mockito.Matchers.anyString): 7
Configuration (org.apache.hadoop.conf.Configuration): 6