
Example 1 with FSDirectory

Use of org.apache.hadoop.hdfs.server.namenode.FSDirectory in project hadoop by apache.

From the class TestNestedSnapshots, method testDisallowNestedSnapshottableDir.

/**
   * When we have nested snapshottable directories and we try to reset a
   * snapshottable descendant back to a regular directory, we need to replace
   * the snapshottable descendant with an INodeDirectoryWithSnapshot.
   */
@Test
public void testDisallowNestedSnapshottableDir() throws Exception {
    cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
    final Path dir = new Path("/dir");
    final Path sub = new Path(dir, "sub");
    hdfs.mkdirs(sub);
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
    final Path file = new Path(sub, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INode subNode = fsdir.getINode(sub.toString());
    assertTrue(subNode.asDirectory().isWithSnapshot());
    hdfs.allowSnapshot(sub);
    subNode = fsdir.getINode(sub.toString());
    assertTrue(subNode.isDirectory() && subNode.asDirectory().isSnapshottable());
    hdfs.disallowSnapshot(sub);
    subNode = fsdir.getINode(sub.toString());
    assertTrue(subNode.asDirectory().isWithSnapshot());
}
Also used : Path(org.apache.hadoop.fs.Path) INode(org.apache.hadoop.hdfs.server.namenode.INode) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) Test(org.junit.Test)
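
The SnapshotTestHelper.createSnapshot(hdfs, dir, "s1") call above bundles the two public DistributedFileSystem operations involved. A minimal equivalent sketch, assuming the hdfs and dir handles from the test:

// Roughly what SnapshotTestHelper.createSnapshot(hdfs, dir, "s1") does:
// mark the directory snapshottable, then take the named snapshot.
hdfs.allowSnapshot(dir);
hdfs.createSnapshot(dir, "s1");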

Example 2 with FSDirectory

Use of org.apache.hadoop.hdfs.server.namenode.FSDirectory in project hadoop by apache.

From the class CacheReplicationMonitor, method rescanCacheDirectives.

/**
   * Scan all CacheDirectives.  Use the information to figure out
   * what cache replication factor each block should have.
   */
private void rescanCacheDirectives() {
    FSDirectory fsDir = namesystem.getFSDirectory();
    final long now = new Date().getTime();
    for (CacheDirective directive : cacheManager.getCacheDirectives()) {
        scannedDirectives++;
        // Skip processing this entry if it has expired
        if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
            LOG.debug("Directive {}: the directive expired at {} (now = {})", directive.getId(), directive.getExpiryTime(), now);
            continue;
        }
        String path = directive.getPath();
        INode node;
        try {
            node = fsDir.getINode(path, DirOp.READ);
        } catch (IOException e) {
            // We don't cache through symlinks or invalid paths
            LOG.debug("Directive {}: Failed to resolve path {} ({})", directive.getId(), path, e.getMessage());
            continue;
        }
        if (node == null) {
            LOG.debug("Directive {}: No inode found at {}", directive.getId(), path);
        } else if (node.isDirectory()) {
            INodeDirectory dir = node.asDirectory();
            ReadOnlyList<INode> children = dir.getChildrenList(Snapshot.CURRENT_STATE_ID);
            for (INode child : children) {
                if (child.isFile()) {
                    rescanFile(directive, child.asFile());
                }
            }
        } else if (node.isFile()) {
            rescanFile(directive, node.asFile());
        } else {
            LOG.debug("Directive {}: ignoring non-directive, non-file inode {} ", directive.getId(), node);
        }
    }
}
Also used : INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) INode(org.apache.hadoop.hdfs.server.namenode.INode) ReadOnlyList(org.apache.hadoop.hdfs.util.ReadOnlyList) CacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirective) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) IOException(java.io.IOException) Date(java.util.Date)
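
The CacheDirectives scanned here are registered by clients through the HDFS caching API. A minimal client-side sketch, assuming a DistributedFileSystem handle named dfs and a placeholder pool name:

// Hypothetical setup: create a cache pool and a directive pointing at a file.
// CacheReplicationMonitor later resolves the directive's path against
// FSDirectory as shown above.
dfs.addCachePool(new CachePoolInfo("testPool"));
long directiveId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
    .setPath(new Path("/cached/file"))
    .setPool("testPool")
    .setReplication((short) 2)
    .build());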

Example 3 with FSDirectory

Use of org.apache.hadoop.hdfs.server.namenode.FSDirectory in project hadoop by apache.

From the class TestProtectedDirectories, method testReconfigureProtectedPaths.

@Test
public void testReconfigureProtectedPaths() throws Throwable {
    Configuration conf = new HdfsConfiguration();
    Collection<Path> protectedPaths = Arrays.asList(new Path("/a"), new Path("/b"), new Path("/c"));
    Collection<Path> unprotectedPaths = Arrays.asList();
    MiniDFSCluster cluster = setupTestCase(conf, protectedPaths, unprotectedPaths);
    SortedSet<String> protectedPathsNew = new TreeSet<>(FSDirectory.normalizePaths(Arrays.asList("/aa", "/bb", "/cc"), FS_PROTECTED_DIRECTORIES));
    String protectedPathsStrNew = "/aa,/bb,/cc";
    NameNode nn = cluster.getNameNode();
    // change properties
    nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, protectedPathsStrNew);
    FSDirectory fsDirectory = nn.getNamesystem().getFSDirectory();
    // verify change
    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), protectedPathsNew, fsDirectory.getProtectedDirectories());
    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), protectedPathsStrNew, nn.getConf().get(FS_PROTECTED_DIRECTORIES));
    // revert to default
    nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, null);
    // verify default
    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), new TreeSet<String>(), fsDirectory.getProtectedDirectories());
    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), null, nn.getConf().get(FS_PROTECTED_DIRECTORIES));
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Test(org.junit.Test)
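
Besides runtime reconfiguration, protected directories can also be set statically before the NameNode starts. A minimal sketch, assuming the key name fs.protected.directories (the value behind FS_PROTECTED_DIRECTORIES) and a hypothetical /important path:

// Hypothetical static configuration: deleting or renaming a non-empty
// directory under a protected path is then rejected by the NameNode.
Configuration conf = new HdfsConfiguration();
conf.set("fs.protected.directories", "/important");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();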

Example 4 with FSDirectory

Use of org.apache.hadoop.hdfs.server.namenode.FSDirectory in project hadoop by apache.

From the class DFSTestUtil, method createStripedFile.

/**
   * Creates the metadata of a file in striped layout. This method only
   * manipulates the NameNode state without injecting data into the DataNodes.
   * You should disable the periodic heartbeat before using this.
   * @param file Path of the file to create
   * @param dir Parent path of the file
   * @param numBlocks Number of striped block groups to add to the file
   * @param numStripesPerBlk Number of striped cells in each block
   * @param toMkdir whether to create {@code dir} and set the erasure coding
   *                policy on it first
   * @param ecPolicy erasure coding policy to apply to the created file. A null
   *                 value means using the default erasure coding policy.
   */
public static void createStripedFile(MiniDFSCluster cluster, Path file, Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir, ErasureCodingPolicy ecPolicy) throws Exception {
    DistributedFileSystem dfs = cluster.getFileSystem();
    // If outer test already set EC policy, dir should be left as null
    if (toMkdir) {
        assert dir != null;
        dfs.mkdirs(dir);
        try {
            dfs.getClient().setErasureCodingPolicy(dir.toString(), ecPolicy.getName());
        } catch (IOException e) {
            if (!e.getMessage().contains("non-empty directory")) {
                throw e;
            }
        }
    }
    cluster.getNameNodeRpc().create(file.toString(), new FsPermission((short) 0755), dfs.getClient().getClientName(), new EnumSetWritable<>(EnumSet.of(CreateFlag.CREATE)), false, (short) 1, 128 * 1024 * 1024L, null);
    FSNamesystem ns = cluster.getNamesystem();
    FSDirectory fsdir = ns.getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
    ExtendedBlock previous = null;
    for (int i = 0; i < numBlocks; i++) {
        Block newBlock = addBlockToFile(true, cluster.getDataNodes(), dfs, ns, file.toString(), fileNode, dfs.getClient().getClientName(), previous, numStripesPerBlk, 0);
        previous = new ExtendedBlock(ns.getBlockPoolId(), newBlock);
    }
    dfs.getClient().namenode.complete(file.toString(), dfs.getClient().getClientName(), previous, fileNode.getId());
}
Also used : ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile)
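
A hedged usage sketch of the helper above, assuming a running MiniDFSCluster with DataNode heartbeats disabled and an /ec directory on which the caller has already set an erasure coding policy (so, per the comment in the method, dir is left null and toMkdir is false):

// Hypothetical call: adds the metadata for two striped block groups of four
// stripes each to /ec/striped, without writing any data to DataNodes.
DFSTestUtil.createStripedFile(cluster, new Path("/ec/striped"), null,
    2, 4, false, null);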

Example 5 with FSDirectory

Use of org.apache.hadoop.hdfs.server.namenode.FSDirectory in project hadoop by apache.

From the class TestFileAppend4, method testAppendInsufficientLocations.

/**
   * Test that an append with no locations fails with an exception
   * showing insufficient locations.
   */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
    Configuration conf = new Configuration();
    // lower heartbeat interval for fast recognition of DN
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    DistributedFileSystem fileSystem = null;
    try {
        // create a file with replication 2
        fileSystem = cluster.getFileSystem();
        Path f = new Path("/testAppend");
        FSDataOutputStream create = fileSystem.create(f, (short) 2);
        create.write("/testAppend".getBytes());
        create.close();
        // Check for replications
        DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
        // Shut down all DNs that have the last block location for the file
        LocatedBlocks lbs = fileSystem.dfs.getNamenode().getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
        List<DataNode> dnsOfCluster = cluster.getDataNodes();
        DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
        for (DataNode dn : dnsOfCluster) {
            for (DatanodeInfo loc : dnsWithLocations) {
                if (dn.getDatanodeId().equals(loc)) {
                    dn.shutdown();
                    DFSTestUtil.waitForDatanodeDeath(dn);
                }
            }
        }
        // Wait till 0 replication is recognized
        DFSTestUtil.waitReplication(fileSystem, f, (short) 0);
        // Append to the file; none of the remaining live DNs have the last
        // block, so this should fail.
        try {
            fileSystem.append(f);
            fail("Append should fail because insufficient locations");
        } catch (IOException e) {
            LOG.info("Expected exception: ", e);
        }
        FSDirectory dir = cluster.getNamesystem().getFSDirectory();
        final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
        assertTrue("File should remain closed", !inode.isUnderConstruction());
    } finally {
        if (null != fileSystem) {
            fileSystem.close();
        }
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) IOException(java.io.IOException) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
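
For reference, the closed-file check done above through FSDirectory and INodeFile has a public client-side counterpart. A minimal sketch, assuming the fileSystem and f handles from inside the try block:

// DistributedFileSystem.isFileClosed reports whether the file is closed
// without reaching into NameNode internals.
assertTrue("File should remain closed", fileSystem.isFileClosed(f));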

Aggregations

FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory): 10
Test (org.junit.Test): 7
Path (org.apache.hadoop.fs.Path): 6
IOException (java.io.IOException): 4
INode (org.apache.hadoop.hdfs.server.namenode.INode): 4
INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory): 4
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 4
INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath): 4
Configuration (org.apache.hadoop.conf.Configuration): 2
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 2
QuotaCounts (org.apache.hadoop.hdfs.server.namenode.QuotaCounts): 2
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff): 2
File (java.io.File): 1
InterruptedIOException (java.io.InterruptedIOException): 1
Date (java.util.Date): 1
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 1
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1
Block (org.apache.hadoop.hdfs.protocol.Block): 1