Example 81 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

From class TestDecommissioningStatus, method testDecommissionStatus.

/**
 * Tests Decommissioning Status in DFS.
 */
@Test
public void testDecommissionStatus() throws Exception {
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", 2, info.length);
    DistributedFileSystem fileSys = cluster.getFileSystem();
    DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));
    short replicas = numDatanodes;
    //
    // Decommission one node. Verify the decommission status
    //
    Path file1 = new Path("decommission.dat");
    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, replicas, seed);
    Path file2 = new Path("decommission1.dat");
    FSDataOutputStream st1 = AdminStatesBaseTest.writeIncompleteFile(fileSys, file2, replicas, (short) (fileSize / blockSize));
    for (DataNode d : cluster.getDataNodes()) {
        DataNodeTestUtils.triggerBlockReport(d);
    }
    FSNamesystem fsn = cluster.getNamesystem();
    final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
    for (int iteration = 0; iteration < numDatanodes; iteration++) {
        String downnode = decommissionNode(client, iteration);
        dm.refreshNodes(conf);
        decommissionedNodes.add(downnode);
        BlockManagerTestUtil.recheckDecommissionState(dm);
        final List<DatanodeDescriptor> decommissioningNodes = dm.getDecommissioningNodes();
        if (iteration == 0) {
            assertEquals(decommissioningNodes.size(), 1);
            DatanodeDescriptor decommNode = decommissioningNodes.get(0);
            checkDecommissionStatus(decommNode, 3, 0, 1);
            checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 1), fileSys, admin);
        } else {
            assertEquals(decommissioningNodes.size(), 2);
            DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
            DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
            // This one is still 3,3,1 since it passed over the under-construction
            // block earlier, before node 2 was decommissioned
            checkDecommissionStatus(decommNode1, 3, 3, 1);
            // This one is 4,4,2 since it has the full state
            checkDecommissionStatus(decommNode2, 4, 4, 2);
            checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 2), fileSys, admin);
        }
    }
    // Call refreshNodes on FSNamesystem with empty exclude file.
    // This will remove the datanodes from decommissioning list and
    // make them available again.
    hostsFileWriter.initExcludeHost("");
    dm.refreshNodes(conf);
    st1.close();
    AdminStatesBaseTest.cleanupFile(fileSys, file1);
    AdminStatesBaseTest.cleanupFile(fileSys, file2);
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient) Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) InetSocketAddress(java.net.InetSocketAddress) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test) AdminStatesBaseTest(org.apache.hadoop.hdfs.AdminStatesBaseTest)
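
The core of the pattern above is constructing a DFSClient directly against the NameNode's RPC address and asking for a datanode report. A minimal sketch, assuming a running MiniDFSCluster named cluster and its Configuration conf as in the test (DatanodeReportType is org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType):

InetSocketAddress nnAddr = new InetSocketAddress("localhost", cluster.getNameNodePort());
DFSClient client = new DFSClient(nnAddr, conf);
try {
    // Ask the NameNode which datanodes it currently considers live.
    DatanodeInfo[] live = client.datanodeReport(DatanodeReportType.LIVE);
    System.out.println("live datanodes: " + live.length);
} finally {
    // DFSClient holds an open RPC proxy to the NameNode; always close it.
    client.close();
}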

Example 82 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

From class TestINodeFile, method testDotdotInodePath.

@Test
public void testDotdotInodePath() throws Exception {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    DFSClient client = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        final DistributedFileSystem hdfs = cluster.getFileSystem();
        final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
        final Path dir = new Path("/dir");
        hdfs.mkdirs(dir);
        long dirId = fsdir.getINode(dir.toString()).getId();
        long parentId = fsdir.getINode("/").getId();
        String testPath = "/.reserved/.inodes/" + dirId + "/..";
        client = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
        HdfsFileStatus status = client.getFileInfo(testPath);
        assertTrue(parentId == status.getFileId());
        // Test root's parent is still root
        testPath = "/.reserved/.inodes/" + parentId + "/..";
        status = client.getFileInfo(testPath);
        assertTrue(parentId == status.getFileId());
    } finally {
        IOUtils.cleanup(LOG, client);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient) Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
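
The test exercises the reserved inode namespace: every HDFS inode can also be addressed as /.reserved/.inodes/<id>, and appending /.. resolves to its parent. A minimal sketch of the round trip, reusing the client and the /dir path from the example above:

// Resolve a normal path to its inode id, then address the same inode by id.
HdfsFileStatus st = client.getFileInfo("/dir");
long inodeId = st.getFileId();
String byId = "/.reserved/.inodes/" + inodeId;
// Both names resolve to the same inode, so the ids must match.
assertEquals(inodeId, client.getFileInfo(byId).getFileId());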

Example 83 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

From class TestFsck, method testUnderMinReplicatedBlock.

@Test
public void testUnderMinReplicatedBlock() throws Exception {
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
    // Set minReplication to 2
    short minReplication = 2;
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, minReplication);
    FileSystem fs = null;
    DFSClient dfsClient = null;
    LocatedBlocks blocks = null;
    int replicaCount = 0;
    Random random = new Random();
    String outStr = null;
    short factor = 1;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    Path file1 = new Path("/testUnderMinReplicatedBlock");
    DFSTestUtil.createFile(fs, file1, 1024, minReplication, 0);
    // Wait until file replication has completed
    DFSTestUtil.waitReplication(fs, file1, minReplication);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
    // Make sure filesystem is in healthy state
    outStr = runFsck(conf, 0, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    // corrupt the first replica
    File blockFile = cluster.getBlockFile(0, block);
    if (blockFile != null && blockFile.exists()) {
        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
        FileChannel channel = raFile.getChannel();
        String badString = "BADBAD";
        int rand = random.nextInt((int) channel.size() / 2);
        raFile.seek(rand);
        raFile.write(badString.getBytes());
        raFile.close();
    }
    dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
    while (replicaCount != factor) {
        try {
            Thread.sleep(100);
            // Read the file to trigger reportBadBlocks
            try {
                IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf, true);
            } catch (IOException ie) {
                assertTrue(ie instanceof ChecksumException);
            }
            System.out.println("sleep in try: replicaCount=" + replicaCount + "  factor=" + factor);
        } catch (InterruptedException ignore) {
        }
        blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        replicaCount = blocks.get(0).getLocations().length;
    }
    // Check if fsck reports the same
    outStr = runFsck(conf, 0, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    assertTrue(outStr.contains("UNDER MIN REPL'D BLOCKS:\t1 (100.0 %)"));
    assertTrue(outStr.contains("dfs.namenode.replication.min:\t2"));
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient) Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileChannel(java.nio.channels.FileChannel) InetSocketAddress(java.net.InetSocketAddress) ChecksumException(org.apache.hadoop.fs.ChecksumException) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Matchers.anyString(org.mockito.Matchers.anyString) IOException(java.io.IOException) IOUtils(org.apache.hadoop.io.IOUtils) Random(java.util.Random) RandomAccessFile(java.io.RandomAccessFile) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) File(java.io.File) Test(org.junit.Test)
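
The wait loop above works because reading through a corrupt replica raises a ChecksumException on the client, which reports the bad block to the NameNode; once the NameNode drops that replica, getBlockLocations returns fewer locations. A condensed sketch of the idiom, assuming dfsClient, fs, file1, and conf from the test, one surviving replica, and a test method declared throws Exception:

int replicaCount;
do {
    try {
        // Reading the file triggers reportBadBlocks for the corrupt replica.
        IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf, true);
    } catch (ChecksumException expected) {
        // The corrupted replica fails its checksum; this is the trigger.
    }
    Thread.sleep(100);
    replicaCount = dfsClient.getNamenode()
        .getBlockLocations(file1.toString(), 0, Long.MAX_VALUE)
        .get(0).getLocations().length;
} while (replicaCount != 1);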

Example 84 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

From class TestFsck, method testFsckMoveAndDelete.

@Test
public void testFsckMoveAndDelete() throws Exception {
    final int maxMoveTries = 5;
    DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsckMoveAndDelete").setNumFiles(5).build();
    FileSystem fs = null;
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    String topDir = "/srcdat";
    fs = cluster.getFileSystem();
    cluster.waitActive();
    util.createFiles(fs, topDir);
    util.waitReplication(fs, topDir, (short) 3);
    String outStr = runFsck(conf, 0, true, "/");
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    // Corrupt a block by deleting it
    String[] fileNames = util.getFileNames(topDir);
    DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    String corruptFileName = fileNames[0];
    ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(corruptFileName, 0, Long.MAX_VALUE).get(0).getBlock();
    for (int i = 0; i < 4; i++) {
        File blockFile = cluster.getBlockFile(i, block);
        if (blockFile != null && blockFile.exists()) {
            assertTrue(blockFile.delete());
        }
    }
    // We expect the filesystem to be corrupted
    outStr = runFsck(conf, 1, false, "/");
    while (!outStr.contains(NamenodeFsck.CORRUPT_STATUS)) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException ignore) {
        }
        outStr = runFsck(conf, 1, false, "/");
    }
    // After a fsck -move, the corrupted file should still exist.
    for (int i = 0; i < maxMoveTries; i++) {
        outStr = runFsck(conf, 1, true, "/", "-move");
        assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
        String[] newFileNames = util.getFileNames(topDir);
        boolean found = false;
        for (String f : newFileNames) {
            if (f.equals(corruptFileName)) {
                found = true;
                break;
            }
        }
        assertTrue(found);
    }
    // Fix the filesystem by moving the corrupted files to lost+found and deleting them
    outStr = runFsck(conf, 1, true, "/", "-move", "-delete");
    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
    // Check to make sure we have a healthy filesystem
    outStr = runFsck(conf, 0, true, "/");
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    util.cleanup(fs, topDir);
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient) DFSTestUtil(org.apache.hadoop.hdfs.DFSTestUtil) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) InetSocketAddress(java.net.InetSocketAddress) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Matchers.anyString(org.mockito.Matchers.anyString) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)
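
The distinction the test draws: fsck -move alone copies the salvageable blocks of a corrupt file into /lost+found but leaves the original entry in place (which is why the loop keeps finding corruptFileName), while adding -delete also removes the corrupt file. A sketch of the assertions that separate the two modes, using the names from the test:

// After "-move" alone the corrupt file is still listed at its old path...
assertTrue(fs.exists(new Path(corruptFileName)));
// ...but "-move -delete" removes it, so the final fsck can report HEALTHY.
runFsck(conf, 1, true, "/", "-move", "-delete");
assertFalse(fs.exists(new Path(corruptFileName)));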

Example 85 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

From class TestFsck, method testFsckMoveAfterCorruption.

@Test(timeout = 300000)
public void testFsckMoveAfterCorruption() throws Exception {
    final int dfsBlockSize = 512 * 1024;
    final int numDatanodes = 1;
    final int replication = 1;
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, dfsBlockSize);
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replication);
    cluster = new MiniDFSCluster.Builder(conf).build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    cluster.waitActive();
    final String srcDir = "/srcdat";
    final DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").setMinSize(dfsBlockSize * 2).setMaxSize(dfsBlockSize * 3).setNumFiles(1).build();
    util.createFiles(dfs, srcDir, (short) replication);
    final String[] fileNames = util.getFileNames(srcDir);
    LOG.info("Created files: " + Arrays.toString(fileNames));
    // Run fsck here. The output is automatically logged for easier debugging
    String outStr = runFsck(conf, 0, true, "/", "-files", "-blocks");
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    // Corrupt the first block
    final DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    final String blockFileToCorrupt = fileNames[0];
    final CorruptedTestFile ctf = new CorruptedTestFile(blockFileToCorrupt, Sets.newHashSet(0), dfsClient, numDatanodes, dfsBlockSize);
    ctf.corruptBlocks(cluster);
    // Wait for fsck to discover all the missing blocks
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            try {
                final String str = runFsck(conf, 1, false, "/");
                String numCorrupt = null;
                for (String line : str.split(LINE_SEPARATOR)) {
                    Matcher m = NUM_CORRUPT_BLOCKS_PATTERN.matcher(line);
                    if (m.matches()) {
                        numCorrupt = m.group(1);
                        break;
                    }
                }
                if (numCorrupt == null) {
                    Assert.fail("Cannot find corrupt blocks count in fsck output.");
                }
                if (Integer.parseInt(numCorrupt) == ctf.getTotalMissingBlocks()) {
                    assertTrue(str.contains(NamenodeFsck.CORRUPT_STATUS));
                    return true;
                }
            } catch (Exception e) {
                LOG.error("Exception caught", e);
                Assert.fail("Caught unexpected exception.");
            }
            return false;
        }
    }, 1000, 60000);
    runFsck(conf, 1, true, "/", "-files", "-blocks", "-racks");
    LOG.info("Moving blocks to lost+found");
    // Fsck will return error since we corrupted a block
    runFsck(conf, 1, false, "/", "-move");
    final List<LocatedFileStatus> retVal = new ArrayList<>();
    final RemoteIterator<LocatedFileStatus> iter = dfs.listFiles(new Path("/lost+found"), true);
    while (iter.hasNext()) {
        retVal.add(iter.next());
    }
    LOG.info("Items in lost+found: " + retVal);
    // Expect all good blocks moved, only corrupted block skipped.
    long totalLength = 0;
    for (LocatedFileStatus lfs : retVal) {
        totalLength += lfs.getLen();
    }
    Assert.assertTrue("Nothing is moved to lost+found!", totalLength > 0);
    util.cleanup(dfs, srcDir);
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient) Path(org.apache.hadoop.fs.Path) DFSTestUtil(org.apache.hadoop.hdfs.DFSTestUtil) Matcher(java.util.regex.Matcher) InetSocketAddress(java.net.InetSocketAddress) ArrayList(java.util.ArrayList) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) Matchers.anyString(org.mockito.Matchers.anyString) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) IOException(java.io.IOException) ChecksumException(org.apache.hadoop.fs.ChecksumException) TimeoutException(java.util.concurrent.TimeoutException) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Test(org.junit.Test)
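
GenericTestUtils.waitFor is the standard Hadoop polling idiom used above: the Supplier is re-evaluated every checkEveryMillis (1000 ms here) until it returns true or waitForMillis (60000 ms) elapses, at which point the test fails with a TimeoutException. The lost+found accounting at the end reduces to a few lines (dfs as in the test):

// listFiles(path, true) walks the tree lazily through a RemoteIterator.
long totalLength = 0;
RemoteIterator<LocatedFileStatus> it = dfs.listFiles(new Path("/lost+found"), true);
while (it.hasNext()) {
    totalLength += it.next().getLen();
}
// fsck -move salvages the good blocks, so something must have landed here.
assertTrue("Nothing is moved to lost+found!", totalLength > 0);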

Aggregations

Classes co-occurring with DFSClient across these examples, with usage counts:

DFSClient (org.apache.hadoop.hdfs.DFSClient): 97
Test (org.junit.Test): 53
IOException (java.io.IOException): 35
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 27
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 26
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 18
Path (org.apache.hadoop.fs.Path): 18
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 17
InetSocketAddress (java.net.InetSocketAddress): 13
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 13
Configuration (org.apache.hadoop.conf.Configuration): 12
NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration): 12
FileSystem (org.apache.hadoop.fs.FileSystem): 11
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 11
HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 9
WccData (org.apache.hadoop.nfs.nfs3.response.WccData): 9
ShellBasedIdMapping (org.apache.hadoop.security.ShellBasedIdMapping): 8
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 7
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 7
ArrayList (java.util.ArrayList): 6
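
Across these examples the recurring lifecycle is the same: build a DFSClient from the configured NameNode address, drive either its file-level calls (getFileInfo, datanodeReport) or the raw ClientProtocol behind getNamenode(), and close the client when done. A distilled sketch under those assumptions (the /some/path name is illustrative; the classes are the ones aggregated above):

Configuration conf = new Configuration();
DFSClient client = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
try {
    // getFileInfo returns null when the path does not exist.
    HdfsFileStatus st = client.getFileInfo("/some/path");
    if (st != null) {
        LocatedBlocks lbs = client.getNamenode()
            .getBlockLocations("/some/path", 0, Long.MAX_VALUE);
        System.out.println("blocks: " + lbs.locatedBlockCount());
    }
} finally {
    client.close();
}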