
Example 11 with NamenodeProtocols

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

the class TestFileAppend4 method testRecoverFinalizedBlock.

/**
   * Test case that stops a writer after finalizing a block but
   * before calling completeFile, and then tries to recover
   * the lease from another thread.
   */
@Test(timeout = 60000)
public void testRecoverFinalizedBlock() throws Throwable {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
    try {
        cluster.waitActive();
        NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
        NamenodeProtocols spyNN = spy(preSpyNN);
        // Delay completeFile
        GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
        doAnswer(delayer).when(spyNN).complete(anyString(), anyString(), (ExtendedBlock) anyObject(), anyLong());
        DFSClient client = new DFSClient(null, spyNN, conf, null);
        file1 = new Path("/testRecoverFinalized");
        final OutputStream stm = client.create("/testRecoverFinalized", true);
        // write 1/2 block
        AppendTestUtil.write(stm, 0, 4096);
        final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
        Thread t = new Thread() {

            @Override
            public void run() {
                try {
                    stm.close();
                } catch (Throwable t) {
                    err.set(t);
                }
            }
        };
        t.start();
        LOG.info("Waiting for close to get to latch...");
        delayer.waitForCall();
        // At this point, the block is finalized on the DNs, but the file
        // has not been completed in the NN.
        // Lose the leases
        LOG.info("Killing lease checker");
        client.getLeaseRenewer().interruptAndJoin();
        FileSystem fs1 = cluster.getFileSystem();
        FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
        LOG.info("Recovering file");
        recoverFile(fs2);
        LOG.info("Telling close to proceed.");
        delayer.proceed();
        LOG.info("Waiting for close to finish.");
        t.join();
        LOG.info("Close finished.");
        // We expect that close will get a "File is not open" error.
        Throwable thrownByClose = err.get();
        assertNotNull(thrownByClose);
        assertTrue(thrownByClose instanceof LeaseExpiredException);
        GenericTestUtils.assertExceptionContains("File is not open for writing", thrownByClose);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) OutputStream(java.io.OutputStream) GenericTestUtils(org.apache.hadoop.test.GenericTestUtils) AtomicReference(java.util.concurrent.atomic.AtomicReference) LeaseExpiredException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
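
The recoverFile(fs2) call above relies on a helper that is not shown in this excerpt. Below is a minimal sketch of such a helper, assuming it polls DistributedFileSystem#recoverLease until the NameNode releases the lease; the class name, timeout, and poll interval are illustrative, not the test's actual values.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

class LeaseRecoveryHelper {

    // Hypothetical helper: repeatedly ask the NameNode to recover the lease on
    // the given file until recovery succeeds or a timeout expires.
    static void recoverFile(FileSystem fs, Path path) throws IOException, InterruptedException {
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        long deadline = System.currentTimeMillis() + 30000; // illustrative 30-second cap
        while (System.currentTimeMillis() < deadline) {
            // recoverLease() returns true once the lease has been released and
            // the file's last block is finalized.
            if (dfs.recoverLease(path)) {
                return;
            }
            Thread.sleep(1000);
        }
        throw new IOException("Timed out recovering lease on " + path);
    }
}

In the test above this would be invoked as recoverFile(fs2, file1), where fs2 is created under a different username so that it does not hold the original writer's lease.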

Example 12 with NamenodeProtocols

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

the class TestFsck method testFsckListCorruptFilesBlocks.

/** Check if the -list-corruptfileblocks option of the fsck command works properly. */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    FileSystem fs = null;
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);
    String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j <= 1; j++) {
            File storageDir = cluster.getInstanceStorageDir(i, j);
            File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
            List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
            if (metadataFiles == null) {
                continue;
            }
            for (File metadataFile : metadataFiles) {
                File blockFile = Block.metaToBlockFile(metadataFile);
                assertTrue("Cannot remove file.", blockFile.delete());
                assertTrue("Cannot remove file.", metadataFile.delete());
            }
        }
    }
    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
        Thread.sleep(1000);
        corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData", null);
        numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));
    // Do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
}
Also used : DFSTestUtil(org.apache.hadoop.hdfs.DFSTestUtil) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) CorruptFileBlocks(org.apache.hadoop.hdfs.protocol.CorruptFileBlocks) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Matchers.anyString(org.mockito.Matchers.anyString) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)
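
The while/Thread.sleep loop above waits indefinitely for the NameNode to notice the corruption. A hedged alternative sketch bounds the wait with GenericTestUtils.waitFor; the 1-second poll and 60-second timeout are illustrative, and depending on the Hadoop version waitFor takes a Guava or java.util.function Supplier (a lambda satisfies either).

import java.io.IOException;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.test.GenericTestUtils;

class CorruptionWaiter {

    // Poll the NameNode until it reports at least one corrupt file under the
    // given path, or fail after 60 seconds instead of looping forever.
    static void waitForCorruptFiles(NamenodeProtocols namenode, String path)
            throws TimeoutException, InterruptedException {
        GenericTestUtils.waitFor(() -> {
            try {
                return namenode.listCorruptFileBlocks(path, null).getFiles().length > 0;
            } catch (IOException e) {
                return false;
            }
        }, 1000, 60000);
    }
}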

Example 13 with NamenodeProtocols

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

the class TestINodeFile method testInodeIdBasedPaths.

/**
   * Tests for addressing files using /.reserved/.inodes/<inodeID> in file system
   * operations.
   */
@Test
public void testInodeIdBasedPaths() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
        // FileSystem#mkdirs "/testInodeIdBasedPaths"
        Path baseDir = getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths");
        Path baseDirRegPath = new Path("/testInodeIdBasedPaths");
        fs.mkdirs(baseDir);
        fs.exists(baseDir);
        long baseDirFileId = nnRpc.getFileInfo(baseDir.toString()).getFileId();
        // FileSystem#create file and FileSystem#close
        Path testFileInodePath = getInodePath(baseDirFileId, "test1");
        Path testFileRegularPath = new Path(baseDir, "test1");
        final int testFileBlockSize = 1024;
        FileSystemTestHelper.createFile(fs, testFileInodePath, 1, testFileBlockSize);
        assertTrue(fs.exists(testFileInodePath));
        // FileSystem#setPermission
        FsPermission perm = new FsPermission((short) 0666);
        fs.setPermission(testFileInodePath, perm);
        // FileSystem#getFileStatus and FileSystem#getPermission
        FileStatus fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(perm, fileStatus.getPermission());
        // FileSystem#setOwner
        fs.setOwner(testFileInodePath, fileStatus.getOwner(), fileStatus.getGroup());
        // FileSystem#setTimes
        fs.setTimes(testFileInodePath, 0, 0);
        fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(0, fileStatus.getModificationTime());
        assertEquals(0, fileStatus.getAccessTime());
        // FileSystem#setReplication
        fs.setReplication(testFileInodePath, (short) 3);
        fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(3, fileStatus.getReplication());
        fs.setReplication(testFileInodePath, (short) 1);
        // ClientProtocol#getPreferredBlockSize
        assertEquals(testFileBlockSize, nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
        /*
       * HDFS-6749 added missing calls to FSDirectory.resolvePath in the
       * following four methods. The calls below ensure that
       * /.reserved/.inodes paths work properly. No need to check return
       * values as these methods are tested elsewhere.
       */
        {
            fs.isFileClosed(testFileInodePath);
            fs.getAclStatus(testFileInodePath);
            fs.getXAttrs(testFileInodePath);
            fs.listXAttrs(testFileInodePath);
            fs.access(testFileInodePath, FsAction.READ_WRITE);
        }
        // symbolic link related tests
        // Reserved path is not allowed as a target
        String invalidTarget = new Path(baseDir, "invalidTarget").toString();
        String link = new Path(baseDir, "link").toString();
        testInvalidSymlinkTarget(nnRpc, invalidTarget, link);
        // Test creating a link using reserved inode path
        String validTarget = "/validtarget";
        testValidSymlinkTarget(nnRpc, validTarget, link);
        // FileSystem#append
        fs.append(testFileInodePath);
        // DistributedFileSystem#recoverLease
        fs.recoverLease(testFileInodePath);
        // Namenode#getBlockLocations
        LocatedBlocks l1 = nnRpc.getBlockLocations(testFileInodePath.toString(), 0, Long.MAX_VALUE);
        LocatedBlocks l2 = nnRpc.getBlockLocations(testFileRegularPath.toString(), 0, Long.MAX_VALUE);
        checkEquals(l1, l2);
        // FileSystem#rename - both the variants
        Path renameDst = getInodePath(baseDirFileId, "test2");
        fileStatus = fs.getFileStatus(testFileInodePath);
        // Rename variant 1: rename and rename back
        fs.rename(testFileInodePath, renameDst);
        fs.rename(renameDst, testFileInodePath);
        assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
        // Rename variant 2: rename and rename back
        fs.rename(testFileInodePath, renameDst, Rename.OVERWRITE);
        fs.rename(renameDst, testFileInodePath, Rename.OVERWRITE);
        assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
        // FileSystem#getContentSummary
        assertEquals(fs.getContentSummary(testFileRegularPath).toString(), fs.getContentSummary(testFileInodePath).toString());
        // FileSystem#listFiles
        checkEquals(fs.listFiles(baseDirRegPath, false), fs.listFiles(baseDir, false));
        // FileSystem#delete
        fs.delete(testFileInodePath, true);
        assertFalse(fs.exists(testFileInodePath));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FsPermission(org.apache.hadoop.fs.permission.FsPermission) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
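
getInodePath(...) is a test helper that the excerpt does not include. A minimal sketch of how such a helper could construct a /.reserved/.inodes path from an inode ID follows; the string literals mirror the reserved-path components, and the exact helper shape is an assumption rather than the class's actual code.

import org.apache.hadoop.fs.Path;

class InodePathHelper {

    // Hypothetical helper: builds /.reserved/.inodes/<inodeId>/<remainingPath>,
    // which the NameNode resolves back to the inode with that ID.
    static Path getInodePath(long inodeId, String remainingPath) {
        return new Path(Path.SEPARATOR + ".reserved"
            + Path.SEPARATOR + ".inodes"
            + Path.SEPARATOR + inodeId
            + Path.SEPARATOR + remainingPath);
    }
}

With the root inode ID, for example, getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths") yields a path of the form /.reserved/.inodes/<rootInodeId>/testInodeIdBasedPaths, which the NameNode treats as equivalent to the regular path /testInodeIdBasedPaths.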

Example 14 with NamenodeProtocols

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

the class TestINodeFile method testInodeId.

/**
   * This test verifies the inode ID counter and inode map functionality.
   */
@Test
public void testInodeId() throws IOException {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        FSNamesystem fsn = cluster.getNamesystem();
        long lastId = fsn.dir.getLastInodeId();
        // Ensure root has the correct inode ID
        // Last inode ID should be root inode ID and inode map size should be 1
        int inodeCount = 1;
        long expectedLastInodeId = INodeId.ROOT_INODE_ID;
        assertEquals(fsn.dir.rootDir.getId(), INodeId.ROOT_INODE_ID);
        assertEquals(expectedLastInodeId, lastId);
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        // Create a directory
        // Last inode ID and inode map size should increase by 1
        FileSystem fs = cluster.getFileSystem();
        Path path = new Path("/test1");
        assertTrue(fs.mkdirs(path));
        assertEquals(++expectedLastInodeId, fsn.dir.getLastInodeId());
        assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
        // Create a file
        // Last inode ID and inode map size should increase by 1
        NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
        DFSTestUtil.createFile(fs, new Path("/test1/file"), 1024, (short) 1, 0);
        assertEquals(++expectedLastInodeId, fsn.dir.getLastInodeId());
        assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
        // Ensure right inode ID is returned in file status
        HdfsFileStatus fileStatus = nnrpc.getFileInfo("/test1/file");
        assertEquals(expectedLastInodeId, fileStatus.getFileId());
        // Rename a directory
        // Last inode ID and inode map size should not change
        Path renamedPath = new Path("/test2");
        assertTrue(fs.rename(path, renamedPath));
        assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        // Delete test2/file and test2 and ensure inode map size decreases
        assertTrue(fs.delete(renamedPath, true));
        inodeCount -= 2;
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        // Create and concat /test1/file1 /test1/file2
        // Create /test1/file1 and /test1/file2
        String file1 = "/test1/file1";
        String file2 = "/test1/file2";
        DFSTestUtil.createFile(fs, new Path(file1), 512, (short) 1, 0);
        DFSTestUtil.createFile(fs, new Path(file2), 512, (short) 1, 0);
        // test1, file1 and file2 are created
        inodeCount += 3;
        expectedLastInodeId += 3;
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
        // Concat the /test1/file1 /test1/file2 into /test1/file2
        nnrpc.concat(file2, new String[] { file1 });
        // file1 and file2 are concatenated to file2
        inodeCount--;
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
        assertTrue(fs.delete(new Path("/test1"), true));
        // test1 and file2 are deleted
        inodeCount -= 2;
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        // Make sure editlog is loaded correctly 
        cluster.restartNameNode();
        cluster.waitActive();
        fsn = cluster.getNamesystem();
        assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        // Create two inodes test2 and test2/file2
        DFSTestUtil.createFile(fs, new Path("/test2/file2"), 1024, (short) 1, 0);
        expectedLastInodeId += 2;
        inodeCount += 2;
        assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        // create /test3, and /test3/file.
        // /test3/file is a file under construction
        FSDataOutputStream outStream = fs.create(new Path("/test3/file"));
        assertTrue(outStream != null);
        expectedLastInodeId += 2;
        inodeCount += 2;
        assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        // Apply editlogs to fsimage, ensure inodeUnderConstruction is handled
        fsn.enterSafeMode(false);
        fsn.saveNamespace(0, 0);
        fsn.leaveSafeMode(false);
        outStream.close();
        // The lastInodeId in fsimage should remain the same after reboot
        cluster.restartNameNode();
        cluster.waitActive();
        fsn = cluster.getNamesystem();
        assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
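
The enterSafeMode / saveNamespace / leaveSafeMode sequence above folds the pending edits into a fresh fsimage before the restart, so the last inode ID survives the reboot from the image alone. A small sketch of wrapping that sequence into a reusable helper is shown below; the helper name is illustrative, the calls mirror the ones in the test, and it is assumed to live in the same test package so the FSNamesystem methods are visible.

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.hdfs.MiniDFSCluster;

class CheckpointHelper {

    // Force a checkpoint on the NameNode: enter safe mode, write the current
    // namespace (including pending edits) out as a new fsimage, then leave
    // safe mode. Passing 0, 0 requests an unconditional save, as in the test
    // above.
    static void forceCheckpoint(MiniDFSCluster cluster) throws Exception {
        FSNamesystem fsn = cluster.getNamesystem();
        fsn.enterSafeMode(false);
        fsn.saveNamespace(0, 0);
        fsn.leaveSafeMode(false);
    }
}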

Example 15 with NamenodeProtocols

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

the class TestLease method testLeaseAbort.

@Test
public void testLeaseAbort() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
        cluster.waitActive();
        NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
        NamenodeProtocols spyNN = spy(preSpyNN);
        DFSClient dfs = new DFSClient(null, spyNN, conf, null);
        byte[] buf = new byte[1024];
        FSDataOutputStream c_out = createFsOut(dfs, dirString + "c");
        c_out.write(buf, 0, 1024);
        c_out.close();
        DFSInputStream c_in = dfs.open(dirString + "c");
        FSDataOutputStream d_out = createFsOut(dfs, dirString + "d");
        // stub the renew method.
        doThrow(new RemoteException(InvalidToken.class.getName(), "Your token is worthless")).when(spyNN).renewLease(anyString());
        // We don't need to wait for the lease renewer thread to act.
        // call renewLease() manually.
        // make it look like the soft limit has been exceeded.
        LeaseRenewer originalRenewer = dfs.getLeaseRenewer();
        dfs.lastLeaseRenewal = Time.monotonicNow() - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
        try {
            dfs.renewLease();
        } catch (IOException e) {
        }
        // Writes should continue to work until the hard limit passes, even without renewing.
        try {
            d_out.write(buf, 0, 1024);
            LOG.info("Write worked beyond the soft limit as expected.");
        } catch (IOException e) {
            Assert.fail("Write failed.");
        }
        // make it look like the hard limit has been exceeded.
        dfs.lastLeaseRenewal = Time.monotonicNow() - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000;
        dfs.renewLease();
        // this should not work.
        try {
            d_out.write(buf, 0, 1024);
            d_out.close();
            Assert.fail("Write did not fail even after the fatal lease renewal failure");
        } catch (IOException e) {
            LOG.info("Write failed as expected. ", e);
        }
        // If aborted, the renewer should be empty. (no reference to clients)
        Thread.sleep(1000);
        Assert.assertTrue(originalRenewer.isEmpty());
        // unstub
        doNothing().when(spyNN).renewLease(anyString());
        // existing input streams should work
        try {
            int num = c_in.read(buf, 0, 1);
            if (num != 1) {
                Assert.fail("Failed to read 1 byte");
            }
            c_in.close();
        } catch (IOException e) {
            LOG.error("Read failed with ", e);
            Assert.fail("Read after lease renewal failure failed");
        }
        // new file writes should work.
        try {
            c_out = createFsOut(dfs, dirString + "c");
            c_out.write(buf, 0, 1024);
            c_out.close();
        } catch (IOException e) {
            LOG.error("Write failed with ", e);
            Assert.fail("Write failed");
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) LeaseRenewer(org.apache.hadoop.hdfs.client.impl.LeaseRenewer) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)
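
createFsOut(...) is another helper this excerpt omits. A plausible sketch follows, assuming it simply wraps the stream returned by DFSClient#create in an FSDataOutputStream; the actual implementation in TestLease may differ.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hdfs.DFSClient;

class FsOutHelper {

    // Hypothetical helper: create (or overwrite) a file through the possibly
    // spied DFSClient and expose it through the FSDataOutputStream API used
    // by the test.
    static FSDataOutputStream createFsOut(DFSClient dfs, String path) throws IOException {
        return new FSDataOutputStream(dfs.create(path, true), null);
    }
}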

Aggregations

NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 54 usages
Test (org.junit.Test): 45 usages
IOException (java.io.IOException): 24 usages
Configuration (org.apache.hadoop.conf.Configuration): 21 usages
Path (org.apache.hadoop.fs.Path): 19 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 16 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 15 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 12 usages
RemoteException (org.apache.hadoop.ipc.RemoteException): 10 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 9 usages
File (java.io.File): 8 usages
FileNotFoundException (java.io.FileNotFoundException): 8 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 7 usages
StandbyException (org.apache.hadoop.ipc.StandbyException): 7 usages
EOFException (java.io.EOFException): 6 usages
ConnectException (java.net.ConnectException): 6 usages
URISyntaxException (java.net.URISyntaxException): 6 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 usages