
Example 96 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache: class TestLeaseManager, method testLeaseRestorationOnRestart.

/**
   * Make sure the lease is restored even if only the inode has the record.
   */
@Test
public void testLeaseRestorationOnRestart() throws Exception {
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(1).build();
        DistributedFileSystem dfs = cluster.getFileSystem();
        // Create an empty file and leave the stream open, so the file stays under construction
        String path = "/testLeaseRestorationOnRestart";
        FSDataOutputStream out = dfs.create(new Path(path));
        // Remove the lease from the lease manager, but leave it in the inode.
        FSDirectory dir = cluster.getNamesystem().getFSDirectory();
        INodeFile file = dir.getINode(path).asFile();
        cluster.getNamesystem().leaseManager.removeLease(file.getFileUnderConstructionFeature().getClientName(), file);
        // Save a fsimage.
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        cluster.getNameNodeRpc().saveNamespace(0, 0);
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // Restart the namenode.
        cluster.restartNameNode(true);
        // Check whether the lease manager has the lease
        dir = cluster.getNamesystem().getFSDirectory();
        file = dir.getINode(path).asFile();
        assertTrue("Lease should exist.", cluster.getNamesystem().leaseManager.getLease(file) != null);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), Test(org.junit.Test)
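
In the test above, the stream returned by dfs.create() is deliberately never closed, so the file remains under construction and the client keeps its lease. For contrast, a minimal sketch of the normal client lifecycle, where closing the FSDataOutputStream completes the file and releases the lease (the Configuration, path, and payload here are illustrative assumptions, not taken from the test):

FileSystem fs = FileSystem.get(new Configuration()); // assumes the config points at a reachable HDFS
Path p = new Path("/example/normalLifecycle");       // hypothetical path
FSDataOutputStream out = fs.create(p);
out.writeBytes("hello");                             // inherited from java.io.DataOutputStream
out.hflush();                                        // expose the bytes to new readers without closing
out.close();                                         // completes the file; the NameNode releases the lease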

Example 97 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache: class TestINodeFile, method testInodeId.

/**
   * This test verifies inode ID counter and inode map functionality.
   */
@Test
public void testInodeId() throws IOException {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        FSNamesystem fsn = cluster.getNamesystem();
        long lastId = fsn.dir.getLastInodeId();
        // Ensure root has the correct inode ID
        // Last inode ID should be root inode ID and inode map size should be 1
        int inodeCount = 1;
        long expectedLastInodeId = INodeId.ROOT_INODE_ID;
        assertEquals(fsn.dir.rootDir.getId(), INodeId.ROOT_INODE_ID);
        assertEquals(expectedLastInodeId, lastId);
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        // Create a directory
        // Last inode ID and inode map size should increase by 1
        FileSystem fs = cluster.getFileSystem();
        Path path = new Path("/test1");
        assertTrue(fs.mkdirs(path));
        assertEquals(++expectedLastInodeId, fsn.dir.getLastInodeId());
        assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
        // Create a file
        // Last inode ID and inode map size should increase by 1
        NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
        DFSTestUtil.createFile(fs, new Path("/test1/file"), 1024, (short) 1, 0);
        assertEquals(++expectedLastInodeId, fsn.dir.getLastInodeId());
        assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
        // Ensure right inode ID is returned in file status
        HdfsFileStatus fileStatus = nnrpc.getFileInfo("/test1/file");
        assertEquals(expectedLastInodeId, fileStatus.getFileId());
        // Rename a directory
        // Last inode ID and inode map size should not change
        Path renamedPath = new Path("/test2");
        assertTrue(fs.rename(path, renamedPath));
        assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        // Delete test2/file and test2 and ensure inode map size decreases
        assertTrue(fs.delete(renamedPath, true));
        inodeCount -= 2;
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        // Create /test1/file1 and /test1/file2, then concat file1 into file2
        String file1 = "/test1/file1";
        String file2 = "/test1/file2";
        DFSTestUtil.createFile(fs, new Path(file1), 512, (short) 1, 0);
        DFSTestUtil.createFile(fs, new Path(file2), 512, (short) 1, 0);
        // /test1 is re-created along with file1 and file2, so three new inodes
        inodeCount += 3;
        expectedLastInodeId += 3;
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
        // Concat the /test1/file1 /test1/file2 into /test1/file2
        nnrpc.concat(file2, new String[] { file1 });
        // file1's blocks are concatenated into file2, so file1's inode is removed
        inodeCount--;
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
        assertTrue(fs.delete(new Path("/test1"), true));
        // test1 and file2 are deleted
        inodeCount -= 2;
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        // Make sure the edit log is loaded correctly after restart
        cluster.restartNameNode();
        cluster.waitActive();
        fsn = cluster.getNamesystem();
        assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        // Create two inodes test2 and test2/file2
        DFSTestUtil.createFile(fs, new Path("/test2/file2"), 1024, (short) 1, 0);
        expectedLastInodeId += 2;
        inodeCount += 2;
        assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        // create /test3, and /test3/file.
        // /test3/file is a file under construction
        FSDataOutputStream outStream = fs.create(new Path("/test3/file"));
        assertTrue(outStream != null);
        expectedLastInodeId += 2;
        inodeCount += 2;
        assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
        // Apply editlogs to fsimage, ensure inodeUnderConstruction is handled
        fsn.enterSafeMode(false);
        fsn.saveNamespace(0, 0);
        fsn.leaveSafeMode(false);
        outStream.close();
        // The lastInodeId in fsimage should remain the same after reboot
        cluster.restartNameNode();
        cluster.waitActive();
        fsn = cluster.getNamesystem();
        assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
        assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path), NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), Configuration(org.apache.hadoop.conf.Configuration), FileSystem(org.apache.hadoop.fs.FileSystem), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), Test(org.junit.Test)
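
The fileId assertion above is the piece most often reused elsewhere: any client holding an RPC proxy to the NameNode can read back the inode ID assigned to a path via the same getFileInfo call. A minimal sketch (the running cluster and the path are assumptions carried over from the test):

NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
HdfsFileStatus status = nnrpc.getFileInfo("/test2/file2"); // returns null if the path does not exist
if (status != null) {
    long inodeId = status.getFileId(); // inode IDs are assigned monotonically at creation
    System.out.println("inode id: " + inodeId);
}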

Example 98 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache: class TestVLong, method testVLongRandom.

@Test
public void testVLongRandom() throws IOException {
    int count = 1024 * 1024;
    long[] data = new long[count];
    Random rng = new Random();
    for (int i = 0; i < data.length; ++i) {
        int shift = rng.nextInt(Long.SIZE) + 1;
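        // mask of the low 'shift' bits; Java long shifts are mod 64, so shift == 64 yields mask 0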
        long mask = (1L << shift) - 1;
        long a = ((long) rng.nextInt()) << 32;
        long b = ((long) rng.nextInt()) & 0xffffffffL;
        data[i] = (a + b) & mask;
    }
    FSDataOutputStream out = fs.create(path);
    for (int i = 0; i < data.length; ++i) {
        Utils.writeVLong(out, data[i]);
    }
    out.close();
    FSDataInputStream in = fs.open(path);
    for (int i = 0; i < data.length; ++i) {
        Assert.assertEquals(Utils.readVLong(in), data[i]);
    }
    in.close();
    fs.delete(path, false);
}
Also used: Random(java.util.Random), FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), Test(org.junit.Test)
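
Utils.writeVLong and Utils.readVLong take a plain DataOutput/DataInput, so the variable-length encoding can be exercised without any filesystem at all. A standalone sketch of the same round trip over an in-memory buffer (the class name and sample value are illustrative, not from the Hadoop sources):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.file.tfile.Utils;

public class VLongRoundTrip {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        // Small magnitudes encode in fewer bytes than the fixed 8 of writeLong.
        Utils.writeVLong(out, 123456789L);
        out.close();
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        long decoded = Utils.readVLong(in);
        System.out.println(decoded + " round-tripped in " + bytes.size() + " bytes");
    }
}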

Example 99 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache: class TestVLong, method testVLongByte.

@Test
public void testVLongByte() throws IOException {
    FSDataOutputStream out = fs.create(path);
    for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; ++i) {
        Utils.writeVLong(out, i);
    }
    out.close();
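    // Expected length: the 160 values in [-32, 127] encode in one byte each, while the
    // 96 values in [-128, -33] take two bytes, so the total is 256 + 96 = 352 bytes.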
    Assert.assertEquals("Incorrect encoded size", (1 << Byte.SIZE) + 96, fs.getFileStatus(path).getLen());
    FSDataInputStream in = fs.open(path);
    for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; ++i) {
        long n = Utils.readVLong(in);
        Assert.assertEquals(n, i);
    }
    in.close();
    fs.delete(path, false);
}
Also used: FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), Test(org.junit.Test)

Example 100 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache: class TestVLong, method writeAndVerify.

private long writeAndVerify(int shift) throws IOException {
    FSDataOutputStream out = fs.create(path);
    for (int i = Short.MIN_VALUE; i <= Short.MAX_VALUE; ++i) {
        Utils.writeVLong(out, ((long) i) << shift);
    }
    out.close();
    FSDataInputStream in = fs.open(path);
    for (int i = Short.MIN_VALUE; i <= Short.MAX_VALUE; ++i) {
        long n = Utils.readVLong(in);
        Assert.assertEquals(n, ((long) i) << shift);
    }
    in.close();
    long ret = fs.getFileStatus(path).getLen();
    fs.delete(path, false);
    return ret;
}
Also used: FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
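
writeAndVerify is a private helper rather than a test itself; it returns the encoded file length so callers can assert on size. A hypothetical caller (the test name and the expected relation are illustrative, not from the Hadoop sources) might look like:

// Hypothetical caller of writeAndVerify: larger magnitudes should cost more bytes per value.
@Test
public void testVLongShiftedSizes() throws IOException {
    long small = writeAndVerify(0);  // shorts near zero: mostly short encodings
    long large = writeAndVerify(32); // the same shorts shifted up 32 bits: longer encodings
    Assert.assertTrue("Shifted values should encode larger", large > small);
}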

Aggregations

FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 789
Path (org.apache.hadoop.fs.Path): 618
Test (org.junit.Test): 345
FileSystem (org.apache.hadoop.fs.FileSystem): 248
Configuration (org.apache.hadoop.conf.Configuration): 190
IOException (java.io.IOException): 163
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 94
IgfsPath (org.apache.ignite.igfs.IgfsPath): 78
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 66
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 65
FileStatus (org.apache.hadoop.fs.FileStatus): 57
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 45
CreateFlag (org.apache.hadoop.fs.CreateFlag): 43
FileNotFoundException (java.io.FileNotFoundException): 40
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 40
ArrayList (java.util.ArrayList): 38
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 33
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 30
Random (java.util.Random): 28