Example 91 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in the Apache Hadoop project.

From the class TestEnhancedByteBufferAccess, method test2GBMmapLimit.

@Test
public void test2GBMmapLimit() throws Exception {
    Assume.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
    HdfsConfiguration conf = initZeroCopyTest();
    final long TEST_FILE_LENGTH = 2469605888L;
    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, TEST_FILE_LENGTH);
    MiniDFSCluster cluster = null;
    final Path TEST_PATH = new Path("/a");
    final String CONTEXT = "test2GBMmapLimit";
    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
    FSDataInputStream fsIn = null, fsIn2 = null;
    ByteBuffer buf1 = null, buf2 = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
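        // The block size equals the file length, so the whole ~2.3 GB file is a
        // single block; zero-copy (mmap) reads are limited to a 2 GB offset
        // within one block, which is the limit this first half of the test hits.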
        DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short) 1, 0xB);
        DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);
        fsIn = fs.open(TEST_PATH);
        buf1 = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
        Assert.assertEquals(1, buf1.remaining());
        fsIn.releaseBuffer(buf1);
        buf1 = null;
        fsIn.seek(2147483640L);
        buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
        Assert.assertEquals(7, buf1.remaining());
        Assert.assertEquals(Integer.MAX_VALUE, buf1.limit());
        fsIn.releaseBuffer(buf1);
        buf1 = null;
        Assert.assertEquals(2147483647L, fsIn.getPos());
        try {
            buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
            Assert.fail("expected UnsupportedOperationException");
        } catch (UnsupportedOperationException e) {
        // expected; can't read past 2GB boundary.
        }
        fsIn.close();
        fsIn = null;
        // Now create another file with normal-sized blocks, and verify we
        // can read past 2GB
        final Path TEST_PATH2 = new Path("/b");
        conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 268435456L);
        DFSTestUtil.createFile(fs, TEST_PATH2, 1024 * 1024, TEST_FILE_LENGTH, 268435456L, (short) 1, 0xA);
        fsIn2 = fs.open(TEST_PATH2);
        fsIn2.seek(2147483640L);
        buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
        Assert.assertEquals(8, buf2.remaining());
        Assert.assertEquals(2147483648L, fsIn2.getPos());
        fsIn2.releaseBuffer(buf2);
        buf2 = null;
        buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
        Assert.assertEquals(1024, buf2.remaining());
        Assert.assertEquals(2147484672L, fsIn2.getPos());
        fsIn2.releaseBuffer(buf2);
        buf2 = null;
    } finally {
        if (buf1 != null) {
            fsIn.releaseBuffer(buf1);
        }
        if (buf2 != null) {
            fsIn2.releaseBuffer(buf2);
        }
        IOUtils.cleanup(null, fsIn, fsIn2);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
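
The zero-copy path exercised above is the enhanced byte-buffer access API on FSDataInputStream: read(ByteBufferPool, maxLength, EnumSet&lt;ReadOption&gt;) returns a ByteBuffer that must be handed back with releaseBuffer(). Below is a minimal sketch of that API outside a MiniDFSCluster; the path /a and the 4096-byte request size are illustrative assumptions, and passing an ElasticByteBufferPool (instead of the null pool used in the test) lets the call fall back to an ordinary copying read when no mmap'd region is available.

import java.nio.ByteBuffer;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.io.ElasticByteBufferPool;

public class ZeroCopyReadSketch {
    public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS in the loaded configuration points at HDFS.
        Configuration conf = new Configuration();
        // Illustrative path, not part of the original test.
        Path file = new Path("/a");
        FileSystem fs = file.getFileSystem(conf);
        try (FSDataInputStream in = fs.open(file)) {
            // With a real buffer pool the call may fall back to a normal,
            // copying read; with a null pool (as in the test) only true
            // zero-copy mmap reads are allowed.
            ElasticByteBufferPool pool = new ElasticByteBufferPool();
            ByteBuffer buf = in.read(pool, 4096,
                EnumSet.of(ReadOption.SKIP_CHECKSUMS));
            if (buf != null) {
                try {
                    System.out.println("read " + buf.remaining() + " bytes");
                } finally {
                    // Buffers from the enhanced read must always be released.
                    in.releaseBuffer(buf);
                }
            }
        }
    }
}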

Example 92 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in the Apache Hadoop project.

From the class LsSnapshottableDir, method run.

@Override
public int run(String[] argv) throws Exception {
    String description = "hdfs lsSnapshottableDir: \n" + "\tGet the list of snapshottable directories that are owned by the current user.\n" + "\tReturn all the snapshottable directories if the current user is a super user.\n";
    if (argv.length != 0) {
        System.err.println("Usage: \n" + description);
        return 1;
    }
    FileSystem fs = FileSystem.get(getConf());
    if (!(fs instanceof DistributedFileSystem)) {
        System.err.println("LsSnapshottableDir can only be used in DistributedFileSystem");
        return 1;
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
        SnapshottableDirectoryStatus[] stats = dfs.getSnapshottableDirListing();
        SnapshottableDirectoryStatus.print(stats, System.out);
    } catch (IOException e) {
        String[] content = e.getLocalizedMessage().split("\n");
        System.err.println("lsSnapshottableDir: " + content[0]);
        return 1;
    }
    return 0;
}
Also used: DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException), SnapshottableDirectoryStatus (org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus)
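
For context, a hedged sketch of driving the same API programmatically: it marks a directory as snapshottable, takes a snapshot, and then prints the listing the way the tool above does. The directory /data and snapshot name s0 are assumptions, and allowSnapshot() requires HDFS superuser privileges.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

public class SnapshottableDirSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        if (!(fs instanceof DistributedFileSystem)) {
            System.err.println("Default filesystem is not HDFS: " + fs.getUri());
            return;
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        // Illustrative directory; assumed to already exist.
        Path dir = new Path("/data");
        // Requires superuser privileges.
        dfs.allowSnapshot(dir);
        dfs.createSnapshot(dir, "s0");
        // Same call and printer the LsSnapshottableDir tool uses above.
        SnapshottableDirectoryStatus[] dirs = dfs.getSnapshottableDirListing();
        SnapshottableDirectoryStatus.print(dirs, System.out);
    }
}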

Example 93 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in the Apache Hadoop project.

From the class SnapshotDiff, method run.

@Override
public int run(String[] argv) throws Exception {
    String description = "hdfs snapshotDiff <snapshotDir> <from> <to>:\n" + "\tGet the difference between two snapshots, \n" + "\tor between a snapshot and the current tree of a directory.\n" + "\tFor <from>/<to>, users can use \".\" to present the current status,\n" + "\tand use \".snapshot/snapshot_name\" to present a snapshot,\n" + "\twhere \".snapshot/\" can be omitted\n";
    if (argv.length != 3) {
        System.err.println("Usage: \n" + description);
        return 1;
    }
    FileSystem fs = FileSystem.get(new Path(argv[0]).toUri(), getConf());
    if (!(fs instanceof DistributedFileSystem)) {
        System.err.println("SnapshotDiff can only be used in DistributedFileSystem");
        return 1;
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    Path snapshotRoot = new Path(argv[0]);
    String fromSnapshot = getSnapshotName(argv[1]);
    String toSnapshot = getSnapshotName(argv[2]);
    try {
        SnapshotDiffReport diffReport = dfs.getSnapshotDiffReport(snapshotRoot, fromSnapshot, toSnapshot);
        System.out.println(diffReport.toString());
    } catch (IOException e) {
        String[] content = e.getLocalizedMessage().split("\n");
        System.err.println("snapshotDiff: " + content[0]);
        return 1;
    }
    return 0;
}
Also used: Path (org.apache.hadoop.fs.Path), SnapshotDiffReport (org.apache.hadoop.hdfs.protocol.SnapshotDiffReport), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException)
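
A hedged companion sketch of getSnapshotDiffReport(): two snapshots are taken around a change and the report is printed just as the tool does. The directory /data, the snapshot names s0 and s1, and the file written between them are illustrative assumptions; allowSnapshot() again needs superuser rights.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

public class SnapshotDiffSketch {
    public static void main(String[] args) throws Exception {
        // Assumed to exist on an HDFS default filesystem.
        Path dir = new Path("/data");
        DistributedFileSystem dfs = (DistributedFileSystem)
            FileSystem.get(dir.toUri(), new Configuration());
        dfs.allowSnapshot(dir);
        dfs.createSnapshot(dir, "s0");
        // Modify the tree between the two snapshots so the diff is non-empty.
        try (FSDataOutputStream out = dfs.create(new Path(dir, "new-file"))) {
            out.writeBytes("hello");
        }
        dfs.createSnapshot(dir, "s1");
        // An empty snapshot name stands for the current tree; the tool above
        // maps its "." argument to that via getSnapshotName().
        SnapshotDiffReport report = dfs.getSnapshotDiffReport(dir, "s0", "s1");
        System.out.println(report);
    }
}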

Example 94 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in the Apache Hadoop project.

From the class TestDataNodeHotSwapVolumes, method createFile.

private void createFile(int fsIdx, Path path, int numBlocks, short replicateFactor) throws IOException, TimeoutException, InterruptedException {
    final int seed = 0;
    final DistributedFileSystem fs = cluster.getFileSystem(fsIdx);
    DFSTestUtil.createFile(fs, path, BLOCK_SIZE * numBlocks, replicateFactor, seed);
    DFSTestUtil.waitReplication(fs, path, replicateFactor);
}
Also used: DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)
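
This helper relies on a MiniDFSCluster field and a BLOCK_SIZE constant defined elsewhere in the test class. A minimal, self-contained sketch of how such a helper is typically driven follows; the block size, node count, and path are assumptions, not the original fixture.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class CreateFileSketch {
    // 1 MB keeps the block size at or above the namenode's default minimum.
    private static final long BLOCK_SIZE = 1024 * 1024;

    public static void main(String[] args) throws Exception {
        HdfsConfiguration conf = new HdfsConfiguration();
        conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        try {
            cluster.waitActive();
            // fsIdx = 0 selects the first namespace, as in the helper above.
            DistributedFileSystem fs = cluster.getFileSystem(0);
            Path path = new Path("/hot-swap-test");
            // Write a 4-block file and wait until it is fully replicated.
            DFSTestUtil.createFile(fs, path, BLOCK_SIZE * 4, (short) 2, 0L);
            DFSTestUtil.waitReplication(fs, path, (short) 2);
        } finally {
            cluster.shutdown();
        }
    }
}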

Example 95 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in the Apache Hadoop project.

From the class TestAddBlock, method testAddBlock.

/**
   * Test adding new blocks. Restart the NameNode in the test to make sure the
   * AddBlockOp in the editlog is applied correctly. 
   */
@Test
public void testAddBlock() throws Exception {
    DistributedFileSystem fs = cluster.getFileSystem();
    final Path file1 = new Path("/file1");
    final Path file2 = new Path("/file2");
    final Path file3 = new Path("/file3");
    final Path file4 = new Path("/file4");
    DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
    DFSTestUtil.createFile(fs, file2, BLOCKSIZE, REPLICATION, 0L);
    DFSTestUtil.createFile(fs, file3, BLOCKSIZE * 2 - 1, REPLICATION, 0L);
    DFSTestUtil.createFile(fs, file4, BLOCKSIZE * 2, REPLICATION, 0L);
    // restart NameNode
    cluster.restartNameNode(true);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    // check file1
    INodeFile file1Node = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfo[] file1Blocks = file1Node.getBlocks();
    assertEquals(1, file1Blocks.length);
    assertEquals(BLOCKSIZE - 1, file1Blocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, file1Blocks[0].getBlockUCState());
    // check file2
    INodeFile file2Node = fsdir.getINode4Write(file2.toString()).asFile();
    BlockInfo[] file2Blocks = file2Node.getBlocks();
    assertEquals(1, file2Blocks.length);
    assertEquals(BLOCKSIZE, file2Blocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, file2Blocks[0].getBlockUCState());
    // check file3
    INodeFile file3Node = fsdir.getINode4Write(file3.toString()).asFile();
    BlockInfo[] file3Blocks = file3Node.getBlocks();
    assertEquals(2, file3Blocks.length);
    assertEquals(BLOCKSIZE, file3Blocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, file3Blocks[0].getBlockUCState());
    assertEquals(BLOCKSIZE - 1, file3Blocks[1].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, file3Blocks[1].getBlockUCState());
    // check file4
    INodeFile file4Node = fsdir.getINode4Write(file4.toString()).asFile();
    BlockInfo[] file4Blocks = file4Node.getBlocks();
    assertEquals(2, file4Blocks.length);
    assertEquals(BLOCKSIZE, file4Blocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, file4Blocks[0].getBlockUCState());
    assertEquals(BLOCKSIZE, file4Blocks[1].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, file4Blocks[1].getBlockUCState());
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
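
The test refers to a cluster, BLOCKSIZE, and REPLICATION that are set up outside the snippet. A plausible JUnit fixture for it might look like the sketch below; the constant values and the relaxed minimum-block-size setting are assumptions, chosen so the namenode accepts a block size of only a few KB.

import org.junit.After;
import org.junit.Before;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class AddBlockFixtureSketch {
    // Hypothetical values, not the original TestAddBlock constants.
    static final short REPLICATION = 3;
    static final int BLOCKSIZE = 1024;

    private Configuration conf;
    private MiniDFSCluster cluster;

    @Before
    public void setUp() throws Exception {
        conf = new Configuration();
        conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
        // Without this, the namenode rejects block sizes below its default
        // 1 MB minimum.
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
        cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(REPLICATION).build();
        cluster.waitActive();
    }

    @After
    public void tearDown() throws Exception {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}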

Aggregations

DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 252 uses
Test (org.junit.Test): 175 uses
Path (org.apache.hadoop.fs.Path): 169 uses
Configuration (org.apache.hadoop.conf.Configuration): 126 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 126 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 86 uses
IOException (java.io.IOException): 63 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 36 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 31 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 31 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 26 uses
URI (java.net.URI): 24 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 22 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19 uses
AccessControlException (org.apache.hadoop.security.AccessControlException): 19 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 18 uses
Matchers.anyString (org.mockito.Matchers.anyString): 18 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 16 uses
ArrayList (java.util.ArrayList): 14 uses
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 14 uses