
Example 76 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestFileLengthOnClusterRestart method testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister.

/**
   * Tests the file length when we hsync the file, restart the cluster, and
   * the datanodes have not yet reported in to the namenode.
   */
@Test(timeout = 60000)
public void testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // create cluster
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    HdfsDataInputStream in = null;
    try {
        Path path = new Path("/tmp/TestFileLengthOnClusterRestart", "test");
        DistributedFileSystem dfs = cluster.getFileSystem();
        FSDataOutputStream out = dfs.create(path);
        int fileLength = 1030;
        out.write(new byte[fileLength]);
        out.hsync();
        cluster.restartNameNode();
        cluster.waitActive();
        in = (HdfsDataInputStream) dfs.open(path, 1024);
        // Verify the length when we just restart NN. DNs will register
        // immediately.
        Assert.assertEquals(fileLength, in.getVisibleLength());
        cluster.shutdownDataNodes();
        cluster.restartNameNode(false);
        // This is just for ensuring NN started.
        verifyNNIsInSafeMode(dfs);
        try {
            in = (HdfsDataInputStream) dfs.open(path);
            Assert.fail("Expected IOException");
        } catch (IOException e) {
            Assert.assertTrue(e.getLocalizedMessage().indexOf("Name node is in safe mode") >= 0);
        }
    } finally {
        if (null != in) {
            in.close();
        }
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream) Test(org.junit.Test)
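
The core pattern this test exercises, in isolation: hsync() persists written bytes to the datanodes and makes them visible to new readers without closing the file. A minimal sketch, assuming a running DistributedFileSystem dfs as in the test above (the path name is hypothetical):

Path p = new Path("/tmp/hsyncDemo");
FSDataOutputStream out = dfs.create(p);
out.write(new byte[1030]);
// hsync() flushes client buffers and asks the datanodes to sync to disk;
// the file stays open for further writes.
out.hsync();
// A new reader can now observe the synced bytes.
HdfsDataInputStream in = (HdfsDataInputStream) dfs.open(p);
Assert.assertEquals(1030, in.getVisibleLength());
in.close();
out.close();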

Example 77 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestFileStatus method testGetFileInfo.

/** Test calling getFileInfo directly on the client */
@Test
public void testGetFileInfo() throws IOException {
    // Check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory", fs.getFileStatus(path).isDirectory());
    // Make sure getFileInfo returns null for files which do not exist
    HdfsFileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
    assertEquals("Non-existant file should result in null", null, fileInfo);
    Path path1 = new Path("/name1");
    Path path2 = new Path("/name1/name2");
    assertTrue(fs.mkdirs(path1));
    FSDataOutputStream out = fs.create(path2, false);
    out.close();
    fileInfo = dfsClient.getFileInfo(path1.toString());
    assertEquals(1, fileInfo.getChildrenNum());
    fileInfo = dfsClient.getFileInfo(path2.toString());
    assertEquals(0, fileInfo.getChildrenNum());
    // Test getFileInfo throws the right exception given a non-absolute path.
    try {
        dfsClient.getFileInfo("non-absolute");
        fail("getFileInfo for a non-absolute path did not throw IOException");
    } catch (RemoteException re) {
        assertTrue("Wrong exception for invalid file name: " + re, re.toString().contains("Absolute path required"));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)
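
For comparison, the public FileSystem API signals a missing path with an exception rather than returning null, which is why tests wanting the null behavior go through DFSClient directly. A minimal sketch, assuming the same fs instance as above (the path name is hypothetical):

Path missing = new Path("/noSuchFile");
try {
    // Unlike DFSClient.getFileInfo, getFileStatus never returns null.
    fs.getFileStatus(missing);
    fail("Expected FileNotFoundException for a missing path");
} catch (FileNotFoundException e) {
    // Expected: the public API reports absence via an exception.
}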

Example 78 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestFileAppend2 method testSimpleAppend.

/**
   * Creates one file, writes a few bytes to it, and then closes it.
   * Reopens the same file for appending, writes the remaining blocks, and
   * then closes it. Verifies that all the data exists in the file.
   * @throws IOException an exception might be thrown
   */
@Test
public void testSimpleAppend() throws IOException {
    final Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    try {
        {
            // test appending to a file.
            // create a new file.
            Path file1 = new Path("/simpleAppend.dat");
            FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
            System.out.println("Created file simpleAppend.dat");
            // write to file
            // io.bytes.per.checksum bytes
            int mid = 186;
            System.out.println("Writing " + mid + " bytes to file " + file1);
            stm.write(fileContents, 0, mid);
            stm.close();
            System.out.println("Wrote and Closed first part of file.");
            // write to file
            // io.bytes.per.checksum bytes
            int mid2 = 607;
            System.out.println("Writing " + mid + " bytes to file " + file1);
            stm = fs.append(file1);
            stm.write(fileContents, mid, mid2 - mid);
            stm.close();
            System.out.println("Wrote and Closed second part of file.");
            // write the remainder of the file
            stm = fs.append(file1);
            // ensure getPos is set to reflect existing size of the file
            assertTrue(stm.getPos() > 0);
            System.out.println("Writing " + (AppendTestUtil.FILE_SIZE - mid2) + " bytes to file " + file1);
            stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
            System.out.println("Written second part of file");
            stm.close();
            System.out.println("Wrote and Closed second part of file.");
            // verify that entire file is good
            AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
        }
        {
            // test appending to a non-existing file.
            FSDataOutputStream out = null;
            try {
                out = fs.append(new Path("/non-existing.dat"));
                fail("Expected to have FileNotFoundException");
            } catch (java.io.FileNotFoundException fnfe) {
                System.out.println("Good: got " + fnfe);
                fnfe.printStackTrace(System.out);
            } finally {
                IOUtils.closeStream(out);
            }
        }
        {
            // test append permission.
            // set root to all-writable
            Path root = new Path("/");
            fs.setPermission(root, new FsPermission((short) 0777));
            fs.close();
            // login as a different user
            final UserGroupInformation superuser = UserGroupInformation.getCurrentUser();
            String username = "testappenduser";
            String group = "testappendgroup";
            assertFalse(superuser.getShortUserName().equals(username));
            assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
            UserGroupInformation appenduser = UserGroupInformation.createUserForTesting(username, new String[] { group });
            fs = DFSTestUtil.getFileSystemAs(appenduser, conf);
            // create a file
            Path dir = new Path(root, getClass().getSimpleName());
            Path foo = new Path(dir, "foo.dat");
            FSDataOutputStream out = null;
            int offset = 0;
            try {
                out = fs.create(foo);
                int len = 10 + AppendTestUtil.nextInt(100);
                out.write(fileContents, offset, len);
                offset += len;
            } finally {
                IOUtils.closeStream(out);
            }
            // change dir and foo to minimal permissions.
            fs.setPermission(dir, new FsPermission((short) 0100));
            fs.setPermission(foo, new FsPermission((short) 0200));
            // try append; should succeed
            out = null;
            try {
                out = fs.append(foo);
                int len = 10 + AppendTestUtil.nextInt(100);
                out.write(fileContents, offset, len);
                offset += len;
            } finally {
                IOUtils.closeStream(out);
            }
            // grant full permissions on dir, but remove the owner's write bit on foo.
            fs.setPermission(foo, new FsPermission((short) 0577));
            fs.setPermission(dir, new FsPermission((short) 0777));
            // try append, should fail
            out = null;
            try {
                out = fs.append(foo);
                fail("Expected to have AccessControlException");
            } catch (AccessControlException ace) {
                System.out.println("Good: got " + ace);
                ace.printStackTrace(System.out);
            } finally {
                IOUtils.closeStream(out);
            }
        }
    } catch (IOException e) {
        System.out.println("Exception :" + e);
        throw e;
    } catch (Throwable e) {
        System.out.println("Throwable :" + e);
        e.printStackTrace();
        throw new IOException("Throwable : " + e);
    } finally {
        fs.close();
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) AccessControlException(org.apache.hadoop.security.AccessControlException) IOException(java.io.IOException) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FsPermission(org.apache.hadoop.fs.permission.FsPermission) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
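
The append flow the test exercises reduces to a small pattern: create and close a file, then fs.append() returns a stream whose position already reflects the existing length, so new writes land at the end. A minimal sketch under the same cluster setup (the path and byte counts are hypothetical):

Path f = new Path("/appendDemo.dat");
FSDataOutputStream out = fs.create(f);
out.write(new byte[100]);
out.close();
// Reopen for append; getPos() reflects the existing 100 bytes.
out = fs.append(f);
assertEquals(100, out.getPos());
out.write(new byte[50]);
out.close();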

Example 79 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestFileAppend3 method testTC1.

/**
   * TC1: Append on block boundary.
   * @throws IOException an exception might be thrown
   */
@Test
public void testTC1() throws Exception {
    final Path p = new Path("/TC1/foo");
    System.out.println("p=" + p);
    //a. Create file and write one block of data. Close file.
    final int len1 = (int) BLOCK_SIZE;
    {
        FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
        AppendTestUtil.write(out, 0, len1);
        out.close();
    }
    //b. Reopen file to append. Append half block of data. Close file.
    final int len2 = (int) BLOCK_SIZE / 2;
    {
        FSDataOutputStream out = fs.append(p);
        AppendTestUtil.write(out, len1, len2);
        out.close();
    }
    //c. Reopen file and read 1.5 blocks worth of data. Close file.
    AppendTestUtil.check(fs, p, len1 + len2);
}
Also used : Path(org.apache.hadoop.fs.Path) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
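
The create overload used above pins the block size explicitly, which is what makes "append on block boundary" reproducible: writing exactly BLOCK_SIZE bytes fills the first block, so the append starts a new one. A minimal sketch with illustrative constants (not the test's actual values):

short replication = 1;
long blockSize = 1024;  // must be a multiple of io.bytes.per.checksum
FSDataOutputStream out = fs.create(new Path("/TC1/demo"), false /* overwrite */,
        4096 /* buffer size */, replication, blockSize);
AppendTestUtil.write(out, 0, (int) blockSize);  // fill exactly one block
out.close();
// fs.append(...) now begins writing at a block boundary.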

Example 80 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

the class TestFileAppend3 method testTC5ForAppend2.

@Test
public void testTC5ForAppend2() throws Exception {
    final Path p = new Path("/TC5/foo2");
    // a. Create file on Machine M1. Write half block to it. Close file.
    {
        FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
        AppendTestUtil.write(out, 0, (int) (BLOCK_SIZE / 2));
        out.close();
    }
    // b. Reopen file in "append" mode on Machine M1.
    FSDataOutputStream out = fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
    // c. On Machine M2, reopen file in "append" mode. This should fail.
    try {
        ((DistributedFileSystem) AppendTestUtil.createHdfsWithDifferentUsername(conf)).append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
        fail("This should fail.");
    } catch (IOException ioe) {
        AppendTestUtil.LOG.info("GOOD: got an exception", ioe);
    }
    try {
        AppendTestUtil.createHdfsWithDifferentUsername(conf).append(p);
        fail("This should fail.");
    } catch (IOException ioe) {
        AppendTestUtil.LOG.info("GOOD: got an exception", ioe);
    }
    // d. On Machine M1, close file.
    out.close();
}
Also used : Path(org.apache.hadoop.fs.Path) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) Test(org.junit.Test)
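
What makes the Machine M2 append fail is HDFS's single-writer lease: while one client holds the file open for write, the NameNode rejects any other open-for-append on the same path. A minimal sketch of that invariant, reusing the test's p, fs, and conf (the exact exception type is an assumption):

FSDataOutputStream writer = fs.append(p);  // this client now holds the lease
FileSystem otherFs = AppendTestUtil.createHdfsWithDifferentUsername(conf);
try {
    otherFs.append(p);
    fail("Second append should be rejected while the lease is held");
} catch (IOException expected) {
    // Typically a RemoteException wrapping AlreadyBeingCreatedException.
}
writer.close();  // closing releases the lease; a later append would succeed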

Aggregations

FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 789
Path (org.apache.hadoop.fs.Path) 618
Test (org.junit.Test) 345
FileSystem (org.apache.hadoop.fs.FileSystem) 248
Configuration (org.apache.hadoop.conf.Configuration) 190
IOException (java.io.IOException) 163
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream) 94
IgfsPath (org.apache.ignite.igfs.IgfsPath) 78
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 66
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 65
FileStatus (org.apache.hadoop.fs.FileStatus) 57
FsPermission (org.apache.hadoop.fs.permission.FsPermission) 45
CreateFlag (org.apache.hadoop.fs.CreateFlag) 43
FileNotFoundException (java.io.FileNotFoundException) 40
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 40
ArrayList (java.util.ArrayList) 38
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 33
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks) 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) 30
Random (java.util.Random) 28