Search in sources:

Example 26 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

From the class TestAppendDifferentChecksum, the method appendWithTwoFs:

/**
 * Writes one segment of file {@code p} through {@code fs1}, then appends a
 * second segment through {@code fs2}.
 *
 * @param p   file to create and then append to
 * @param fs1 file system used for the initial create and write
 * @param fs2 file system used for the append
 * @throws IOException if any create, write, append, or close fails
 */
private void appendWithTwoFs(Path p, FileSystem fs1, FileSystem fs2) throws IOException {
    // FSDataOutputStream is AutoCloseable: try-with-resources replaces the
    // manual try/finally close, and a close() failure is suppressed rather
    // than masking an exception thrown by write().
    try (FSDataOutputStream stm = fs1.create(p)) {
        AppendTestUtil.write(stm, 0, SEGMENT_LENGTH);
    }
    try (FSDataOutputStream stm = fs2.append(p)) {
        AppendTestUtil.write(stm, SEGMENT_LENGTH, SEGMENT_LENGTH);
    }
}
Also used : FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)

Example 27 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

From the class TestRenameWhileOpen, the method testWhileOpenRenameToExistentDirectory:

/**
 * open /user/dir1/file1
 * mkdir /user/dir2
 * move /user/dir1/file1 /user/dir2/
 *
 * Verifies that renaming a file that is still open for write into an
 * existing directory survives two cluster restarts (i.e. the lease and
 * rename are persisted and correctly re-read from fsimage).
 */
@Test
public void testWhileOpenRenameToExistentDirectory() throws IOException {
    Configuration conf = new HdfsConfiguration();
    // 2s
    final int MAX_IDLE_TIME = 2000;
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
    System.out.println("Test 3************************************");
    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = null;
    try {
        cluster.waitActive();
        fs = cluster.getFileSystem();
        // create file1.
        Path dir1 = new Path("/user/dir1");
        Path file1 = new Path(dir1, "file1");
        FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
        System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
        TestFileCreation.writeFile(stm1);
        // hflush (not close) keeps the file open so its write lease stays active
        stm1.hflush();
        Path dir2 = new Path("/user/dir2");
        fs.mkdirs(dir2);
        // rename the still-open file into the existing directory
        fs.rename(file1, dir2);
        // restart cluster.
        // This ensures that leases are persisted in fsimage.
        cluster.shutdown();
        try {
            Thread.sleep(2 * MAX_IDLE_TIME);
        } catch (InterruptedException e) {
            // restore the interrupt status instead of silently swallowing it
            Thread.currentThread().interrupt();
        }
        cluster = new MiniDFSCluster.Builder(conf).format(false).build();
        cluster.waitActive();
        // restart cluster yet again. This triggers the code to read in
        // persistent leases from fsimage.
        cluster.shutdown();
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
            // restore the interrupt status instead of silently swallowing it
            Thread.currentThread().interrupt();
        }
        cluster = new MiniDFSCluster.Builder(conf).format(false).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        Path newfile = new Path("/user/dir2", "file1");
        assertTrue(!fs.exists(file1));
        assertTrue(fs.exists(newfile));
        checkFullFile(fs, newfile);
    } finally {
        // fs is still null if cluster startup failed before getFileSystem();
        // guard so an NPE here cannot mask the original exception
        if (fs != null) {
            fs.close();
        }
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)

Example 28 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

From the class TestRenameWhileOpen, the method testWhileOpenRenameParent:

/**
 * open /user/dir1/file1 /user/dir2/file2
 * mkdir /user/dir3
 * move /user/dir1 /user/dir3
 *
 * Verifies that renaming the parent directory of a file that is still open
 * for write survives two cluster restarts, with leases and a pending
 * ADD_BLOCK_OP recovered from the edit log.
 */
@Test
public void testWhileOpenRenameParent() throws IOException {
    Configuration conf = new HdfsConfiguration();
    // 2s
    final int MAX_IDLE_TIME = 2000;
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, TestFileCreation.blockSize);
    // create cluster
    System.out.println("Test 1*****************************");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = null;
    try {
        cluster.waitActive();
        fs = cluster.getFileSystem();
        // Normally, the in-progress edit log would be finalized by
        // FSEditLog#endCurrentLogSegment.  For testing purposes, we
        // disable that here.
        FSEditLog spyLog = spy(cluster.getNameNode().getFSImage().getEditLog());
        doNothing().when(spyLog).endCurrentLogSegment(Mockito.anyBoolean());
        DFSTestUtil.setEditLogForTesting(cluster.getNamesystem(), spyLog);
        // create file1.
        Path dir1 = new Path("/user/a+b/dir1");
        Path file1 = new Path(dir1, "file1");
        FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
        System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
        TestFileCreation.writeFile(stm1);
        // hflush (not close) keeps file1 open so its write lease stays active
        stm1.hflush();
        // create file2.
        Path dir2 = new Path("/user/dir2");
        Path file2 = new Path(dir2, "file2");
        FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
        System.out.println("testFileCreationDeleteParent: " + "Created file " + file2);
        TestFileCreation.writeFile(stm2);
        stm2.hflush();
        // move dir1 while file1 is open
        Path dir3 = new Path("/user/dir3");
        fs.mkdirs(dir3);
        fs.rename(dir1, dir3);
        // create file3
        Path file3 = new Path(dir3, "file3");
        FSDataOutputStream stm3 = fs.create(file3);
        fs.rename(file3, new Path(dir3, "bozo"));
        // Get a new block for the file.
        TestFileCreation.writeFile(stm3, TestFileCreation.blockSize + 1);
        stm3.hflush();
        // Stop the NameNode before closing the files.
        // This will ensure that the write leases are still active and present
        // in the edit log.  Similarly, there should be a pending ADD_BLOCK_OP
        // for file3, since we just added a block to that file.
        cluster.getNameNode().stop();
        // Restart cluster.
        cluster.shutdown();
        try {
            Thread.sleep(2 * MAX_IDLE_TIME);
        } catch (InterruptedException e) {
            // restore the interrupt status instead of silently swallowing it
            Thread.currentThread().interrupt();
        }
        cluster = new MiniDFSCluster.Builder(conf).format(false).build();
        cluster.waitActive();
        // restart cluster yet again. This triggers the code to read in
        // persistent leases from the edit log.
        cluster.shutdown();
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
            // restore the interrupt status instead of silently swallowing it
            Thread.currentThread().interrupt();
        }
        cluster = new MiniDFSCluster.Builder(conf).format(false).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        Path newfile = new Path("/user/dir3/dir1", "file1");
        assertTrue(!fs.exists(file1));
        assertTrue(fs.exists(file2));
        assertTrue(fs.exists(newfile));
        checkFullFile(fs, newfile);
    } finally {
        // fs is still null if cluster startup failed before getFileSystem();
        // guard so an NPE here cannot mask the original exception
        if (fs != null) {
            fs.close();
        }
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FSEditLog(org.apache.hadoop.hdfs.server.namenode.FSEditLog) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)

Example 29 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

From the class TestReplaceDatanodeOnFailure, the method testBestEffort:

/**
 * With Policy.ALWAYS and bestEffort=true configured via
 * ReplaceDatanodeOnFailure.write, a write followed by an append on a
 * single-datanode cluster must complete without throwing, even though a
 * replacement datanode can never be found.
 */
@Test
public void testBestEffort() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    //always replace a datanode but do not throw exception
    ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
        final DistributedFileSystem fs = cluster.getFileSystem();
        final Path f = new Path(DIR, "testIgnoreReplaceFailure");
        final byte[] bytes = new byte[1000];
        {
            LOG.info("write " + bytes.length + " bytes to " + f);
            // try-with-resources: the stream is closed even if write() throws
            try (FSDataOutputStream out = fs.create(f, REPLICATION)) {
                out.write(bytes);
            }
            final FileStatus status = fs.getFileStatus(f);
            Assert.assertEquals(REPLICATION, status.getReplication());
            Assert.assertEquals(bytes.length, status.getLen());
        }
        {
            LOG.info("append another " + bytes.length + " bytes to " + f);
            try (FSDataOutputStream out = fs.append(f)) {
                out.write(bytes);
            }
        }
    } finally {
        // cluster is a final local assigned before the try block, so it is
        // never null here; the former null check was dead code
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)

Example 30 with FSDataOutputStream

use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

From the class TestReplaceDatanodeOnFailure, the method testAppend:

/**
 * Append behavior when the requested replication (3) exceeds the number of
 * datanodes (1): creating and one append succeed on the under-replicated
 * file, while a second append is expected to fail with an IOException
 * (presumably from the default replace-datanode-on-failure policy — the
 * policy is not overridden here, unlike in testBestEffort).
 */
@Test
public void testAppend() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // replication (3) deliberately exceeds the single datanode started below
    final short REPLICATION = (short) 3;
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
        final DistributedFileSystem fs = cluster.getFileSystem();
        final Path f = new Path(DIR, "testAppend");
        {
            LOG.info("create an empty file " + f);
            fs.create(f, REPLICATION).close();
            final FileStatus status = fs.getFileStatus(f);
            Assert.assertEquals(REPLICATION, status.getReplication());
            Assert.assertEquals(0L, status.getLen());
        }
        final byte[] bytes = new byte[1000];
        {
            LOG.info("append " + bytes.length + " bytes to " + f);
            // try-with-resources: the stream is closed even if write() throws
            try (FSDataOutputStream out = fs.append(f)) {
                out.write(bytes);
            }
            final FileStatus status = fs.getFileStatus(f);
            Assert.assertEquals(REPLICATION, status.getReplication());
            Assert.assertEquals(bytes.length, status.getLen());
        }
        {
            LOG.info("append another " + bytes.length + " bytes to " + f);
            // Keep the explicit close() here (no try-with-resources): the
            // expected IOException may be thrown by close(), and it must be
            // caught below rather than suppressed under Assert.fail()'s error.
            try {
                final FSDataOutputStream out = fs.append(f);
                out.write(bytes);
                out.close();
                Assert.fail();
            } catch (IOException ioe) {
                LOG.info("This exception is expected", ioe);
            }
        }
    } finally {
        // cluster is a final local assigned before the try block, so it is
        // never null here; the former null check was dead code
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) Test(org.junit.Test)

Aggregations

FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)789 Path (org.apache.hadoop.fs.Path)618 Test (org.junit.Test)345 FileSystem (org.apache.hadoop.fs.FileSystem)248 Configuration (org.apache.hadoop.conf.Configuration)190 IOException (java.io.IOException)163 FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)94 IgfsPath (org.apache.ignite.igfs.IgfsPath)78 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)66 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)65 FileStatus (org.apache.hadoop.fs.FileStatus)57 FsPermission (org.apache.hadoop.fs.permission.FsPermission)45 CreateFlag (org.apache.hadoop.fs.CreateFlag)43 FileNotFoundException (java.io.FileNotFoundException)40 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)40 ArrayList (java.util.ArrayList)38 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)33 LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks)31 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)30 Random (java.util.Random)28