Example 76 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

From the class TestClientProtocolForPipelineRecovery, method testPipelineRecoveryForLastBlock:

/** Test whether corrupt replicas are detected correctly during pipeline
   * recoveries.
   */
@Test
public void testPipelineRecoveryForLastBlock() throws IOException {
    DFSClientFaultInjector faultInjector = Mockito.mock(DFSClientFaultInjector.class);
    DFSClientFaultInjector oldInjector = DFSClientFaultInjector.get();
    DFSClientFaultInjector.set(faultInjector);
    Configuration conf = new HdfsConfiguration();
    conf.setInt(HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY, 3);
    MiniDFSCluster cluster = null;
    try {
        int numDataNodes = 3;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        FileSystem fileSys = cluster.getFileSystem();
        Path file = new Path("dataprotocol1.dat");
        Mockito.when(faultInjector.failPacket()).thenReturn(true);
        DFSTestUtil.createFile(fileSys, file, 68000000L, (short) numDataNodes, 0L);
        // At this point, NN should have accepted only valid replicas.
        // Read should succeed.
        FSDataInputStream in = fileSys.open(file);
        try {
            // Test will fail with BlockMissingException if NN does not update the
            // replica state based on the latest report.
            in.read();
        } catch (org.apache.hadoop.hdfs.BlockMissingException bme) {
            Assert.fail("Block is missing because the file was closed with corrupt replicas.");
        }
    } finally {
        DFSClientFaultInjector.set(oldInjector);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), Test (org.junit.Test)
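
The single in.read() above only checks the first byte. A stricter readability check would drain the whole file, the same way Example 78 below does; a minimal sketch reusing the test's fileSys, file, and conf:

// Drain the entire file so a BlockMissingException on any block surfaces,
// not just one on the first byte; the 'true' flag tells copyBytes to close
// both streams when it finishes.
IOUtils.copyBytes(fileSys.open(file), new IOUtils.NullOutputStream(), conf, true);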

Example 77 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

From the class TestClose, method testWriteAfterClose:

@Test
public void testWriteAfterClose() throws IOException {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        final byte[] data = "foo".getBytes();
        FileSystem fs = FileSystem.get(conf);
        OutputStream out = fs.create(new Path("/test"));
        out.write(data);
        out.close();
        try {
            // Should fail.
            out.write(data);
            fail("Should not have been able to write more data after file is closed.");
        } catch (ClosedChannelException cce) {
        // We got the correct exception. Ignoring.
        }
        // Should succeed. Double closes are OK.
        out.close();
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), ClosedChannelException (java.nio.channels.ClosedChannelException), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), OutputStream (java.io.OutputStream), Test (org.junit.Test)
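
Since FSDataOutputStream is Closeable, the happy-path write in this test can also be expressed with try-with-resources, which guarantees exactly one close on scope exit; a minimal sketch using the same fs and data:

// The stream is closed automatically even if write() throws; and because
// double closes are safe (as the test verifies), an extra manual close()
// afterwards would still be harmless.
try (OutputStream o = fs.create(new Path("/test"))) {
    o.write(data);
}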

Example 78 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

From the class TestCrcCorruption, method doTestEntirelyCorruptFile:

private void doTestEntirelyCorruptFile(int numDataNodes) throws Exception {
    long fileSize = 4096;
    Path file = new Path("/testFile");
    short replFactor = (short) numDataNodes;
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, file, fileSize, replFactor, 12345L);
        DFSTestUtil.waitReplication(fs, file, replFactor);
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
        int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
        assertEquals("All replicas not corrupted", replFactor, blockFilesCorrupted);
        try {
            IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(), conf, true);
            fail("Didn't get exception");
        } catch (IOException ioe) {
            DFSClient.LOG.info("Got expected exception", ioe);
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), IOUtils (org.apache.hadoop.io.IOUtils), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), IOException (java.io.IOException)
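
For readers unfamiliar with the DFSTestUtil helpers used above, here is the same sequence of calls with the parameter roles spelled out in comments; the variable names are the ones from the test:

// createFile(fs, path, length, replication, seed): writes 'length' bytes of
// deterministic pseudo-random data derived from 'seed'.
DFSTestUtil.createFile(fs, file, fileSize, replFactor, 12345L);
// waitReplication(fs, path, replication): blocks until every block of the
// file reports the requested number of live replicas.
DFSTestUtil.waitReplication(fs, file, replFactor);
// getFirstBlock(fs, path): returns the ExtendedBlock of the file's first
// block, which corruptBlockOnDataNodes then damages on every datanode.
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);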

Example 79 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

From the class TestCrcCorruption, method testCorruptionDuringWrt:

/** 
   * Test case for data corruption during data transmission for
   * create/write. To recover from corruption while writing, at
   * least two replicas are needed.
   */
@Test(timeout = 50000)
public void testCorruptionDuringWrt() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(10).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        Path file = new Path("/test_corruption_file");
        FSDataOutputStream out = fs.create(file, true, 8192, (short) 3, (long) (128 * 1024 * 1024));
        byte[] data = new byte[65536];
        for (int i = 0; i < 65536; i++) {
            data[i] = (byte) (i % 256);
        }
        for (int i = 0; i < 5; i++) {
            out.write(data, 0, 65535);
        }
        out.hflush();
        // Corrupt the next packet once; faultInjector is a class-level
        // DFSClientFaultInjector mock (see the sketch after this example).
        Mockito.when(faultInjector.corruptPacket()).thenReturn(true, false);
        Mockito.when(faultInjector.uncorruptPacket()).thenReturn(true, false);
        for (int i = 0; i < 5; i++) {
            out.write(data, 0, 65535);
        }
        out.close();
        // read should succeed
        FSDataInputStream in = fs.open(file);
        for (int c; (c = in.read()) != -1; ) ;
        in.close();
        // test the retry limit
        out = fs.create(file, true, 8192, (short) 3, (long) (128 * 1024 * 1024));
        // corrupt the packet once and never fix it.
        Mockito.when(faultInjector.corruptPacket()).thenReturn(true, false);
        Mockito.when(faultInjector.uncorruptPacket()).thenReturn(false);
        // the client should give up pipeline reconstruction after retries.
        try {
            for (int i = 0; i < 5; i++) {
                out.write(data, 0, 65535);
            }
            out.close();
            fail("Write did not fail");
        } catch (IOException ioe) {
            // we should get an ioe
            DFSClient.LOG.info("Got expected exception", ioe);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        Mockito.when(faultInjector.corruptPacket()).thenReturn(false);
        Mockito.when(faultInjector.uncorruptPacket()).thenReturn(false);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), IOException (java.io.IOException), Test (org.junit.Test)
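
The faultInjector referenced throughout this test is a class-level field whose setup the excerpt omits. A hypothetical reconstruction, mirroring the injector-swap pattern from Example 76 (the real test class may differ):

// Hypothetical fixture: a Mockito mock of DFSClientFaultInjector installed
// before each test, so corruptPacket()/uncorruptPacket() can be scripted
// per test as shown above.
private static DFSClientFaultInjector faultInjector;

@Before
public void setUp() {
    faultInjector = Mockito.mock(DFSClientFaultInjector.class);
    DFSClientFaultInjector.set(faultInjector);
}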

Example 80 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

From the class TestDFSClientExcludedNodes, method testExcludedNodes:

@Test(timeout = 60000)
public void testExcludedNodes() throws IOException {
    // 'conf' and 'cluster' are class-level fields of the test (see the
    // fixture sketch after this example).
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs = cluster.getFileSystem();
    Path filePath = new Path("/testExcludedNodes");
    // kill a datanode
    cluster.stopDataNode(AppendTestUtil.nextInt(3));
    OutputStream out = fs.create(filePath, true, 4096, (short) 3, fs.getDefaultBlockSize(filePath));
    out.write(20);
    try {
        out.close();
    } catch (Exception e) {
        fail("Single DN failure should not result in a block abort: \n" + e.getMessage());
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileSystem (org.apache.hadoop.fs.FileSystem), OutputStream (java.io.OutputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), IOException (java.io.IOException), Test (org.junit.Test)
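
As noted in the code comment, conf and cluster are class-level fields this excerpt omits. A hypothetical reconstruction of the surrounding fixture (the real test class may differ):

// Hypothetical fixture: shared configuration and cluster fields, with an
// @After hook so the mini-cluster is shut down even when a test fails.
private Configuration conf = new HdfsConfiguration();
private MiniDFSCluster cluster;

@After
public void tearDown() {
    if (cluster != null) {
        cluster.shutdown();
        cluster = null;
    }
}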

Aggregations

FileSystem (org.apache.hadoop.fs.FileSystem): 2611
Path (org.apache.hadoop.fs.Path): 2199
Test (org.junit.Test): 1034
Configuration (org.apache.hadoop.conf.Configuration): 890
IOException (java.io.IOException): 757
FileStatus (org.apache.hadoop.fs.FileStatus): 419
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 264
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 227
ArrayList (java.util.ArrayList): 208
File (java.io.File): 181
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 165
JobConf (org.apache.hadoop.mapred.JobConf): 163
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 151
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 145
URI (java.net.URI): 135
SequenceFile (org.apache.hadoop.io.SequenceFile): 118
Text (org.apache.hadoop.io.Text): 112
FileNotFoundException (java.io.FileNotFoundException): 102
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 94
Job (org.apache.hadoop.mapreduce.Job): 81