
Example 21 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

Class TestClientProtocolForPipelineRecovery, method testEvictWriter.

/**
   * Test that the writer is kicked out of a node.
   */
@Test
public void testEvictWriter() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        Path file = new Path("testEvictWriter.dat");
        FSDataOutputStream out = fs.create(file, (short) 2);
        out.write(0x31);
        out.hflush();
        // get nodes in the pipeline
        DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
        DatanodeInfo[] nodes = dfsOut.getPipeline();
        Assert.assertEquals(2, nodes.length);
        String dnAddr = nodes[1].getIpcAddr(false);
        // evict the writer from the second datanode and wait until
        // the pipeline is rebuilt.
        DFSAdmin dfsadmin = new DFSAdmin(conf);
        final String[] args1 = { "-evictWriters", dnAddr };
        Assert.assertEquals(0, dfsadmin.run(args1));
        out.write(0x31);
        out.hflush();
        // get the new pipeline and check that the evicted node is not in it.
        nodes = dfsOut.getPipeline();
        try {
            Assert.assertTrue(nodes.length > 0);
            for (int i = 0; i < nodes.length; i++) {
                Assert.assertFalse(dnAddr.equals(nodes[i].getIpcAddr(false)));
            }
        } finally {
            out.close();
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
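For context, the pattern the test depends on is create/write/hflush before inspecting the pipeline: hflush() pushes a packet through, so the pipeline is established by the time getPipeline() is called. A minimal sketch of just that sequence, assuming a FileSystem from a running MiniDFSCluster; note that DFSOutputStream.getPipeline() is not public API, which is why the test itself lives in the org.apache.hadoop.hdfs package.

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

// Sketch: open a file with replication 2 and print its write pipeline.
// Assumes `fs` comes from a running MiniDFSCluster, as in the test above.
static void printPipeline(FileSystem fs) throws Exception {
    FSDataOutputStream out = fs.create(new Path("/pipeline-demo.dat"), (short) 2);
    try {
        out.write(0x31);
        // hflush() forces the packet down the pipeline, so the pipeline
        // exists before we ask for it.
        out.hflush();
        DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
        for (DatanodeInfo node : dfsOut.getPipeline()) {
            System.out.println("pipeline node: " + node.getIpcAddr(false));
        }
    } finally {
        out.close();
    }
}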

Example 22 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

Class TestClientProtocolForPipelineRecovery, method testPacketTransmissionDelay.

@Test
public void testPacketTransmissionDelay() throws Exception {
    // Make the first datanode drop heartbeat packets instead of relaying them.
    DataNodeFaultInjector dnFaultInjector = new DataNodeFaultInjector() {

        @Override
        public boolean dropHeartbeatPacket() {
            return true;
        }
    };
    DataNodeFaultInjector oldDnInjector = DataNodeFaultInjector.get();
    DataNodeFaultInjector.set(dnFaultInjector);
    // Set the timeout to 3 seconds. Normally a heartbeat packet
    // would be sent every 1.5 seconds if there is no data traffic.
    Configuration conf = new HdfsConfiguration();
    conf.set(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, "3000");
    MiniDFSCluster cluster = null;
    try {
        int numDataNodes = 2;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        FSDataOutputStream out = fs.create(new Path("noheartbeat.dat"), (short) 2);
        out.write(0x31);
        out.hflush();
        DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
        // original pipeline
        DatanodeInfo[] orgNodes = dfsOut.getPipeline();
        // Cause the second datanode to time out while reading a packet
        Thread.sleep(3500);
        out.write(0x32);
        out.hflush();
        // new pipeline
        DatanodeInfo[] newNodes = dfsOut.getPipeline();
        out.close();
        boolean contains = false;
        for (int i = 0; i < newNodes.length; i++) {
            if (orgNodes[0].getXferAddr().equals(newNodes[i].getXferAddr())) {
                throw new IOException("The first datanode should have been replaced.");
            }
            if (orgNodes[1].getXferAddr().equals(newNodes[i].getXferAddr())) {
                contains = true;
            }
        }
        Assert.assertTrue(contains);
    } finally {
        DataNodeFaultInjector.set(oldDnInjector);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Configuration (org.apache.hadoop.conf.Configuration), DataNodeFaultInjector (org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector), IOException (java.io.IOException), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
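The injector handling above follows a save/set/restore discipline: the previous injector is captured before the fake one is installed, and the finally block restores it so a failed run cannot leak the fault into later tests. A minimal sketch of that pattern; the injected behavior is illustrative.

import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;

// Sketch: install a fault injector for the duration of one scenario only.
DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
DataNodeFaultInjector.set(new DataNodeFaultInjector() {
    @Override
    public boolean dropHeartbeatPacket() {
        // Drop every heartbeat packet while this injector is installed.
        return true;
    }
});
try {
    // ... exercise the code path that should hit the fault ...
} finally {
    // Restore the previous injector even if the scenario throws.
    DataNodeFaultInjector.set(oldInjector);
}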

Example 23 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

Class TestClientProtocolForPipelineRecovery, method testZeroByteBlockRecovery.

/**
   * Test to make sure the checksum is set correctly after pipeline
   * recovery transfers a 0-byte partial block. If it fails, the test
   * case will report "java.io.IOException: Failed to replace a bad
   * datanode on the existing pipeline due to no more good datanodes
   * being available to try." This indicates there was a real failure
   * after the staged failure.
   */
@Test
public void testZeroByteBlockRecovery() throws Exception {
    // Make the first datanode fail once. With 3 nodes and a block being
    // created with 2 replicas, anything more than this planned failure
    // will cause a test failure.
    DataNodeFaultInjector dnFaultInjector = new DataNodeFaultInjector() {

        int tries = 1;

        @Override
        public void stopSendingPacketDownstream(final String mirrAddr) throws IOException {
            if (tries > 0) {
                tries--;
                try {
                    Thread.sleep(60000);
                } catch (InterruptedException ie) {
                    throw new IOException("Interrupted while sleeping. Bailing out.");
                }
            }
        }
    };
    DataNodeFaultInjector oldDnInjector = DataNodeFaultInjector.get();
    DataNodeFaultInjector.set(dnFaultInjector);
    Configuration conf = new HdfsConfiguration();
    conf.set(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, "1000");
    conf.set(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.POLICY_KEY, "ALWAYS");
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        FSDataOutputStream out = fs.create(new Path("noheartbeat.dat"), (short) 2);
        out.write(0x31);
        out.hflush();
        out.close();
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        DataNodeFaultInjector.set(oldDnInjector);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), DataNodeFaultInjector (org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
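The anonymous injector above fires exactly once by counting down a tries field. A thread-safe variant of the same one-shot idea, sketched with AtomicInteger; the class name and stall duration are illustrative.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;

// Sketch: stall downstream packet forwarding exactly `n` times, then behave normally.
class CountedStallInjector extends DataNodeFaultInjector {
    private final AtomicInteger remaining;

    CountedStallInjector(int n) {
        this.remaining = new AtomicInteger(n);
    }

    @Override
    public void stopSendingPacketDownstream(String mirrAddr) throws IOException {
        if (remaining.getAndDecrement() > 0) {
            try {
                // Sleep well past the client's 1-second socket timeout so
                // the pipeline recovery path is exercised.
                Thread.sleep(60000);
            } catch (InterruptedException ie) {
                throw new IOException("Interrupted while stalling.", ie);
            }
        }
    }
}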

Example 24 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

Class TestCrcCorruption, method testCorruptionDuringWrt.

/** 
   * Test case for data corruption during transmission on
   * create/write. To recover from corruption while writing, at
   * least two replicas are needed.
   */
@Test(timeout = 50000)
public void testCorruptionDuringWrt() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(10).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        Path file = new Path("/test_corruption_file");
        FSDataOutputStream out = fs.create(file, true, 8192, (short) 3, (long) (128 * 1024 * 1024));
        byte[] data = new byte[65536];
        for (int i = 0; i < 65536; i++) {
            data[i] = (byte) (i % 256);
        }
        for (int i = 0; i < 5; i++) {
            out.write(data, 0, 65535);
        }
        out.hflush();
        // corrupt the packet once
        Mockito.when(faultInjector.corruptPacket()).thenReturn(true, false);
        Mockito.when(faultInjector.uncorruptPacket()).thenReturn(true, false);
        for (int i = 0; i < 5; i++) {
            out.write(data, 0, 65535);
        }
        out.close();
        // read should succeed
        FSDataInputStream in = fs.open(file);
        for (int c; (c = in.read()) != -1; ) ;
        in.close();
        // test the retry limit
        out = fs.create(file, true, 8192, (short) 3, (long) (128 * 1024 * 1024));
        // corrupt the packet once and never fix it.
        Mockito.when(faultInjector.corruptPacket()).thenReturn(true, false);
        Mockito.when(faultInjector.uncorruptPacket()).thenReturn(false);
        // the client should give up pipeline reconstruction after retries.
        try {
            for (int i = 0; i < 5; i++) {
                out.write(data, 0, 65535);
            }
            out.close();
            fail("Write did not fail");
        } catch (IOException ioe) {
            // we should get an ioe
            DFSClient.LOG.info("Got expected exception", ioe);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        Mockito.when(faultInjector.corruptPacket()).thenReturn(false);
        Mockito.when(faultInjector.uncorruptPacket()).thenReturn(false);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), IOException (java.io.IOException), Test (org.junit.Test)
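One detail above is easy to miss: Mockito's thenReturn(true, false) stubs consecutive calls, returning true on the first invocation and false on every invocation after it, which is how the test corrupts exactly one packet. A standalone sketch of that behavior; the Fault interface is illustrative, not from the source.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

interface Fault {
    boolean corruptPacket();
}

// Sketch: consecutive stubbing returns each value in turn, then repeats the last one.
Fault fault = mock(Fault.class);
when(fault.corruptPacket()).thenReturn(true, false);
System.out.println(fault.corruptPacket()); // true  -> corrupt the first packet
System.out.println(fault.corruptPacket()); // false -> later packets untouched
System.out.println(fault.corruptPacket()); // false -> the last value repeats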

Example 25 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.

Class TestAppendDifferentChecksum, method testAlgoSwitchRandomized.

/**
   * Test which randomly alternates between appending with
   * CRC32 and with CRC32C, crossing several block boundaries.
   * Then, checks that all of the data can be read back correctly.
   */
@Test(timeout = RANDOM_TEST_RUNTIME * 2)
public void testAlgoSwitchRandomized() throws IOException {
    FileSystem fsWithCrc32 = createFsWithChecksum("CRC32", 512);
    FileSystem fsWithCrc32C = createFsWithChecksum("CRC32C", 512);
    Path p = new Path("/testAlgoSwitchRandomized");
    long seed = Time.now();
    System.out.println("seed: " + seed);
    Random r = new Random(seed);
    // Create an empty file to start
    IOUtils.closeStream(fsWithCrc32.create(p));
    long st = Time.now();
    int len = 0;
    while (Time.now() - st < RANDOM_TEST_RUNTIME) {
        int thisLen = r.nextInt(500);
        FileSystem fs = (r.nextBoolean() ? fsWithCrc32 : fsWithCrc32C);
        FSDataOutputStream stm = fs.append(p);
        try {
            AppendTestUtil.write(stm, len, thisLen);
        } finally {
            stm.close();
        }
        len += thisLen;
    }
    AppendTestUtil.check(fsWithCrc32, p, len);
    AppendTestUtil.check(fsWithCrc32C, p, len);
}
Also used: Path (org.apache.hadoop.fs.Path), Random (java.util.Random), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
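createFsWithChecksum is a helper defined elsewhere in the test class. A plausible sketch of what such a helper configures, using the standard HDFS keys dfs.checksum.type and dfs.bytes-per-checksum; this is an assumption, and the real helper may differ.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

// Sketch: a FileSystem instance whose writes use the given checksum algorithm.
// Key names are the standard HDFS ones; the test's actual helper may differ.
static FileSystem createFsWithChecksum(FileSystem base, String type, int bytesPerChecksum)
        throws IOException {
    Configuration conf = new Configuration(base.getConf());
    conf.set("dfs.checksum.type", type); // e.g. "CRC32" or "CRC32C"
    conf.setInt("dfs.bytes-per-checksum", bytesPerChecksum);
    return FileSystem.newInstance(base.getUri(), conf);
}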

Aggregations

FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 789 usages
Path (org.apache.hadoop.fs.Path): 618 usages
Test (org.junit.Test): 345 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 248 usages
Configuration (org.apache.hadoop.conf.Configuration): 190 usages
IOException (java.io.IOException): 163 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 94 usages
IgfsPath (org.apache.ignite.igfs.IgfsPath): 78 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 66 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 65 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 57 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 45 usages
CreateFlag (org.apache.hadoop.fs.CreateFlag): 43 usages
FileNotFoundException (java.io.FileNotFoundException): 40 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 40 usages
ArrayList (java.util.ArrayList): 38 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 33 usages
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 31 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 30 usages
Random (java.util.Random): 28 usages