
Example 16 with ChecksumException

Use of org.apache.hadoop.fs.ChecksumException in project hadoop by apache.

From the class TestBlockReaderLocalLegacy, method testStablePositionAfterCorruptRead.

/**
   * Test that, in the case of an error, the position and limit of a ByteBuffer
   * are left unchanged. This is not mandated by ByteBufferReadable, but clients
   * of this class might immediately issue a retry on failure, so it's polite.
   */
@Test
public void testStablePositionAfterCorruptRead() throws Exception {
    final short REPL_FACTOR = 1;
    final long FILE_LENGTH = 512L;
    HdfsConfiguration conf = getConfiguration(null);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/corrupted");
    DFSTestUtil.createFile(fs, path, FILE_LENGTH, REPL_FACTOR, 12345L);
    DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
    int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
    assertEquals("All replicas not corrupted", REPL_FACTOR, blockFilesCorrupted);
    FSDataInputStream dis = cluster.getFileSystem().open(path);
    ByteBuffer buf = ByteBuffer.allocateDirect((int) FILE_LENGTH);
    boolean sawException = false;
    try {
        dis.read(buf);
    } catch (ChecksumException ex) {
        sawException = true;
    }
    assertTrue(sawException);
    // On failure, the buffer must look exactly as it did before the read.
    assertEquals(0, buf.position());
    assertEquals(buf.capacity(), buf.limit());
    // Retry with a fresh stream and a non-default position/limit.
    dis = cluster.getFileSystem().open(path);
    buf.position(3);
    buf.limit(25);
    sawException = false;
    try {
        dis.read(buf);
    } catch (ChecksumException ex) {
        sawException = true;
    }
    assertTrue(sawException);
    // Position and limit are again untouched by the failed read.
    assertEquals(3, buf.position());
    assertEquals(25, buf.limit());
    cluster.shutdown();
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) ChecksumException(org.apache.hadoop.fs.ChecksumException) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ByteBuffer(java.nio.ByteBuffer) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) Test(org.junit.Test)
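The guarantee this test pins down is what makes a blind client-side retry safe. Below is a minimal sketch (not from the Hadoop sources; RetryingReader and readWithRetry are hypothetical names) of how a caller might lean on it, assuming the stream supports ByteBuffer reads:

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataInputStream;

class RetryingReader {
    // Retry a ByteBuffer read; no rewinding is needed between attempts because a
    // failed read leaves the buffer's position and limit exactly as they were.
    // Assumes maxAttempts >= 1.
    static int readWithRetry(FSDataInputStream in, ByteBuffer buf, int maxAttempts)
            throws IOException {
        ChecksumException last = null;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            try {
                return in.read(buf);
            } catch (ChecksumException e) {
                last = e;
            }
        }
        throw last;
    }
}

In practice such a retry only helps if a different, healthy replica can be chosen for the next attempt.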

Example 17 with ChecksumException

Use of org.apache.hadoop.fs.ChecksumException in project hadoop by apache.

From the class TestEditLog, method testEditChecksum.

@Test
public void testEditChecksum() throws Exception {
    // start a cluster 
    Configuration conf = getConf();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();
    FSImage fsimage = namesystem.getFSImage();
    final FSEditLog editLog = fsimage.getEditLog();
    fileSys.mkdirs(new Path("/tmp"));
    Iterator<StorageDirectory> iter = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS);
    LinkedList<StorageDirectory> sds = new LinkedList<StorageDirectory>();
    while (iter.hasNext()) {
        sds.add(iter.next());
    }
    editLog.close();
    cluster.shutdown();
    for (StorageDirectory sd : sds) {
        File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3);
        assertTrue(editFile.exists());
        long fileLen = editFile.length();
        LOG.debug("Corrupting Log File: " + editFile + " len: " + fileLen);
        RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
        // seek to the trailing 4-byte checksum and flip it by one
        rwf.seek(fileLen - 4);
        int b = rwf.readInt();
        rwf.seek(fileLen - 4);
        rwf.writeInt(b + 1);
        rwf.close();
    }
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
        fail("should not be able to start");
    } catch (IOException e) {
        // expected
        assertNotNull("Cause of exception should be ChecksumException", e.getCause());
        assertEquals("Cause of exception should be ChecksumException", ChecksumException.class, e.getCause().getClass());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ChecksumException(org.apache.hadoop.fs.ChecksumException) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException) LinkedList(java.util.LinkedList) RandomAccessFile(java.io.RandomAccessFile) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) File(java.io.File) Test(org.junit.Test)
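The assertions above only look one level down the cause chain. A hypothetical helper (not part of Hadoop) that walks the whole chain is a slightly more robust way to test for the same condition when the ChecksumException may be wrapped more than once:

import org.apache.hadoop.fs.ChecksumException;

final class ChecksumCauses {
    // True if any exception in the cause chain is a ChecksumException.
    static boolean causedByChecksum(Throwable t) {
        for (Throwable c = t; c != null; c = c.getCause()) {
            if (c instanceof ChecksumException) {
                return true;
            }
        }
        return false;
    }
}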

Example 18 with ChecksumException

Use of org.apache.hadoop.fs.ChecksumException in project hadoop by apache.

From the class TestFsck, method testUnderMinReplicatedBlock.

@Test
public void testUnderMinReplicatedBlock() throws Exception {
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
    // Set minReplication to 2
    short minReplication = 2;
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, minReplication);
    FileSystem fs = null;
    DFSClient dfsClient = null;
    LocatedBlocks blocks = null;
    int replicaCount = 0;
    Random random = new Random();
    String outStr = null;
    short factor = 1;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    Path file1 = new Path("/testUnderMinReplicatedBlock");
    DFSTestUtil.createFile(fs, file1, 1024, minReplication, 0);
    // Wait until file replication has completed
    DFSTestUtil.waitReplication(fs, file1, minReplication);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
    // Make sure filesystem is in healthy state
    outStr = runFsck(conf, 0, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    // corrupt the first replica
    File blockFile = cluster.getBlockFile(0, block);
    if (blockFile != null && blockFile.exists()) {
        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
        FileChannel channel = raFile.getChannel();
        String badString = "BADBAD";
        int rand = random.nextInt((int) channel.size() / 2);
        raFile.seek(rand);
        raFile.write(badString.getBytes());
        raFile.close();
    }
    dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
    while (replicaCount != factor) {
        try {
            Thread.sleep(100);
            // Read the file to trigger reportBadBlocks
            try {
                IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf, true);
            } catch (IOException ie) {
                assertTrue(ie instanceof ChecksumException);
            }
            System.out.println("sleep in try: replicaCount=" + replicaCount + "  factor=" + factor);
        } catch (InterruptedException ignore) {
        }
        blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        replicaCount = blocks.get(0).getLocations().length;
    }
    // Check if fsck reports the same
    outStr = runFsck(conf, 0, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    assertTrue(outStr.contains("UNDER MIN REPL'D BLOCKS:\t1 (100.0 %)"));
    assertTrue(outStr.contains("dfs.namenode.replication.min:\t2"));
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileChannel(java.nio.channels.FileChannel) InetSocketAddress(java.net.InetSocketAddress) ChecksumException(org.apache.hadoop.fs.ChecksumException) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Matchers.anyString(org.mockito.Matchers.anyString) IOException(java.io.IOException) IOUtils(org.apache.hadoop.io.IOUtils) Random(java.util.Random) RandomAccessFile(java.io.RandomAccessFile) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) File(java.io.File) Test(org.junit.Test)
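The read inside the polling loop is there purely to force checksum verification; it is the resulting ChecksumException that makes the DFS client report the corrupt replica to the NameNode. A stand-alone sketch of that idea (VerifyRead is a hypothetical name, not from the Hadoop sources):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

final class VerifyRead {
    // Read the file end to end, discarding the bytes; returns false if a
    // checksum failure was hit (in which case the client has seen, and will
    // report, a corrupt replica).
    static boolean verify(FileSystem fs, Path path, Configuration conf) throws IOException {
        try {
            IOUtils.copyBytes(fs.open(path), new IOUtils.NullOutputStream(), conf, true);
            return true;
        } catch (ChecksumException e) {
            return false;
        }
    }
}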

Example 19 with ChecksumException

Use of org.apache.hadoop.fs.ChecksumException in project hadoop by apache.

From the class TestIFileStreams, method testBadLength.

@Test
public void testBadLength() throws Exception {
    final int DLEN = 100;
    DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);
    IFileOutputStream ifos = new IFileOutputStream(dob);
    for (int i = 0; i < DLEN; ++i) {
        ifos.write(i);
    }
    ifos.close();
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(dob.getData(), DLEN + 4);
    // Lie about the length: 100 instead of the real 104, so the stream treats
    // the last four of those declared bytes as the checksum and verification fails.
    IFileInputStream ifis = new IFileInputStream(dib, 100, new Configuration());
    int i = 0;
    try {
        while (i < DLEN - 8) {
            assertEquals(i++, ifis.read());
        }
        ifis.close();
    } catch (ChecksumException e) {
        assertEquals("Checksum before close", i, DLEN - 8);
        return;
    }
    fail("Did not detect bad data in checksum");
}
Also used : DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) Configuration(org.apache.hadoop.conf.Configuration) ChecksumException(org.apache.hadoop.fs.ChecksumException) DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer) Test(org.junit.Test)
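For contrast with the failure cases in these two tests, here is a minimal happy-path sketch (assuming the same IFileOutputStream/IFileInputStream classes, which sit in org.apache.hadoop.mapred alongside this test): when the declared length covers both the data and the trailing 4-byte checksum, reading the stream back verifies cleanly and no ChecksumException is thrown.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.mapred.IFileInputStream;
import org.apache.hadoop.mapred.IFileOutputStream;

class IFileRoundTrip {
    static void roundTrip() throws Exception {
        final int DLEN = 100;
        DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);
        IFileOutputStream ifos = new IFileOutputStream(dob);
        for (int i = 0; i < DLEN; ++i) {
            ifos.write(i);
        }
        // close() appends the 4-byte checksum after the data
        ifos.close();
        DataInputBuffer dib = new DataInputBuffer();
        dib.reset(dob.getData(), DLEN + 4);
        // Declared length includes the checksum, so verification succeeds.
        IFileInputStream ifis = new IFileInputStream(dib, DLEN + 4, new Configuration());
        for (int i = 0; i < DLEN; ++i) {
            int v = ifis.read();
            if (v != i) {
                throw new IllegalStateException("unexpected byte " + v + " at offset " + i);
            }
        }
        ifis.close();
    }
}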

Example 20 with ChecksumException

Use of org.apache.hadoop.fs.ChecksumException in project hadoop by apache.

From the class TestIFileStreams, method testBadIFileStream.

@Test
public void testBadIFileStream() throws Exception {
    final int DLEN = 100;
    DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);
    IFileOutputStream ifos = new IFileOutputStream(dob);
    for (int i = 0; i < DLEN; ++i) {
        ifos.write(i);
    }
    ifos.close();
    DataInputBuffer dib = new DataInputBuffer();
    final byte[] b = dob.getData();
    // Corrupt a single data byte; the declared length (104) still covers data + checksum,
    // so the mismatch is caught by checksum verification at the end of the stream.
    ++b[17];
    dib.reset(b, DLEN + 4);
    IFileInputStream ifis = new IFileInputStream(dib, 104, new Configuration());
    int i = 0;
    try {
        while (i < DLEN) {
            if (17 == i) {
                assertEquals(18, ifis.read());
            } else {
                assertEquals(i, ifis.read());
            }
            ++i;
        }
        ifis.close();
    } catch (ChecksumException e) {
        assertEquals("Unexpected bad checksum", DLEN - 1, i);
        return;
    }
    fail("Did not detect bad data in checksum");
}
Also used : DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) Configuration(org.apache.hadoop.conf.Configuration) ChecksumException(org.apache.hadoop.fs.ChecksumException) DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer) Test(org.junit.Test)

Aggregations

ChecksumException (org.apache.hadoop.fs.ChecksumException): 20
Path (org.apache.hadoop.fs.Path): 12
Test (org.junit.Test): 12
IOException (java.io.IOException): 9
Configuration (org.apache.hadoop.conf.Configuration): 7
FileSystem (org.apache.hadoop.fs.FileSystem): 7
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 5
File (java.io.File): 4
RandomAccessFile (java.io.RandomAccessFile): 4
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 4
DataOutputStream (java.io.DataOutputStream): 3
InterruptedIOException (java.io.InterruptedIOException): 3
InetSocketAddress (java.net.InetSocketAddress): 3
ByteBuffer (java.nio.ByteBuffer): 3
DFSClient (org.apache.hadoop.hdfs.DFSClient): 3
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 3