Example 1 with MaterializedReplica

Use of org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica in project hadoop by apache.

From class TestBlockScanner, method testIgnoreMisplacedBlock.

/**
   * Test that blocks which are in the wrong location are ignored.
   */
@Test(timeout = 120000)
public void testIgnoreMisplacedBlock() throws Exception {
    Configuration conf = new Configuration();
    // Set a really long scan period.
    conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
    conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER, TestScanResultHandler.class.getName());
    conf.setLong(INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS, 0L);
    final TestContext ctx = new TestContext(conf, 1);
    final int NUM_FILES = 4;
    ctx.createFiles(0, NUM_FILES, 5);
    MaterializedReplica unreachableReplica = ctx.getMaterializedReplica(0, 1);
    ExtendedBlock unreachableBlock = ctx.getFileBlock(0, 1);
    unreachableReplica.makeUnreachable();
    final TestScanResultHandler.Info info = TestScanResultHandler.getInfo(ctx.volumes.get(0));
    String storageID = ctx.volumes.get(0).getStorageID();
    synchronized (info) {
        info.sem = new Semaphore(NUM_FILES);
        info.shouldRun = true;
        info.notify();
    }
    // Let the scanner run; only the 3 reachable blocks can actually be scanned.
    LOG.info("Waiting for the blocks to be scanned.");
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            synchronized (info) {
                if (info.blocksScanned >= NUM_FILES - 1) {
                    LOG.info("info = {}.  blockScanned has now reached " + info.blocksScanned, info);
                    return true;
                } else {
                    LOG.info("info = {}.  Waiting for blockScanned to reach " + (NUM_FILES - 1), info);
                    return false;
                }
            }
        }
    }, 50, 30000);
    // We should have scanned only 3 blocks; the unreachable replica is ignored
    synchronized (info) {
        assertFalse(info.goodBlocks.contains(unreachableBlock));
        assertFalse(info.badBlocks.contains(unreachableBlock));
        assertEquals("Expected 3 good blocks.", 3, info.goodBlocks.size());
        info.goodBlocks.clear();
        assertEquals("Expected 3 blocksScanned", 3, info.blocksScanned);
        assertEquals("Did not expect bad blocks.", 0, info.badBlocks.size());
        info.blocksScanned = 0;
    }
    info.sem.release(1);
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), MaterializedReplica (org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Semaphore (java.util.concurrent.Semaphore), Test (org.junit.Test)
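
The key call here is makeUnreachable(), which presumably renders the replica's on-disk files inaccessible to the DataNode so that the scanner treats the block as misplaced and skips it. The minimal pattern, extracted from the test above (a sketch assuming a TestContext like the one shown):

MaterializedReplica replica = ctx.getMaterializedReplica(0, 1);
replica.makeUnreachable();
// After the scan completes, this block should appear in neither
// info.goodBlocks nor info.badBlocks, mirroring the assertions above.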

Example 2 with MaterializedReplica

Use of org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica in project hadoop by apache.

From class TestDFSShell, method getMaterializedReplicas.

private static List<MaterializedReplica> getMaterializedReplicas(MiniDFSCluster cluster) throws IOException {
    List<MaterializedReplica> replicas = new ArrayList<>();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    // One block report per datanode, keyed by storage.
    List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
    for (int i = 0; i < blocks.size(); i++) {
        Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
        for (Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
            for (Block b : e.getValue()) {
                // The report index i doubles as the datanode index.
                replicas.add(cluster.getMaterializedReplica(i, new ExtendedBlock(poolId, b)));
            }
        }
    }
    return replicas;
}
Also used: MaterializedReplica (org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica), ArrayList (java.util.ArrayList), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), StringContains.containsString (org.hamcrest.core.StringContains.containsString), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), Block (org.apache.hadoop.hdfs.protocol.Block), Map (java.util.Map)
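
This helper pairs naturally with the corrupt() method shown in Example 5. A minimal call site, sketched assuming a running MiniDFSCluster named cluster (Example 3 shows the full pattern, including why the cluster must be shut down first):

List<MaterializedReplica> replicas = getMaterializedReplicas(cluster);
// Shut down first so DataNode threads release their locks on the block files.
cluster.shutdown();
for (MaterializedReplica replica : replicas) {
    // Overwrite each block file with junk bytes (see Example 4).
    replica.corruptData();
}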

Example 3 with MaterializedReplica

Use of org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica in project hadoop by apache.

From class TestDFSShell, method testGet.

@Test(timeout = 30000)
public void testGet() throws IOException {
    GenericTestUtils.setLogLevel(FSInputChecker.LOG, Level.ALL);
    final String fname = "testGet.txt";
    Path root = new Path("/test/get");
    final Path remotef = new Path(root, fname);
    final Configuration conf = new HdfsConfiguration();
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
    TestGetRunner runner = new TestGetRunner() {

        private int count = 0;

        private final FsShell shell = new FsShell(conf);

        public String run(int exitcode, String... options) throws IOException {
            String dst = new File(TEST_ROOT_DIR, fname + ++count).getAbsolutePath();
            String[] args = new String[options.length + 3];
            args[0] = "-get";
            args[args.length - 2] = remotef.toString();
            args[args.length - 1] = dst;
            for (int i = 0; i < options.length; i++) {
                args[i + 1] = options[i];
            }
            show("args=" + Arrays.asList(args));
            try {
                assertEquals(exitcode, shell.run(args));
            } catch (Exception e) {
                fail(StringUtils.stringifyException(e));
            }
            return exitcode == 0 ? DFSTestUtil.readFile(new File(dst)) : null;
        }
    };
    File localf = createLocalFile(new File(TEST_ROOT_DIR, fname));
    MiniDFSCluster cluster = null;
    DistributedFileSystem dfs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
        dfs = cluster.getFileSystem();
        mkdir(dfs, root);
        dfs.copyFromLocalFile(false, false, new Path(localf.getPath()), remotef);
        String localfcontent = DFSTestUtil.readFile(localf);
        assertEquals(localfcontent, runner.run(0));
        assertEquals(localfcontent, runner.run(0, "-ignoreCrc"));
        // find block files to modify later
        List<MaterializedReplica> replicas = getMaterializedReplicas(cluster);
        // Shut down miniCluster and then corrupt the block files by overwriting a
        // portion with junk data.  We must shut down the miniCluster so that threads
        // in the data node do not hold locks on the block files while we try to
        // write into them.  Particularly on Windows, the data node's use of the
        // FileChannel.transferTo method can cause block files to be memory mapped
        // in read-only mode during the transfer to a client, and this causes a
        // locking conflict.  The call to shutdown the miniCluster blocks until all
        // DataXceiver threads exit, preventing this problem.
        dfs.close();
        cluster.shutdown();
        show("replicas=" + replicas);
        corrupt(replicas, localfcontent);
        // Start the miniCluster again, but do not reformat, so prior files remain.
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(false).build();
        dfs = cluster.getFileSystem();
        assertEquals(null, runner.run(1));
        String corruptedcontent = runner.run(0, "-ignoreCrc");
        assertEquals(localfcontent.substring(1), corruptedcontent.substring(1));
        assertEquals(localfcontent.charAt(0) + 1, corruptedcontent.charAt(0));
    } finally {
        if (null != dfs) {
            try {
                dfs.close();
            } catch (Exception e) {
                // Ignore close failures during cleanup.
            }
        }
        if (null != cluster) {
            cluster.shutdown();
        }
        localf.delete();
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), MaterializedReplica (org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica), StringContains.containsString (org.hamcrest.core.StringContains.containsString), SequenceFile (org.apache.hadoop.io.SequenceFile), Test (org.junit.Test)

Example 4 with MaterializedReplica

Use of org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica in project hadoop by apache.

From class MiniDFSCluster, method corruptBlockOnDataNodesHelper.

private int corruptBlockOnDataNodesHelper(ExtendedBlock block, boolean deleteBlockFile) throws IOException {
    int blocksCorrupted = 0;
    for (DataNode dn : getDataNodes()) {
        try {
            MaterializedReplica replica = getFsDatasetTestUtils(dn).getMaterializedReplica(block);
            if (deleteBlockFile) {
                replica.deleteData();
            } else {
                replica.corruptData();
            }
            blocksCorrupted++;
        } catch (ReplicaNotFoundException e) {
            // This datanode does not hold a replica of the block; skip it.
        }
    }
    return blocksCorrupted;
}
Also used: DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), MaterializedReplica (org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException)
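
This private helper appears to back MiniDFSCluster's public corruption entry points. A hedged sketch of how a test might invoke them (the wrapper names corruptBlockOnDataNodes and corruptBlockOnDataNodesByDeletingBlockFile are assumed here; verify them against your Hadoop version):

// Corrupt every replica of the block by overwriting its data.
int corrupted = cluster.corruptBlockOnDataNodes(block);
assertTrue("Expected at least one replica to be corrupted", corrupted > 0);
// Or remove the block files entirely instead of corrupting them.
int deleted = cluster.corruptBlockOnDataNodesByDeletingBlockFile(block);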

Example 5 with MaterializedReplica

Use of org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica in project hadoop by apache.

From class TestDFSShell, method corrupt.

private static void corrupt(List<MaterializedReplica> replicas, String content) throws IOException {
    // Replace the first character with its successor, so the corrupted content
    // differs from the original in exactly one predictable position
    // (testGet above checks charAt(0) + 1).
    StringBuilder sb = new StringBuilder(content);
    char c = content.charAt(0);
    sb.setCharAt(0, ++c);
    for (MaterializedReplica replica : replicas) {
        replica.corruptData(sb.toString().getBytes("UTF-8"));
    }
}
Also used: MaterializedReplica (org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica)
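
Taken together, the examples follow one pattern: locate a replica on disk, stop whatever holds its files open, mutate it, and restart without reformatting. A condensed sketch assuming the MiniDFSCluster and DFSTestUtil APIs used above (the file path, length, and seed are illustrative):

Path file = new Path("/test/corruption.dat");
DFSTestUtil.createFile(dfs, file, 1024L, (short) 1, 0L);
ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, file);
MaterializedReplica replica = cluster.getMaterializedReplica(0, block);
// Release DataNode locks on the block files before touching them.
dfs.close();
cluster.shutdown();
replica.corruptData();  // or replica.deleteData() / replica.makeUnreachable()
// Restart without reformatting so the mutated replica survives.
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(false).build();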

Aggregations

MaterializedReplica (org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica): 7 uses
Configuration (org.apache.hadoop.conf.Configuration): 4 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 4 uses
Test (org.junit.Test): 4 uses
ArrayList (java.util.ArrayList): 2 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2 uses
Path (org.apache.hadoop.fs.Path): 2 uses
StringContains.containsString (org.hamcrest.core.StringContains.containsString): 2 uses
OutputStream (java.io.OutputStream): 1 use
InetSocketAddress (java.net.InetSocketAddress): 1 use
Map (java.util.Map): 1 use
Semaphore (java.util.concurrent.Semaphore): 1 use
FileSystem (org.apache.hadoop.fs.FileSystem): 1 use
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1 use
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1 use
Block (org.apache.hadoop.hdfs.protocol.Block): 1 use
BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 1 use
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 1 use
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 1 use
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 1 use