
Example 6 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

Class TestClientProtocolForPipelineRecovery, method testEvictWriter.

/**
   * Test that the writer is evicted from a datanode via DFSAdmin -evictWriters
   * and that the pipeline is rebuilt without that node.
   */
@Test
public void testEvictWriter() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        Path file = new Path("testEvictWriter.dat");
        FSDataOutputStream out = fs.create(file, (short) 2);
        out.write(0x31);
        out.hflush();
        // get nodes in the pipeline
        DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
        DatanodeInfo[] nodes = dfsOut.getPipeline();
        Assert.assertEquals(2, nodes.length);
        String dnAddr = nodes[1].getIpcAddr(false);
        // evict the writer from the second datanode and wait until
        // the pipeline is rebuilt.
        DFSAdmin dfsadmin = new DFSAdmin(conf);
        final String[] args1 = { "-evictWriters", dnAddr };
        Assert.assertEquals(0, dfsadmin.run(args1));
        out.write(0x31);
        out.hflush();
        // get the new pipeline and check that the evicted node is not in it.
        nodes = dfsOut.getPipeline();
        try {
            Assert.assertTrue(nodes.length > 0);
            for (int i = 0; i < nodes.length; i++) {
                Assert.assertFalse(dnAddr.equals(nodes[i].getIpcAddr(false)));
            }
        } finally {
            out.close();
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
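
The test drives the eviction through DFSAdmin inside a MiniDFSCluster. As a rough standalone sketch (an assumption for illustration, not part of the test class), the same "-evictWriters" call can be issued against a running cluster once the target datanode's IPC address is known, for example via DistributedFileSystem#getDataNodeStats():

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

public class EvictWritersSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at an HDFS cluster.
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        // Live datanodes reported by the NameNode, as DatanodeInfo[].
        DatanodeInfo[] live = dfs.getDataNodeStats();
        if (live.length > 0) {
            // Same invocation the test makes: "-evictWriters <host:ipc-port>".
            String dnAddr = live[0].getIpcAddr(false);
            new DFSAdmin(conf).run(new String[] { "-evictWriters", dnAddr });
        }
        dfs.close();
    }
}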

Example 7 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

Class TestClientProtocolForPipelineRecovery, method testPacketTransmissionDelay.

@Test
public void testPacketTransmissionDelay() throws Exception {
    // Make the first datanode not relay the heartbeat packet.
    DataNodeFaultInjector dnFaultInjector = new DataNodeFaultInjector() {

        @Override
        public boolean dropHeartbeatPacket() {
            return true;
        }
    };
    DataNodeFaultInjector oldDnInjector = DataNodeFaultInjector.get();
    DataNodeFaultInjector.set(dnFaultInjector);
    // Set the client socket timeout to 3 seconds. Normally a heartbeat packet
    // is sent every 1.5 seconds when there is no data traffic.
    Configuration conf = new HdfsConfiguration();
    conf.set(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, "3000");
    MiniDFSCluster cluster = null;
    try {
        int numDataNodes = 2;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        FSDataOutputStream out = fs.create(new Path("noheartbeat.dat"), (short) 2);
        out.write(0x31);
        out.hflush();
        DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
        // original pipeline
        DatanodeInfo[] orgNodes = dfsOut.getPipeline();
        // Cause the second datanode to time out on reading the packet
        Thread.sleep(3500);
        out.write(0x32);
        out.hflush();
        // new pipeline
        DatanodeInfo[] newNodes = dfsOut.getPipeline();
        out.close();
        boolean contains = false;
        for (int i = 0; i < newNodes.length; i++) {
            if (orgNodes[0].getXferAddr().equals(newNodes[i].getXferAddr())) {
                throw new IOException("The first datanode should have been replaced.");
            }
            if (orgNodes[1].getXferAddr().equals(newNodes[i].getXferAddr())) {
                contains = true;
            }
        }
        Assert.assertTrue(contains);
    } finally {
        DataNodeFaultInjector.set(oldDnInjector);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) DataNodeFaultInjector(org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector) IOException(java.io.IOException) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
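
The test works by swapping the global DataNodeFaultInjector and restoring the old one in a finally block. A minimal sketch of that save/override/restore pattern, factored into a helper (the helper class and its Runnable-based signature are illustrative, not Hadoop API):

import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;

final class FaultInjectorScope {
    // Runs the body with the given injector installed, then restores the
    // previous injector so later tests are unaffected.
    static void runWith(DataNodeFaultInjector injector, Runnable body) {
        DataNodeFaultInjector old = DataNodeFaultInjector.get();
        DataNodeFaultInjector.set(injector);
        try {
            body.run();
        } finally {
            DataNodeFaultInjector.set(old);
        }
    }
}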

Example 8 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

Class TestClientReportBadBlock, method createAFileWithCorruptedBlockReplicas.

/**
   * Create a file with one block and corrupt some/all of the block replicas.
   */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl, int corruptBlockCount) throws IOException, AccessControlException, FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
    DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
    DFSTestUtil.waitReplication(dfs, filePath, repl);
    // Locate the file blocks by asking the NameNode
    final LocatedBlocks locatedblocks = dfs.dfs.getNamenode().getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
    Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
    // The file only has one block
    LocatedBlock lblock = locatedblocks.get(0);
    DatanodeInfo[] datanodeinfos = lblock.getLocations();
    ExtendedBlock block = lblock.getBlock();
    // corrupt some/all of the block replicas
    for (int i = 0; i < corruptBlockCount; i++) {
        DatanodeInfo dninfo = datanodeinfos[i];
        final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
        cluster.corruptReplica(dn, block);
        LOG.debug("Corrupted block " + block.getBlockName() + " on data node " + dninfo);
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
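
This helper only corrupts replicas on disk; the surrounding test class (TestClientReportBadBlock) exercises the client reporting them. As a hedged sketch of that client-side call (the file path is a placeholder, BLOCK_SIZE reuses the test's constant, and dfs is assumed to be a DistributedFileSystem as in the helper), ClientProtocol#reportBadBlocks marks the listed replicas as corrupt on the NameNode:

// Fetch the block's locations, then report it as bad to the NameNode.
LocatedBlocks blocks = dfs.getClient().getNamenode()
        .getBlockLocations("/path/to/file", 0L, BLOCK_SIZE);
LocatedBlock bad = blocks.get(0);
dfs.getClient().getNamenode()
        .reportBadBlocks(new LocatedBlock[] { bad });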

Example 9 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

Class TestSafeModeWithStripedFile, method doTest.

/**
   * This util writes a small block group whose size is given by the caller,
   * then writes another 2 full-stripe blocks, then shuts down all DNs and
   * restarts them one by one, verifying the safe mode status accordingly.
   *
   * @param smallSize file size of the small block group
   * @param minStorages minimum replicas needed by the block so it can be safe
   */
private void doTest(int smallSize, int minStorages) throws IOException {
    FileSystem fs = cluster.getFileSystem();
    // add 1 block
    byte[] data = StripedFileTestUtil.generateBytes(smallSize);
    Path smallFilePath = new Path("/testStripedFile_" + smallSize);
    DFSTestUtil.writeFile(fs, smallFilePath, data);
    // If we only have 1 block, NN won't enter safemode in the first place
    // because the threshold is 0 blocks.
    // So we need to add another 2 blocks.
    int bigSize = blockSize * dataBlocks * 2;
    Path bigFilePath = new Path("/testStripedFile_" + bigSize);
    data = StripedFileTestUtil.generateBytes(bigSize);
    DFSTestUtil.writeFile(fs, bigFilePath, data);
    // now we have 3 blocks. NN needs 2 blocks to reach the threshold 0.9 of
    // total blocks 3.
    // stopping all DNs
    List<MiniDFSCluster.DataNodeProperties> dnprops = Lists.newArrayList();
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(smallFilePath.toString(), 0, smallSize);
    DatanodeInfo[] locations = lbs.get(0).getLocations();
    for (DatanodeInfo loc : locations) {
        // keep the DNs that have smallFile in the head of dnprops
        dnprops.add(cluster.stopDataNode(loc.getName()));
    }
    for (int i = 0; i < numDNs - locations.length; i++) {
        dnprops.add(cluster.stopDataNode(0));
    }
    cluster.restartNameNode(0);
    NameNode nn = cluster.getNameNode();
    assertTrue(cluster.getNameNode().isInSafeMode());
    assertEquals(0, NameNodeAdapter.getSafeModeSafeBlocks(nn));
    // the block of smallFile hasn't reached minStorages yet,
    // so the safe blocks count doesn't increment.
    for (int i = 0; i < minStorages - 1; i++) {
        cluster.restartDataNode(dnprops.remove(0));
        cluster.waitActive();
        cluster.triggerBlockReports();
        assertEquals(0, NameNodeAdapter.getSafeModeSafeBlocks(nn));
    }
    // the block of smallFile reaches minStorages,
    // so the safe blocks count increments.
    cluster.restartDataNode(dnprops.remove(0));
    cluster.waitActive();
    cluster.triggerBlockReports();
    assertEquals(1, NameNodeAdapter.getSafeModeSafeBlocks(nn));
    // the 2 blocks of bigFile need DATA_BLK_NUM storages to be safe
    for (int i = minStorages; i < dataBlocks - 1; i++) {
        cluster.restartDataNode(dnprops.remove(0));
        cluster.waitActive();
        cluster.triggerBlockReports();
        assertTrue(nn.isInSafeMode());
    }
    cluster.restartDataNode(dnprops.remove(0));
    cluster.waitActive();
    cluster.triggerBlockReports();
    assertFalse(nn.isInSafeMode());
}
Also used : Path(org.apache.hadoop.fs.Path) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) FileSystem(org.apache.hadoop.fs.FileSystem) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks)
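
The test queries safe mode through NameNodeAdapter because it runs next to the NameNode. A minimal client-side sketch of the same check (assuming a DistributedFileSystem handle named dfs) uses the SAFEMODE_GET action:

import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

// Returns true while the NameNode is still in safe mode.
boolean inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);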

Example 10 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

Class TestReconstructStripedFile, method testProcessErasureCodingTasksSubmitionShouldSucceed.

/*
   * Tests that processErasureCodingTasks does not throw exceptions due to
   * invalid ECTask submission.
   */
@Test
public void testProcessErasureCodingTasksSubmitionShouldSucceed() throws Exception {
    DataNode dataNode = cluster.dataNodes.get(0).datanode;
    // Pack invalid (dummy) parameters into ecTasks. Irrespective of the parameters,
    // each task's thread pool submission should succeed, so that an exception from
    // one task does not prevent the other tasks in the list from being processed.
    int size = cluster.dataNodes.size();
    byte[] liveIndices = new byte[size];
    DatanodeInfo[] dataDNs = new DatanodeInfo[size + 1];
    DatanodeStorageInfo targetDnInfos_1 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s01"));
    DatanodeStorageInfo[] dnStorageInfo = new DatanodeStorageInfo[] { targetDnInfos_1 };
    BlockECReconstructionInfo invalidECInfo = new BlockECReconstructionInfo(new ExtendedBlock("bp-id", 123456), dataDNs, dnStorageInfo, liveIndices, StripedFileTestUtil.getDefaultECPolicy());
    List<BlockECReconstructionInfo> ecTasks = new ArrayList<>();
    ecTasks.add(invalidECInfo);
    dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
}
Also used : BlockECReconstructionInfo(org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ArrayList(java.util.ArrayList) Test(org.junit.Test)
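
Besides DFSTestUtil.getLocalDatanodeDescriptor(), newer Hadoop releases expose DatanodeInfo.DatanodeInfoBuilder (also listed under Aggregations below) for constructing standalone DatanodeInfo instances. A hedged sketch with placeholder address, UUID and port values; the exact builder setters shown are an assumption and may differ by release:

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;

// Placeholder values; a real cluster fills these in from the datanode registration.
DatanodeInfo dn = new DatanodeInfoBuilder()
        .setIpAddr("127.0.0.1")
        .setHostName("localhost")
        .setDatanodeUuid("placeholder-uuid")
        .setXferPort(9866)
        .setInfoPort(9864)
        .setIpcPort(9867)
        .build();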

Aggregations

DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 214
Test (org.junit.Test): 103
Path (org.apache.hadoop.fs.Path): 91
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 73
IOException (java.io.IOException): 47
FileSystem (org.apache.hadoop.fs.FileSystem): 44
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 43
ArrayList (java.util.ArrayList): 39
Configuration (org.apache.hadoop.conf.Configuration): 38
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 37
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 32
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 32
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 29
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 27
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 25
InetSocketAddress (java.net.InetSocketAddress): 20
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 20
StorageType (org.apache.hadoop.fs.StorageType): 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 14