
Example 6 with DataNode

use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

the class TestClientReportBadBlock method createAFileWithCorruptedBlockReplicas.

/**
   * Create a file with one block and corrupt some/all of the block replicas.
   */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl, int corruptBlockCount) throws IOException, AccessControlException, FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
    DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
    DFSTestUtil.waitReplication(dfs, filePath, repl);
    // Locate the file blocks by asking the NameNode
    final LocatedBlocks locatedblocks = dfs.dfs.getNamenode().getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
    Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
    // The file only has one block
    LocatedBlock lblock = locatedblocks.get(0);
    DatanodeInfo[] datanodeinfos = lblock.getLocations();
    ExtendedBlock block = lblock.getBlock();
    // Corrupt some or all of the block replicas
    for (int i = 0; i < corruptBlockCount; i++) {
        DatanodeInfo dninfo = datanodeinfos[i];
        final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
        cluster.corruptReplica(dn, block);
        LOG.debug("Corrupted block " + block.getBlockName() + " on data node " + dninfo);
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
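
A minimal usage sketch, assuming the same fixtures as the snippet above (the dfs and cluster fields and the BLOCK_SIZE constant); the path, replica counts, and read-back step are hypothetical, not taken from TestClientReportBadBlock itself.

// Hypothetical caller inside the same test class.
@Test
public void readSurvivesPartialCorruption() throws Exception {
    final Path filePath = new Path("/tmp/corruption-test");
    final short repl = 3;
    // Corrupt 2 of the 3 replicas; one intact replica remains.
    createAFileWithCorruptedBlockReplicas(filePath, repl, 2);
    // The client should fail over to the healthy replica and report the
    // corrupt ones back to the NameNode.
    DFSTestUtil.readFile(dfs, filePath);
}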

Example 7 with DataNode

use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

the class MiniDFSCluster method shutdownDataNode.

/**
   * Shut down the DataNode at the given index.
   */
public void shutdownDataNode(int dnIndex) {
    LOG.info("Shutting down DataNode " + dnIndex);
    DataNode dn = dataNodes.remove(dnIndex).datanode;
    dn.shutdown();
    numDataNodes--;
}
Also used : DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode)
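
A hedged sketch of driving this method from a test; the index arithmetic and the assertion are illustrative, assuming a started MiniDFSCluster named cluster.

// Hypothetical test fragment: remove the last DataNode and check the
// cluster's bookkeeping.
int before = cluster.getDataNodes().size();
cluster.shutdownDataNode(before - 1);
Assert.assertEquals(before - 1, cluster.getDataNodes().size());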

Example 8 with DataNode

use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

the class MiniDFSCluster method stopDataNode.

/**
   * Shut down a particular DataNode.
   * @param i node index
   * @return null if the node index is out of range, else the properties of the
   * removed node
   */
public synchronized DataNodeProperties stopDataNode(int i) {
    if (i < 0 || i >= dataNodes.size()) {
        return null;
    }
    DataNodeProperties dnprop = dataNodes.remove(i);
    DataNode dn = dnprop.datanode;
    LOG.info("MiniDFSCluster Stopping DataNode " + dn.getDisplayName() + " from a total of " + (dataNodes.size() + 1) + " datanodes.");
    dn.shutdown();
    numDataNodes--;
    return dnprop;
}
Also used : DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode)
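
Unlike shutdownDataNode above, this method returns the removed node's DataNodeProperties, which a test can hand back to MiniDFSCluster's restartDataNode to revive the same node. A minimal sketch of that round trip, with the assertions left as placeholders:

// Hypothetical stop/restart cycle using MiniDFSCluster's companion API.
DataNodeProperties dnProps = cluster.stopDataNode(0);
if (dnProps != null) {
    // ... assertions that depend on the node being down ...
    cluster.restartDataNode(dnProps);  // bring the same DataNode back
    cluster.waitActive();
}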

Example 9 with DataNode

use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

the class TestReconstructStripedFile method setup.

@Before
public void setup() throws IOException {
    final Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY, cellSize - 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
    if (ErasureCodeNative.isNativeCodeLoaded()) {
        conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY, NativeRSRawErasureCoderFactory.class.getCanonicalName());
    }
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dnNum).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    fs.getClient().setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName());
    List<DataNode> datanodes = cluster.getDataNodes();
    for (int i = 0; i < dnNum; i++) {
        dnMap.put(datanodes.get(i).getDatanodeId(), i);
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) NativeRSRawErasureCoderFactory(org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory) Before(org.junit.Before)
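
A minimal sketch of a test body that could run against the EC-enabled cluster built in setup(); the path, file length, and assertion are hypothetical, assuming the blockSize and fs fields shown above.

// Hypothetical test using the erasure-coded cluster from setup().
@Test
public void writeStripedFile() throws Exception {
    final Path ecFile = new Path("/ec-file");
    final int fileLen = blockSize * 3;
    // The replication argument is ignored for files under an EC policy;
    // the data is striped across DataNodes instead.
    DFSTestUtil.createFile(fs, ecFile, fileLen, (short) 1, 0L);
    Assert.assertEquals(fileLen, fs.getFileStatus(ecFile).getLen());
}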

Example 10 with DataNode

use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

the class TestReconstructStripedFile method testProcessErasureCodingTasksSubmitionShouldSucceed.

/**
   * Tests that processErasureCodingTasks does not throw exceptions due to an
   * invalid ECTask submission.
   */
@Test
public void testProcessErasureCodingTasksSubmitionShouldSucceed() throws Exception {
    DataNode dataNode = cluster.dataNodes.get(0).datanode;
    // Pack invalid (dummy) parameters into ecTasks. Regardless of the
    // parameters, each thread-pool submission should succeed, so an exception
    // from one task does not prevent the remaining tasks in the list from
    // being processed.
    int size = cluster.dataNodes.size();
    byte[] liveIndices = new byte[size];
    DatanodeInfo[] dataDNs = new DatanodeInfo[size + 1];
    DatanodeStorageInfo targetDnInfos_1 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s01"));
    DatanodeStorageInfo[] dnStorageInfo = new DatanodeStorageInfo[] { targetDnInfos_1 };
    BlockECReconstructionInfo invalidECInfo = new BlockECReconstructionInfo(new ExtendedBlock("bp-id", 123456), dataDNs, dnStorageInfo, liveIndices, StripedFileTestUtil.getDefaultECPolicy());
    List<BlockECReconstructionInfo> ecTasks = new ArrayList<>();
    ecTasks.add(invalidECInfo);
    dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
}
Also used : BlockECReconstructionInfo(org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ArrayList(java.util.ArrayList) Test(org.junit.Test)
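
To make the test's contract explicit, a hedged variant could assert that no exception escapes the submission call; the try/fail pattern below is illustrative, not part of the original test.

try {
    dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
} catch (Exception e) {
    Assert.fail("Task submission should not propagate exceptions: " + e);
}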

Aggregations

DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 165
Test (org.junit.Test): 110
Path (org.apache.hadoop.fs.Path): 78
Configuration (org.apache.hadoop.conf.Configuration): 60
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 47
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 37
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 37
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 35
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 29
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 28
FileSystem (org.apache.hadoop.fs.FileSystem): 27
IOException (java.io.IOException): 24
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 20
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20
ArrayList (java.util.ArrayList): 17
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 17
File (java.io.File): 15
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 14
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 13
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 12