
Example 1 with DatanodeManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.

The class TestBlocksScheduledCounter, method testScheduledBlocksCounterShouldDecrementOnAbandonBlock.

/**
   * Abandon block should decrement the scheduledBlocks count for the dataNode.
   */
@Test
public void testScheduledBlocksCounterShouldDecrementOnAbandonBlock() throws Exception {
    cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(2).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DatanodeManager datanodeManager = cluster.getNamesystem().getBlockManager().getDatanodeManager();
    ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
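    // fetchDatanodes(live, dead, removeDecommissionNode) fills two separate lists;
    // passing the same list for both collects every registered datanode.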
    datanodeManager.fetchDatanodes(dnList, dnList, false);
    for (DatanodeDescriptor descriptor : dnList) {
        assertEquals("Blocks scheduled should be 0 for " + descriptor.getName(), 0, descriptor.getBlocksScheduled());
    }
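    // Shut down one datanode: a block later scheduled on it will fail pipeline
    // setup, so the client abandons the block, which must decrement its count.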
    cluster.getDataNodes().get(0).shutdown();
    // open a file and write a few bytes:
    FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"), (short) 2);
    for (int i = 0; i < 1024; i++) {
        out.write(i);
    }
    // flush to make sure a block is allocated.
    out.hflush();
    DatanodeDescriptor abandonedDn = datanodeManager.getDatanode(cluster.getDataNodes().get(0).getDatanodeId());
    assertEquals("for the abandoned dn scheduled counts should be 0", 0, abandonedDn.getBlocksScheduled());
    for (DatanodeDescriptor descriptor : dnList) {
        if (descriptor.equals(abandonedDn)) {
            continue;
        }
        assertEquals("Blocks scheduled should be 1 for " + descriptor.getName(), 1, descriptor.getBlocksScheduled());
    }
    // close the file and the counter should go to zero.
    out.close();
    for (DatanodeDescriptor descriptor : dnList) {
        assertEquals("Blocks scheduled should be 0 for " + descriptor.getName(), 0, descriptor.getBlocksScheduled());
    }
}
Also used: DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), Path (org.apache.hadoop.fs.Path), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), ArrayList (java.util.ArrayList), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)

Example 2 with DatanodeManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.

The class TestBlocksScheduledCounter, method testBlocksScheduledCounter.

@Test
public void testBlocksScheduledCounter() throws IOException {
    cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    // open a file and write a few bytes:
    FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
    for (int i = 0; i < 1024; i++) {
        out.write(i);
    }
    // flush to make sure a block is allocated.
    out.hflush();
    ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
    final DatanodeManager dm = cluster.getNamesystem().getBlockManager().getDatanodeManager();
    dm.fetchDatanodes(dnList, dnList, false);
    DatanodeDescriptor dn = dnList.get(0);
    assertEquals(1, dn.getBlocksScheduled());
    // close the file and the counter should go to zero.
    out.close();
    assertEquals(0, dn.getBlocksScheduled());
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), ArrayList (java.util.ArrayList), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
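
The timing in these two tests hinges on when the client actually contacts the NameNode. A minimal sketch of that lifecycle, assuming the fs and cluster from the tests above (the "/sketch" path is illustrative):

FSDataOutputStream out = fs.create(new Path("/sketch"));
// For a handful of bytes, write() only buffers on the client; the NameNode
// has not allocated a block yet.
out.write(1);
// hflush() ships the buffered packet, which makes the DataStreamer ask the
// NameNode for a block: this is the point where getBlocksScheduled() rises.
out.hflush();
// close() completes the block, and the scheduled counter drops back to zero.
out.close();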

Example 3 with DatanodeManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.

The class FSNamesystem, method datanodeReport.

DatanodeInfo[] datanodeReport(final DatanodeReportType type) throws AccessControlException, StandbyException {
    checkSuperuserPrivilege();
    checkOperation(OperationCategory.UNCHECKED);
    readLock();
    try {
        checkOperation(OperationCategory.UNCHECKED);
        final DatanodeManager dm = getBlockManager().getDatanodeManager();
        final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);
        DatanodeInfo[] arr = new DatanodeInfo[results.size()];
        for (int i = 0; i < arr.length; i++) {
            arr[i] = new DatanodeInfoBuilder().setFrom(results.get(i)).build();
        }
        return arr;
    } finally {
        readUnlock("datanodeReport");
    }
}
Also used: DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder)
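
For context, a hedged sketch of how a client typically reaches this server-side method, via DistributedFileSystem#getDataNodeStats; it assumes fs.defaultFS in the default Configuration points at a running HDFS, and DatanodeReportType comes from org.apache.hadoop.hdfs.protocol.HdfsConstants:

Configuration conf = new Configuration();
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
// The call travels over ClientProtocol#getDatanodeReport and lands in
// FSNamesystem#datanodeReport above; note it requires superuser privilege.
DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
for (DatanodeInfo dn : live) {
    System.out.println(dn.getHostName() + " capacity=" + dn.getCapacity());
}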

Example 4 with DatanodeManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.

The class FSNamesystem, method getAdditionalDatanode.

/** @see ClientProtocol#getAdditionalDatanode */
LocatedBlock getAdditionalDatanode(String src, long fileId, final ExtendedBlock blk, final DatanodeInfo[] existings, final String[] storageIDs, final Set<Node> excludes, final int numAdditionalNodes, final String clientName) throws IOException {
    // check if the feature is enabled
    dtpReplaceDatanodeOnFailure.checkEnabled();
    Node clientnode = null;
    String clientMachine;
    final long preferredblocksize;
    final byte storagePolicyID;
    final List<DatanodeStorageInfo> chosen;
    final BlockType blockType;
    checkOperation(OperationCategory.READ);
    FSPermissionChecker pc = getPermissionChecker();
    readLock();
    try {
        checkOperation(OperationCategory.READ);
        // check safe mode
        checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + blk);
        final INodesInPath iip = dir.resolvePath(pc, src, fileId);
        src = iip.getPath();
        // check lease
        final INodeFile file = checkLease(iip, clientName, fileId);
        clientMachine = file.getFileUnderConstructionFeature().getClientMachine();
        clientnode = blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
        preferredblocksize = file.getPreferredBlockSize();
        storagePolicyID = file.getStoragePolicyID();
        blockType = file.getBlockType();
        // find datanode storages
        final DatanodeManager dm = blockManager.getDatanodeManager();
        chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs, "src=%s, fileId=%d, blk=%s, clientName=%s, clientMachine=%s", src, fileId, blk, clientName, clientMachine));
    } finally {
        readUnlock("getAdditionalDatanode");
    }
    if (clientnode == null) {
        clientnode = FSDirWriteFileOp.getClientNode(blockManager, clientMachine);
    }
    // choose new datanodes.
    final DatanodeStorageInfo[] targets = blockManager.chooseTarget4AdditionalDatanode(src, numAdditionalNodes, clientnode, chosen, excludes, preferredblocksize, storagePolicyID, blockType);
    final LocatedBlock lb = BlockManager.newLocatedBlock(blk, targets, -1, false);
    blockManager.setBlockToken(lb, BlockTokenIdentifier.AccessMode.COPY);
    return lb;
}
Also used: Node (org.apache.hadoop.net.Node), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockType (org.apache.hadoop.hdfs.protocol.BlockType)
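
This path only runs when the replace-datanode-on-failure feature is enabled (the checkEnabled() call at the top throws otherwise). A sketch of the standard client-side settings behind it; the values shown are the usual defaults, given for illustration rather than as a tuning recommendation:

Configuration conf = new Configuration();
// Feature switch mirrored by dtpReplaceDatanodeOnFailure.checkEnabled() above.
conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
// Policy controlling when a failed pipeline datanode gets replaced: NEVER, DEFAULT, or ALWAYS.
conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
// With the feature on, the client's DataStreamer invokes
// ClientProtocol#getAdditionalDatanode when a node in the write pipeline fails.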

Example 5 with DatanodeManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.

The class TestFsck, method testFsckFileNotFound.

/** Test fsck with FileNotFound. */
@Test
public void testFsckFileNotFound() throws Exception {
    // Number of replicas to actually start
    final short numReplicas = 1;
    NameNode namenode = mock(NameNode.class);
    NetworkTopology nettop = mock(NetworkTopology.class);
    Map<String, String[]> pmap = new HashMap<>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    FSNamesystem fsName = mock(FSNamesystem.class);
    FSDirectory fsd = mock(FSDirectory.class);
    BlockManager blockManager = mock(BlockManager.class);
    DatanodeManager dnManager = mock(DatanodeManager.class);
    INodesInPath iip = mock(INodesInPath.class);
    when(namenode.getNamesystem()).thenReturn(fsName);
    when(fsName.getBlockManager()).thenReturn(blockManager);
    when(fsName.getFSDirectory()).thenReturn(fsd);
    when(fsd.getFSNamesystem()).thenReturn(fsName);
    when(fsd.resolvePath(anyObject(), anyString(), any(DirOp.class))).thenReturn(iip);
    when(blockManager.getDatanodeManager()).thenReturn(dnManager);
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, numReplicas, remoteAddress);
    String pathString = "/tmp/testFile";
    long length = 123L;
    boolean isDir = false;
    int blockReplication = 1;
    long blockSize = 128 * 1024L;
    long modTime = 123123123L;
    long accessTime = 123123120L;
    FsPermission perms = FsPermission.getDefault();
    String owner = "foo";
    String group = "bar";
    byte[] symlink = null;
    byte[] path = DFSUtil.string2Bytes(pathString);
    long fileId = 312321L;
    int numChildren = 1;
    byte storagePolicy = 0;
    HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication, blockSize, modTime, accessTime, perms, owner, group, symlink, path, fileId, numChildren, null, storagePolicy, null);
    Result replRes = new ReplicationResult(conf);
    Result ecRes = new ErasureCodingResult(conf);
    try {
        fsck.check(pathString, file, replRes, ecRes);
    } catch (Exception e) {
        fail("Unexpected exception " + e.getMessage());
    }
    assertTrue(replRes.isHealthy());
}
Also used: DirOp (org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp), HashMap (java.util.HashMap), Matchers.anyString (org.mockito.Matchers.anyString), ReplicationResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult), Result (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result), ErasureCodingResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult), StringWriter (java.io.StringWriter), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), FsPermission (org.apache.hadoop.fs.permission.FsPermission), PrintWriter (java.io.PrintWriter), IOException (java.io.IOException), ChecksumException (org.apache.hadoop.fs.ChecksumException), TimeoutException (java.util.concurrent.TimeoutException), UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException), FileNotFoundException (java.io.FileNotFoundException), AccessControlException (org.apache.hadoop.security.AccessControlException), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), NetworkTopology (org.apache.hadoop.net.NetworkTopology), InetAddress (java.net.InetAddress), Writer (java.io.Writer), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), Test (org.junit.Test)
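
For comparison with the fully mocked test above, a sketch of driving fsck end to end; it assumes conf points at a running HDFS, and the path and flags are illustrative:

// org.apache.hadoop.hdfs.tools.DFSck backs the `hdfs fsck` CLI command;
// ToolRunner is org.apache.hadoop.util.ToolRunner.
int exitCode = ToolRunner.run(new DFSck(conf), new String[] {"/tmp/testFile", "-files", "-blocks"});
System.out.println("fsck exit code: " + exitCode);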

Aggregations

DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 39 uses
Test (org.junit.Test): 30 uses
Path (org.apache.hadoop.fs.Path): 21 uses
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 21 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 12 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9 uses
File (java.io.File): 8 uses
ArrayList (java.util.ArrayList): 8 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 7 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 7 uses
Configuration (org.apache.hadoop.conf.Configuration): 6 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 6 uses
IOException (java.io.IOException): 5 uses
FileNotFoundException (java.io.FileNotFoundException): 4 uses
TimeoutException (java.util.concurrent.TimeoutException): 4 uses
ChecksumException (org.apache.hadoop.fs.ChecksumException): 4 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 4 uses
UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException): 4 uses