
Example 1 with DatanodeStorageInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

From class TestDatanodeRegistration, method waitForBlockReport:

private boolean waitForBlockReport(final DataNode dn, final DatanodeDescriptor dnd) throws Exception {
    final DatanodeStorageInfo storage = dnd.getStorageInfos()[0];
    final long lastCount = storage.getBlockReportCount();
    // Ask the datanode to send a full (non-incremental) block report.
    dn.triggerBlockReport(new BlockReportOptions.Factory().setIncremental(false).build());
    try {
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                return lastCount != storage.getBlockReportCount();
            }
        }, 10, 2000); // poll every 10 ms, time out after 2 s
    } catch (TimeoutException te) {
        return false;
    }
    return true;
}
Also used: DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockReportOptions (org.apache.hadoop.hdfs.client.BlockReportOptions), TimeoutException (java.util.concurrent.TimeoutException)
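
A minimal caller sketch. The test method name is hypothetical, and it assumes what the surrounding class provides: a MiniDFSCluster in a field named cluster and the helper above in scope.

@Test
public void testBlockReportArrives() throws Exception {
    // Pick the first datanode and look up its NameNode-side descriptor.
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeDescriptor dnd = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getDatanode(dn.getDatanodeId());
    // waitForBlockReport triggers a full report and polls the per-storage
    // report counter until it advances or the 2-second wait expires.
    Assert.assertTrue("block report not processed", waitForBlockReport(dn, dnd));
}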

Example 2 with DatanodeStorageInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

From class BlockReportTestBase, method testInterleavedBlockReports:

// See HDFS-10301
@Test(timeout = 300000)
public void testInterleavedBlockReports() throws IOException, ExecutionException, InterruptedException {
    int numConcurrentBlockReports = 3;
    DataNode dn = cluster.getDataNodes().get(DN_N0);
    final String poolId = cluster.getNamesystem().getBlockPoolId();
    LOG.info("Block pool id: " + poolId);
    final DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    final StorageBlockReport[] reports = getBlockReports(dn, poolId, true, true);
    // Get the list of storage ids associated with the datanode
    // before the test
    BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
    final DatanodeDescriptor dnDescriptor = bm.getDatanodeManager().getDatanode(dn.getDatanodeId());
    DatanodeStorageInfo[] storageInfos = dnDescriptor.getStorageInfos();
    // Send the block report concurrently using
    // numThreads=numConcurrentBlockReports
    ExecutorService executorService = Executors.newFixedThreadPool(numConcurrentBlockReports);
    List<Future<Void>> futureList = new ArrayList<>(numConcurrentBlockReports);
    for (int i = 0; i < numConcurrentBlockReports; i++) {
        futureList.add(executorService.submit(new Callable<Void>() {

            @Override
            public Void call() throws IOException {
                sendBlockReports(dnR, poolId, reports);
                return null;
            }
        }));
    }
    for (Future<Void> future : futureList) {
        future.get();
    }
    executorService.shutdown();
    // Verify that the storages match before and after the test
    Assert.assertArrayEquals(storageInfos, dnDescriptor.getStorageInfos());
}
Also used: StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), ArrayList (java.util.ArrayList), Callable (java.util.concurrent.Callable), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), ExecutorService (java.util.concurrent.ExecutorService), Future (java.util.concurrent.Future), Test (org.junit.Test)
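
The anonymous-Callable fan-out above can be written more compactly with Java 8 lambdas; a sketch of the same pattern, reusing the test's identifiers (that they remain in scope is an assumption):

// Submit numConcurrentBlockReports identical senders, then join on each
// Future so any IOException resurfaces as an ExecutionException.
ExecutorService pool = Executors.newFixedThreadPool(numConcurrentBlockReports);
List<Future<?>> futures = new ArrayList<>();
for (int i = 0; i < numConcurrentBlockReports; i++) {
    futures.add(pool.submit(() -> {
        sendBlockReports(dnR, poolId, reports);
        return null; // lambda resolves to Callable<Object>, may throw IOException
    }));
}
for (Future<?> f : futures) {
    f.get();
}
pool.shutdown();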

Example 3 with DatanodeStorageInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

From class TestAddStripedBlocks, method testAddUCReplica:

/**
   * Test BlockInfoStripedUnderConstruction#addReplicaIfNotPresent in different
   * scenarios.
   */
@Test
public void testAddUCReplica() throws Exception {
    final Path file = new Path("/file1");
    final List<String> storageIDs = new ArrayList<>();
    // create an empty file
    FSDataOutputStream out = null;
    try {
        out = dfs.create(file, (short) 1);
        // 1. create the UC striped block
        FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
        INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
        cluster.getNamesystem().getAdditionalBlock(file.toString(), fileNode.getId(), dfs.getClient().getClientName(), null, null, null, null);
        BlockInfo lastBlock = fileNode.getLastBlock();
        DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
        byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
        Assert.assertEquals(groupSize, locs.length);
        Assert.assertEquals(groupSize, indices.length);
        // 2. mimic incremental block reports and make sure the uc-replica list in
        // the BlockInfoUCStriped is correct
        int i = 0;
        for (DataNode dn : cluster.getDataNodes()) {
            final Block block = new Block(lastBlock.getBlockId() + i++, 0, lastBlock.getGenerationStamp());
            DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
            storageIDs.add(storage.getStorageID());
            StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(block, BlockStatus.RECEIVING_BLOCK, storage);
            for (StorageReceivedDeletedBlocks report : reports) {
                cluster.getNamesystem().processIncrementalBlockReport(dn.getDatanodeId(), report);
            }
        }
        // make sure lastBlock is correct and the storages have been updated
        locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
        indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
        Assert.assertEquals(groupSize, locs.length);
        Assert.assertEquals(groupSize, indices.length);
        for (DatanodeStorageInfo newstorage : locs) {
            Assert.assertTrue(storageIDs.contains(newstorage.getStorageID()));
        }
    } finally {
        IOUtils.cleanup(null, out);
    }
    // 3. restart the namenode. mimic the full block reports and check the
    // uc-replica list again
    cluster.restartNameNode(true);
    final String bpId = cluster.getNamesystem().getBlockPoolId();
    INodeFile fileNode = cluster.getNamesystem().getFSDirectory().getINode4Write(file.toString()).asFile();
    BlockInfo lastBlock = fileNode.getLastBlock();
    int i = groupSize - 1;
    for (DataNode dn : cluster.getDataNodes()) {
        String storageID = storageIDs.get(i);
        final Block block = new Block(lastBlock.getBlockId() + i--, lastBlock.getGenerationStamp(), 0);
        DatanodeStorage storage = new DatanodeStorage(storageID);
        List<ReplicaBeingWritten> blocks = new ArrayList<>();
        ReplicaBeingWritten replica = new ReplicaBeingWritten(block, null, null, null);
        blocks.add(replica);
        BlockListAsLongs bll = BlockListAsLongs.encode(blocks);
        StorageBlockReport[] reports = { new StorageBlockReport(storage, bll) };
        cluster.getNameNodeRpc().blockReport(dn.getDNRegistrationForBP(bpId), bpId, reports, new BlockReportContext(1, 0, System.nanoTime(), 0, true));
    }
    DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
    byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
    Assert.assertEquals(groupSize, locs.length);
    Assert.assertEquals(groupSize, indices.length);
    for (i = 0; i < groupSize; i++) {
        Assert.assertEquals(storageIDs.get(i), locs[groupSize - 1 - i].getStorageID());
        Assert.assertEquals(groupSize - i - 1, indices[i]);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten), ArrayList (java.util.ArrayList), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
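
Step 3 above packages one replica-being-written into a single-storage full block report; a hedged sketch of that plumbing pulled out as a standalone helper (the helper name and framing are mine, the calls are the ones the test makes):

// Illustrative helper: report one RBW replica to the NameNode as a full
// block report for a single storage.
private static void reportSingleRbw(MiniDFSCluster cluster, DataNode dn,
        String bpId, DatanodeStorage storage, Block block) throws IOException {
    List<ReplicaBeingWritten> blocks = new ArrayList<>();
    blocks.add(new ReplicaBeingWritten(block, null, null, null));
    BlockListAsLongs bll = BlockListAsLongs.encode(blocks);
    StorageBlockReport[] reports = { new StorageBlockReport(storage, bll) };
    // Single-RPC report context, mirroring the arguments used in the test.
    cluster.getNameNodeRpc().blockReport(dn.getDNRegistrationForBP(bpId),
        bpId, reports, new BlockReportContext(1, 0, System.nanoTime(), 0, true));
}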

Example 4 with DatanodeStorageInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

From class DFSTestUtil, method createDatanodeStorageInfos:

public static DatanodeStorageInfo[] createDatanodeStorageInfos(int n, String[] racks, String[] hostnames, StorageType[] types) {
    DatanodeStorageInfo[] storages = new DatanodeStorageInfo[n];
    for (int i = storages.length; i > 0; ) {
        // Use the 1-based value of i for the storage ID and the fake IP,
        // then decrement so the remaining array lookups are 0-based.
        final String storageID = "s" + i;
        final String ip = i + "." + i + "." + i + "." + i;
        i--;
        final String rack = (racks != null && i < racks.length) ? racks[i] : "defaultRack";
        final String hostname = (hostnames != null && i < hostnames.length) ? hostnames[i] : "host";
        final StorageType type = (types != null && i < types.length) ? types[i] : StorageType.DEFAULT;
        storages[i] = createDatanodeStorageInfo(storageID, ip, rack, hostname, type, null);
    }
    return storages;
}
Also used: DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), StorageType (org.apache.hadoop.fs.StorageType)
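
A possible call, sketched with illustrative rack and storage-type values:

// Three storages: racks come from the racks array, hostnames fall back to
// "host" (null array), and types fall back to StorageType.DEFAULT wherever
// the array is null or shorter than n.
DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(
    3,
    new String[] { "/rack1", "/rack2", "/rack3" },
    null,
    new StorageType[] { StorageType.SSD, StorageType.DISK, StorageType.DISK });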

Example 5 with DatanodeStorageInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

From class TestMaintenanceState, method checkFile:

/*
  * Verify that the number of replicas is as expected for each block in
  * the given file.
  *
  * @return - null if no failure found, else an error message string.
  */
static String checkFile(FSNamesystem ns, FileSystem fileSys, Path name, int repl, DatanodeInfo expectedExcludedNode, DatanodeInfo expectedMaintenanceNode) throws IOException {
    // need a raw stream
    assertTrue("Not HDFS:" + fileSys.getUri(), fileSys instanceof DistributedFileSystem);
    HdfsDataInputStream dis = (HdfsDataInputStream) fileSys.open(name);
    BlockManager bm = ns.getBlockManager();
    Collection<LocatedBlock> dinfo = dis.getAllBlocks();
    String output;
    for (LocatedBlock blk : dinfo) {
        // for each block
        DatanodeInfo[] nodes = blk.getLocations();
        for (int j = 0; j < nodes.length; j++) {
            // for each replica
            if (expectedExcludedNode != null && nodes[j].equals(expectedExcludedNode)) {
                //excluded node must not be in LocatedBlock.
                output = "For block " + blk.getBlock() + " replica on " + nodes[j] + " found in LocatedBlock.";
                LOG.info(output);
                return output;
            } else {
                if (nodes[j].isInMaintenance()) {
                    //IN_MAINTENANCE node must not be in LocatedBlock.
                    output = "For block " + blk.getBlock() + " replica on " + nodes[j] + " which is in maintenance state.";
                    LOG.info(output);
                    return output;
                }
            }
        }
        if (repl != nodes.length) {
            output = "Wrong number of replicas for block " + blk.getBlock() + ": expected " + repl + ", got " + nodes.length + " ,";
            for (int j = 0; j < nodes.length; j++) {
                // for each replica
                output += nodes[j] + ",";
            }
            output += "pending block # " + ns.getPendingReplicationBlocks() + " ,";
            output += "under replicated # " + ns.getUnderReplicatedBlocks() + " ,";
            if (expectedExcludedNode != null) {
                output += "excluded node " + expectedExcludedNode;
            }
            LOG.info(output);
            return output;
        }
        // Verify it has the expected maintenance node
        Iterator<DatanodeStorageInfo> storageInfoIter = bm.getStorages(blk.getBlock().getLocalBlock()).iterator();
        List<DatanodeInfo> maintenanceNodes = new ArrayList<>();
        while (storageInfoIter.hasNext()) {
            DatanodeInfo node = storageInfoIter.next().getDatanodeDescriptor();
            if (node.isMaintenance()) {
                maintenanceNodes.add(node);
            }
        }
        if (expectedMaintenanceNode != null) {
            if (!maintenanceNodes.contains(expectedMaintenanceNode)) {
                output = "No maintenance replica on " + expectedMaintenanceNode;
                LOG.info(output);
                return output;
            }
        } else {
            if (maintenanceNodes.size() != 0) {
                output = "Has maintenance replica(s)";
                LOG.info(output);
                return output;
            }
        }
    }
    return null;
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), ArrayList (java.util.ArrayList), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), HdfsDataInputStream (org.apache.hadoop.hdfs.client.HdfsDataInputStream)
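
A sketch of a caller; excludedNode and maintenanceNode are assumptions standing in for datanodes the test has previously excluded or transitioned to maintenance:

// Expect 3 replicas per block, no located replica on excludedNode, and a
// replica on maintenanceNode that is kept out of the LocatedBlocks.
String err = checkFile(cluster.getNamesystem(), fileSys, new Path("/maintFile"),
    3, excludedNode, maintenanceNode);
Assert.assertNull("unexpected replica layout: " + err, err);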

Aggregations

DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 25
Test (org.junit.Test): 10
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 8
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 7
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 6
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6
ArrayList (java.util.ArrayList): 5
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 5
Configuration (org.apache.hadoop.conf.Configuration): 4
Block (org.apache.hadoop.hdfs.protocol.Block): 4
Node (org.apache.hadoop.net.Node): 4
IOException (java.io.IOException): 3
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 3
BlockECReconstructionInfo (org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo): 3
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 3
FileNotFoundException (java.io.FileNotFoundException): 2
HashSet (java.util.HashSet): 2
StorageType (org.apache.hadoop.fs.StorageType): 2