Search in sources :

Example 6 with DatanodeStorage

use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.

the class TestInjectionForSimulatedStorage method testInjection.

/* This test makes sure that the NameNode retries replication of all the
   * available blocks of an under-replicated file. It uses simulated storage
   * and one of its features to inject blocks.
   * 
   * It creates a file with several blocks and a replication factor of 4.
   * The cluster is then shut down - the NN retains its state, but the DNs are
   * all simulated and hence lose their blocks.
   * The blocks are then injected into one of the DNs. The expected behaviour is
   * that the NN will arrange for the missing replicas to be copied from a valid source.
   */
@Test
public void testInjection() throws IOException {
    MiniDFSCluster cluster = null;
    String testFile = "/replication-test-file";
    Path testPath = new Path(testFile);
    byte[] buffer = new byte[1024];
    for (int i = 0; i < buffer.length; i++) {
        buffer[i] = '1';
    }
    try {
        Configuration conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
        conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, checksumSize);
        SimulatedFSDataset.setFactory(conf);
        //first time format
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        String bpid = cluster.getNamesystem().getBlockPoolId();
        DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
        DFSTestUtil.createFile(cluster.getFileSystem(), testPath, filesize, filesize, blockSize, (short) numDataNodes, 0L);
        waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
        List<Map<DatanodeStorage, BlockListAsLongs>> blocksList = cluster.getAllBlockReports(bpid);
        cluster.shutdown();
        cluster = null;
        /* Restart the MiniDFSCluster with more datanodes, since once a writeBlock
         * to a datanode fails, the same block cannot be written to it
         * immediately. In our case some replication attempts will fail.
         */
        LOG.info("Restarting minicluster");
        conf = new HdfsConfiguration();
        SimulatedFSDataset.setFactory(conf);
        conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes * 2).format(false).build();
        cluster.waitActive();
        Set<Block> uniqueBlocks = new HashSet<Block>();
        for (Map<DatanodeStorage, BlockListAsLongs> map : blocksList) {
            for (BlockListAsLongs blockList : map.values()) {
                for (Block b : blockList) {
                    uniqueBlocks.add(new Block(b));
                }
            }
        }
        // Insert all the blocks into the first datanode
        LOG.info("Inserting " + uniqueBlocks.size() + " blocks");
        cluster.injectBlocks(0, uniqueBlocks, null);
        dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
        waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) Map(java.util.Map) HashSet(java.util.HashSet) Test(org.junit.Test)
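
Both calls to waitForBlockReplication above rely on a helper whose body is not shown. Below is a minimal sketch of what it plausibly does, using only the standard ClientProtocol.getBlockLocations API; the helper name and the "negative timeout means wait indefinitely" convention are inferred from the call sites, and the body is an assumption, not the Hadoop source:

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Sketch: poll the NameNode until every block of the file has at least
// `expected` replicas; a negative maxWaitSec means wait indefinitely.
static void waitForBlockReplication(String file, ClientProtocol namenode,
        int expected, long maxWaitSec) throws IOException {
    long deadline = System.currentTimeMillis() + maxWaitSec * 1000;
    while (true) {
        boolean replicated = true;
        LocatedBlocks blocks = namenode.getBlockLocations(file, 0, Long.MAX_VALUE);
        for (LocatedBlock blk : blocks.getLocatedBlocks()) {
            if (blk.getLocations().length < expected) {
                replicated = false;
                break;
            }
        }
        if (replicated) {
            return;
        }
        if (maxWaitSec >= 0 && System.currentTimeMillis() > deadline) {
            throw new IOException("Timed out waiting for " + file
                    + " to reach replication factor " + expected);
        }
        try {
            Thread.sleep(500);
        } catch (InterruptedException ie) {
            throw new IOException(ie);
        }
    }
}

In the test, the second wait passes -1 so the check blocks until the NN has re-replicated the injected replicas.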

Example 7 with DatanodeStorage

use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.

the class TestPBHelper method testBlockECRecoveryCommand.

@Test
public void testBlockECRecoveryCommand() {
    DatanodeInfo[] dnInfos0 = new DatanodeInfo[] { DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
    DatanodeStorageInfo targetDnInfos_0 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s00"));
    DatanodeStorageInfo targetDnInfos_1 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s01"));
    DatanodeStorageInfo[] targetDnInfos0 = new DatanodeStorageInfo[] { targetDnInfos_0, targetDnInfos_1 };
    byte[] liveBlkIndices0 = new byte[2];
    BlockECReconstructionInfo blkECRecoveryInfo0 = new BlockECReconstructionInfo(new ExtendedBlock("bp1", 1234), dnInfos0, targetDnInfos0, liveBlkIndices0, StripedFileTestUtil.getDefaultECPolicy());
    DatanodeInfo[] dnInfos1 = new DatanodeInfo[] { DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
    DatanodeStorageInfo targetDnInfos_2 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s02"));
    DatanodeStorageInfo targetDnInfos_3 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s03"));
    DatanodeStorageInfo[] targetDnInfos1 = new DatanodeStorageInfo[] { targetDnInfos_2, targetDnInfos_3 };
    byte[] liveBlkIndices1 = new byte[2];
    BlockECReconstructionInfo blkECRecoveryInfo1 = new BlockECReconstructionInfo(new ExtendedBlock("bp2", 3256), dnInfos1, targetDnInfos1, liveBlkIndices1, StripedFileTestUtil.getDefaultECPolicy());
    List<BlockECReconstructionInfo> blkRecoveryInfosList = new ArrayList<BlockECReconstructionInfo>();
    blkRecoveryInfosList.add(blkECRecoveryInfo0);
    blkRecoveryInfosList.add(blkECRecoveryInfo1);
    BlockECReconstructionCommand blkECReconstructionCmd = new BlockECReconstructionCommand(DatanodeProtocol.DNA_ERASURE_CODING_RECONSTRUCTION, blkRecoveryInfosList);
    BlockECReconstructionCommandProto blkECRecoveryCmdProto = PBHelper.convert(blkECReconstructionCmd);
    blkECReconstructionCmd = PBHelper.convert(blkECRecoveryCmdProto);
    Iterator<BlockECReconstructionInfo> iterator = blkECReconstructionCmd.getECTasks().iterator();
    assertBlockECRecoveryInfoEquals(blkECRecoveryInfo0, iterator.next());
    assertBlockECRecoveryInfoEquals(blkECRecoveryInfo1, iterator.next());
}
Also used : BlockECReconstructionInfo(org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ArrayList(java.util.ArrayList) BlockECReconstructionCommand(org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand) BlockECReconstructionCommandProto(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) Test(org.junit.Test)
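
The assertBlockECRecoveryInfoEquals helper invoked above is a private assertion in TestPBHelper and is not reproduced here. A minimal sketch of a field-by-field check follows; the accessor names are assumptions based on the BlockECReconstructionInfo fields the test constructs, not a copy of the real helper:

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;

// Sketch: verifies a reconstruction task survived the PB round trip.
private static void assertBlockECRecoveryInfoEquals(
        BlockECReconstructionInfo expected, BlockECReconstructionInfo actual) {
    // Same block and block pool
    assertEquals(expected.getExtendedBlock(), actual.getExtendedBlock());
    // Same number of source and target nodes
    assertEquals(expected.getSourceDnInfos().length, actual.getSourceDnInfos().length);
    assertEquals(expected.getTargetDnInfos().length, actual.getTargetDnInfos().length);
    // Same live block indices and erasure coding policy
    assertArrayEquals(expected.getLiveBlockIndices(), actual.getLiveBlockIndices());
    assertEquals(expected.getErasureCodingPolicy().getName(),
            actual.getErasureCodingPolicy().getName());
}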

Example 8 with DatanodeStorage

use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.

the class TestBlockListAsLongs method testDatanodeDetect.

@Test
public void testDatanodeDetect() throws ServiceException, IOException {
    final AtomicReference<BlockReportRequestProto> request = new AtomicReference<>();
    // just capture the outgoing PB
    DatanodeProtocolPB mockProxy = mock(DatanodeProtocolPB.class);
    doAnswer(new Answer<BlockReportResponseProto>() {

        public BlockReportResponseProto answer(InvocationOnMock invocation) {
            Object[] args = invocation.getArguments();
            request.set((BlockReportRequestProto) args[1]);
            return BlockReportResponseProto.newBuilder().build();
        }
    }).when(mockProxy).blockReport(any(RpcController.class), any(BlockReportRequestProto.class));
    @SuppressWarnings("resource") DatanodeProtocolClientSideTranslatorPB nn = new DatanodeProtocolClientSideTranslatorPB(mockProxy);
    DatanodeRegistration reg = DFSTestUtil.getLocalDatanodeRegistration();
    NamespaceInfo nsInfo = new NamespaceInfo(1, "cluster", "bp", 1);
    reg.setNamespaceInfo(nsInfo);
    Replica r = new FinalizedReplica(new Block(1, 2, 3), null, null);
    BlockListAsLongs bbl = BlockListAsLongs.encode(Collections.singleton(r));
    DatanodeStorage storage = new DatanodeStorage("s1");
    StorageBlockReport[] sbr = { new StorageBlockReport(storage, bbl) };
    // check DN sends new-style BR
    request.set(null);
    nsInfo.setCapabilities(Capability.STORAGE_BLOCK_REPORT_BUFFERS.getMask());
    nn.blockReport(reg, "pool", sbr, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
    BlockReportRequestProto proto = request.get();
    assertNotNull(proto);
    assertTrue(proto.getReports(0).getBlocksList().isEmpty());
    assertFalse(proto.getReports(0).getBlocksBuffersList().isEmpty());
    // back up to prior version and check DN sends old-style BR
    request.set(null);
    nsInfo.setCapabilities(Capability.UNKNOWN.getMask());
    BlockListAsLongs blockList = getBlockList(r);
    StorageBlockReport[] obp = new StorageBlockReport[] { new StorageBlockReport(new DatanodeStorage("s1"), blockList) };
    nn.blockReport(reg, "pool", obp, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
    proto = request.get();
    assertNotNull(proto);
    assertFalse(proto.getReports(0).getBlocksList().isEmpty());
    assertTrue(proto.getReports(0).getBlocksBuffersList().isEmpty());
}
Also used : StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) AtomicReference(java.util.concurrent.atomic.AtomicReference) FinalizedReplica(org.apache.hadoop.hdfs.server.datanode.FinalizedReplica) BlockReportReplica(org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica) Replica(org.apache.hadoop.hdfs.server.datanode.Replica) FinalizedReplica(org.apache.hadoop.hdfs.server.datanode.FinalizedReplica) RpcController(com.google.protobuf.RpcController) DatanodeProtocolClientSideTranslatorPB(org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB) DatanodeProtocolPB(org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) BlockReportRequestProto(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto) InvocationOnMock(org.mockito.invocation.InvocationOnMock) BlockReportContext(org.apache.hadoop.hdfs.server.protocol.BlockReportContext) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) BlockReportResponseProto(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) Test(org.junit.Test)
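
The getBlockList(r) call builds an old-format report for the same replica, exercising the pre-buffer encoding. A plausible sketch, assuming BlockListAsLongs.decodeLongs and the legacy layout of two header longs (finalized count, under-construction count) followed by (id, length, genstamp) triples:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.datanode.Replica;

// Sketch: encodes finalized replicas in the legacy longs format.
private static BlockListAsLongs getBlockList(Replica... replicas) {
    List<Long> longs = new ArrayList<>(2 + 3 * replicas.length);
    longs.add((long) replicas.length); // number of finalized replicas
    longs.add(0L);                     // number of under-construction replicas
    for (Replica r : replicas) {
        longs.add(r.getBlockId());
        longs.add(r.getBytesOnDisk());
        longs.add(r.getGenerationStamp());
    }
    return BlockListAsLongs.decodeLongs(longs);
}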

Example 9 with DatanodeStorage

use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.

the class TestPBHelper method TestConvertDatanodeStorage.

@Test
public void TestConvertDatanodeStorage() {
    DatanodeStorage dns1 = new DatanodeStorage("id1", DatanodeStorage.State.NORMAL, StorageType.SSD);
    DatanodeStorageProto proto = PBHelperClient.convert(dns1);
    DatanodeStorage dns2 = PBHelperClient.convert(proto);
    compare(dns1, dns2);
}
Also used : DatanodeStorageProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) Test(org.junit.Test)
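
The compare helper is another private assertion in TestPBHelper. A minimal sketch, assuming the standard DatanodeStorage accessors (getStorageID, getState, getStorageType):

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

// Sketch: a lossless PB round trip must preserve all three fields.
private static void compare(DatanodeStorage dns1, DatanodeStorage dns2) {
    assertEquals(dns1.getStorageID(), dns2.getStorageID());
    assertEquals(dns1.getState(), dns2.getState());
    assertEquals(dns1.getStorageType(), dns2.getStorageType());
}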

Example 10 with DatanodeStorage

use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.

the class DFSTestUtil method addBlockToFile.

/**
   * Adds a block or a striped block group to a file.
   * This method only manipulates the NameNode's
   * state of the file and the block, without injecting any data into a DataNode.
   * It does, however, mimic block reports.
   * You should disable periodic heartbeats before using this.
   * @param isStripedBlock whether the added block is a striped block
   * @param dataNodes list of DataNodes to host the striped block group
   * @param previous previous block in the file
   * @param numStripes number of stripes in each block group
   * @param len block size for a non-striped block
   * @return the added block or block group
   */
public static Block addBlockToFile(boolean isStripedBlock, List<DataNode> dataNodes, DistributedFileSystem fs, FSNamesystem ns, String file, INodeFile fileNode, String clientName, ExtendedBlock previous, int numStripes, int len) throws Exception {
    fs.getClient().namenode.addBlock(file, clientName, previous, null, fileNode.getId(), null, null);
    final BlockInfo lastBlock = fileNode.getLastBlock();
    final int groupSize = fileNode.getPreferredBlockReplication();
    assert dataNodes.size() >= groupSize;
    // 1. RECEIVING_BLOCK IBR
    for (int i = 0; i < groupSize; i++) {
        DataNode dn = dataNodes.get(i);
        final Block block = new Block(lastBlock.getBlockId() + i, 0, lastBlock.getGenerationStamp());
        DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
        StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(block, ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, storage);
        for (StorageReceivedDeletedBlocks report : reports) {
            ns.processIncrementalBlockReport(dn.getDatanodeId(), report);
        }
    }
    final ErasureCodingPolicy ecPolicy = fs.getErasureCodingPolicy(new Path(file));
    // 2. RECEIVED_BLOCK IBR
    long blockSize = isStripedBlock ? numStripes * ecPolicy.getCellSize() : len;
    for (int i = 0; i < groupSize; i++) {
        DataNode dn = dataNodes.get(i);
        final Block block = new Block(lastBlock.getBlockId() + i, blockSize, lastBlock.getGenerationStamp());
        DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
        StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(block, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
        for (StorageReceivedDeletedBlocks report : reports) {
            ns.processIncrementalBlockReport(dn.getDatanodeId(), report);
        }
    }
    long bytes = isStripedBlock ? numStripes * ecPolicy.getCellSize() * ecPolicy.getNumDataUnits() : len;
    lastBlock.setNumBytes(bytes);
    return lastBlock;
}
Also used : Path(org.apache.hadoop.fs.Path) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks)
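
Each incremental block report above is produced by DFSTestUtil.makeReportForReceivedBlock. A plausible sketch of that factory, wrapping a single block-status change in a one-element per-storage report (the null third constructor argument is the delete hint):

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;

// Sketch: one storage, one block-status change per report.
public static StorageReceivedDeletedBlocks[] makeReportForReceivedBlock(
        Block block, BlockStatus blockStatus, DatanodeStorage storage) {
    ReceivedDeletedBlockInfo[] receivedBlocks = new ReceivedDeletedBlockInfo[] {
        new ReceivedDeletedBlockInfo(block, blockStatus, null) // null = no delete hint
    };
    return new StorageReceivedDeletedBlocks[] {
        new StorageReceivedDeletedBlocks(storage, receivedBlocks)
    };
}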

Aggregations

DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 47 usages
Test (org.junit.Test): 27 usages
ArrayList (java.util.ArrayList): 16 usages
BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 13 usages
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 13 usages
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 12 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 11 usages
Path (org.apache.hadoop.fs.Path): 10 usages
Configuration (org.apache.hadoop.conf.Configuration): 8 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 usages
StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks): 8 usages
Map (java.util.Map): 7 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 7 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 7 usages
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 7 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 5 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 5 usages
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 5 usages