Example 16 with RecoveringBlock

Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.

From the class TestPBHelper, the method testConvertBlockRecoveryCommand:

@Test
public void testConvertBlockRecoveryCommand() {
    DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
    List<RecoveringBlock> blks = ImmutableList.of(
        new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
        new RecoveringBlock(getExtendedBlock(2), dnInfo, 3));
    BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
    BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
    assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
    assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());
    BlockRecoveryCommand cmd2 = PBHelper.convert(proto);
    List<RecoveringBlock> cmd2Blks = Lists.newArrayList(cmd2.getRecoveringBlocks());
    assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
    assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
    assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
    assertEquals(cmd.toString(), cmd2.toString());
}
Also used: BlockRecoveryCommand(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand), DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo), RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), BlockRecoveryCommandProto(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto), Test(org.junit.Test)
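
For orientation, the RecoveringBlock and BlockRecoveryCommand constructors exercised above can also be used stand-alone. The following is a minimal sketch, not taken from the Hadoop test suite: the pool id, block id, length and generation stamps are arbitrary illustration values, and getNewGenerationStamp() is assumed to be the accessor for the recovery stamp passed as the constructor's third argument.

import java.util.Collections;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;

public class RecoveringBlockSketch {
    public static void main(String[] args) {
        // Arbitrary illustration values: pool id, block id, length, generation stamp.
        ExtendedBlock blk = new ExtendedBlock("example-pool", 1L, 1024L, 100L);
        // No target datanodes in this sketch; the tests above pass a real DatanodeInfo[].
        RecoveringBlock rb = new RecoveringBlock(blk, new DatanodeInfo[0], 101L);
        // A command can carry any number of blocks to recover; here just one.
        BlockRecoveryCommand cmd = new BlockRecoveryCommand(Collections.singletonList(rb));
        // getNewGenerationStamp() is assumed to expose the recovery stamp (101L above).
        System.out.println("block id = " + rb.getBlock().getBlockId()
            + ", new generation stamp = " + rb.getNewGenerationStamp()
            + ", blocks in command = " + cmd.getRecoveringBlocks().size());
    }
}

In testConvertBlockRecoveryCommand the same kind of command is then pushed through PBHelper.convert in both directions to check that these fields survive the protobuf round trip.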

Example 17 with RecoveringBlock

Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.

From the class TestBlockRecovery, the method testZeroLenReplicas:

/**
   * BlockRecoveryFI_07. Max replica length from all DNs is zero.
   *
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testZeroLenReplicas() throws IOException, InterruptedException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    // Stub the spied datanode so every replica reports a length of zero.
    doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
        block.getGenerationStamp(), ReplicaState.FINALIZED))
        .when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
    for (RecoveringBlock rBlock : initRecoveringBlocks()) {
        BlockRecoveryWorker.RecoveryTaskContiguous task =
            recoveryWorker.new RecoveryTaskContiguous(rBlock);
        BlockRecoveryWorker.RecoveryTaskContiguous spyTask = spy(task);
        spyTask.recover();
    }
    DatanodeProtocol dnP = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
    verify(dnP).commitBlockSynchronization(block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
Also used: ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo), RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol), DatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol), Test(org.junit.Test)

Example 18 with RecoveringBlock

Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.

From the class TestBlockRecovery, the method testNotMatchedReplicaID:

/**
   * BlockRecoveryFI_11. A replica's recovery id does not match the new GS.
   *
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testNotMatchedReplicaID() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    ReplicaInPipeline replicaInfo = dn.data.createRbw(StorageType.DEFAULT, block, false).getReplica();
    ReplicaOutputStreams streams = null;
    try {
        streams = replicaInfo.createStreams(true, DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
        streams.getChecksumOut().write('a');
        dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID + 1));
        BlockRecoveryWorker.RecoveryTaskContiguous task =
            recoveryWorker.new RecoveryTaskContiguous(rBlock);
        try {
            task.syncBlock(initBlockRecords(dn));
            fail("Sync should fail");
        } catch (IOException e) {
            // Assert on the message instead of discarding the boolean result.
            assertTrue(e.getMessage().startsWith("Cannot recover "));
        }
        DatanodeProtocol namenode = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
        verify(namenode, never()).commitBlockSynchronization(any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(), anyBoolean(), any(DatanodeID[].class), any(String[].class));
    } finally {
        // createStreams() may have failed before streams was assigned.
        if (streams != null) {
            streams.close();
        }
    }
}
Also used: RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), ReplicaOutputStreams(org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams), IOException(java.io.IOException), InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol), DatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol), Test(org.junit.Test)

Example 19 with RecoveringBlock

Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.

From the class TestInterDatanodeProtocol, the method checkBlockMetaDataInfo:

/**
   * The following test first creates a file.
   * It verifies the block information from a datanode.
   * Then, it updates the block with new information and verifies again.
   * @param useDnHostname whether DNs should connect to other DNs by hostname
   */
private void checkBlockMetaDataInfo(boolean useDnHostname) throws Exception {
    MiniDFSCluster cluster = null;
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, useDnHostname);
    if (useDnHostname) {
        // Since the mini cluster only listens on the loopback we have to
        // ensure the hostname used to access DNs maps to the loopback. We
        // do this by telling the DN to advertise localhost as its hostname
        // instead of the default hostname.
        conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
    }
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).checkDataNodeHostConfig(true).build();
        cluster.waitActive();
        //create a file
        DistributedFileSystem dfs = cluster.getFileSystem();
        String filestr = "/foo";
        Path filepath = new Path(filestr);
        DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);
        assertTrue(dfs.exists(filepath));
        //get block info
        LocatedBlock locatedblock = getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
        DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
        assertTrue(datanodeinfo.length > 0);
        //connect to a data node
        DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
        InterDatanodeProtocol idp = DataNodeTestUtils.createInterDatanodeProtocolProxy(datanode, datanodeinfo[0], conf, useDnHostname);
        // Stop the block scanners.
        datanode.getBlockScanner().removeAllVolumeScanners();
        //verify BlockMetaDataInfo
        ExtendedBlock b = locatedblock.getBlock();
        InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
        checkMetaInfo(b, datanode);
        long recoveryId = b.getGenerationStamp() + 1;
        idp.initReplicaRecovery(new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));
        //verify updateBlock
        ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(), b.getNumBytes() / 2, b.getGenerationStamp() + 1);
        idp.updateReplicaUnderRecovery(b, recoveryId, b.getBlockId(), newblock.getNumBytes());
        checkMetaInfo(newblock, datanode);
        // Verify correct null response trying to init recovery for a missing block
        ExtendedBlock badBlock = new ExtendedBlock("fake-pool", b.getBlockId(), 0, 0);
        assertNull(idp.initReplicaRecovery(new RecoveringBlock(badBlock, locatedblock.getLocations(), recoveryId)));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path), DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock), InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode), RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock)
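
The essential convention in this test is that a recovery id is simply the next generation stamp, and that the block re-registered after recovery carries the truncated length together with that new stamp. Below is a minimal sketch of that bookkeeping, using only the ExtendedBlock and RecoveringBlock constructors already shown above; the values are arbitrary and the helper names are hypothetical.

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;

class RecoveryIdSketch {
    // Hypothetical helper illustrating the recovery-id bookkeeping used above.
    static RecoveringBlock prepareRecovery(ExtendedBlock b) {
        // Recovery id is one past the block's current generation stamp.
        long recoveryId = b.getGenerationStamp() + 1;
        // No locations in this sketch; the real test passes locatedblock.getLocations().
        return new RecoveringBlock(b, null, recoveryId);
    }

    static ExtendedBlock afterSync(ExtendedBlock b, long recoveryId, long syncedLength) {
        // The synced replica keeps its pool and block id but takes the new
        // length and the recovery id as its generation stamp.
        return new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(), syncedLength, recoveryId);
    }
}

In checkBlockMetaDataInfo the synced length is b.getNumBytes() / 2, i.e. the replica is deliberately shortened to half its size before checkMetaInfo verifies the datanode's view of the updated block.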

Example 20 with RecoveringBlock

Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.

From the class TestInterDatanodeProtocol, the method testInterDNProtocolTimeout:

/** Test to verify that InterDatanode RPC times out as expected when
   *  the server DN does not respond.
   */
@Test(expected = SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
    final Server server = new TestServer(1, true);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
    DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId).build();
    InterDatanodeProtocol proxy = null;
    try {
        proxy = DataNode.createInterDataNodeProtocolProxy(dInfo, conf, 500, false);
        proxy.initReplicaRecovery(new RecoveringBlock(new ExtendedBlock("bpid", 1), null, 100));
        fail("Expected SocketTimeoutException exception, but did not get.");
    } finally {
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
        server.stop();
    }
}
Also used: DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID), DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo), Server(org.apache.hadoop.ipc.Server), DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), InetSocketAddress(java.net.InetSocketAddress), RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol), Test(org.junit.Test)

Aggregations

RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 20
Test (org.junit.Test): 10
IOException (java.io.IOException): 8
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 8
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo): 5
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 4
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 4
InterDatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol): 4
ArrayList (java.util.ArrayList): 3
Path (org.apache.hadoop.fs.Path): 3
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
BlockRecord (org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord): 3
RecoveryInProgressException (org.apache.hadoop.hdfs.protocol.RecoveryInProgressException): 2
RecoveringBlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto): 2
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 2
BlockRecoveryCommand (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand): 2
RecoveringStripedBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock): 2
DatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol): 2