
Example 6 with InterDatanodeProtocol

use of org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol in project hadoop by apache.

the class TestBlockRecovery method testFinalizedRbwReplicas.

/**
   * BlockRecovery_02.9.
   * One replica is Finalized and another is RBW. 
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testFinalizedRbwReplicas() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    // rbw and finalized replicas have the same length
    ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED);
    ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.RBW);
    InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
    InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
    testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
    // rbw replica has a different length from the finalized one
    replica1 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED);
    replica2 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RBW);
    dn1 = mock(InterDatanodeProtocol.class);
    dn2 = mock(InterDatanodeProtocol.class);
    testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
    verify(dn2, never()).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
Also used : ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol) Test(org.junit.Test)
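The pair of verify calls captures the rule this test exercises: when a FINALIZED replica is present, recovery converges on the finalized length, and an RBW replica is synced only if its length already matches (hence verify(dn2, never()) in the second half). A minimal sketch of that rule, for illustration only; the class and method names below are invented, and the real logic lives in the datanode's recovery path, not in a helper like this:

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;

// Invented helper, for explanation only: restates the length rule that
// testFinalizedRbwReplicas checks via verify(...) and verify(..., never()).
final class FinalizedRbwRule {
    private FinalizedRbwRule() {}

    /** Recovery converges on the finalized replica's length. */
    static long recoveryLength(ReplicaRecoveryInfo finalized) {
        assert finalized.getOriginalReplicaState() == ReplicaState.FINALIZED;
        return finalized.getNumBytes();
    }

    /** An RBW replica is synced only when its length already matches. */
    static boolean rbwParticipates(ReplicaRecoveryInfo finalized,
                                   ReplicaRecoveryInfo rbw) {
        assert rbw.getOriginalReplicaState() == ReplicaState.RBW;
        return rbw.getNumBytes() == finalized.getNumBytes();
    }
}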

Example 7 with InterDatanodeProtocol

use of org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol in project hadoop by apache.

the class TestInterDatanodeProtocol method checkBlockMetaDataInfo.

/**
   * The following test first creates a file.
   * It verifies the block information from a datanode.
   * Then, it updates the block with new information and verifies again.
   * @param useDnHostname whether DNs should connect to other DNs by hostname
   */
private void checkBlockMetaDataInfo(boolean useDnHostname) throws Exception {
    MiniDFSCluster cluster = null;
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, useDnHostname);
    if (useDnHostname) {
        // Since the mini cluster only listens on the loopback we have to
        // ensure the hostname used to access DNs maps to the loopback. We
        // do this by telling the DN to advertise localhost as its hostname
        // instead of the default hostname.
        conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
    }
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).checkDataNodeHostConfig(true).build();
        cluster.waitActive();
        //create a file
        DistributedFileSystem dfs = cluster.getFileSystem();
        String filestr = "/foo";
        Path filepath = new Path(filestr);
        DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);
        assertTrue(dfs.exists(filepath));
        //get block info
        LocatedBlock locatedblock = getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
        DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
        assertTrue(datanodeinfo.length > 0);
        //connect to a data node
        DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
        InterDatanodeProtocol idp = DataNodeTestUtils.createInterDatanodeProtocolProxy(datanode, datanodeinfo[0], conf, useDnHostname);
        // Stop the block scanners.
        datanode.getBlockScanner().removeAllVolumeScanners();
        //verify BlockMetaDataInfo
        ExtendedBlock b = locatedblock.getBlock();
        InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
        checkMetaInfo(b, datanode);
        long recoveryId = b.getGenerationStamp() + 1;
        idp.initReplicaRecovery(new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));
        //verify updateBlock
        ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(), b.getNumBytes() / 2, b.getGenerationStamp() + 1);
        idp.updateReplicaUnderRecovery(b, recoveryId, b.getBlockId(), newblock.getNumBytes());
        checkMetaInfo(newblock, datanode);
        // Verify correct null response trying to init recovery for a missing block
        ExtendedBlock badBlock = new ExtendedBlock("fake-pool", b.getBlockId(), 0, 0);
        assertNull(idp.initReplicaRecovery(new RecoveringBlock(badBlock, locatedblock.getLocations(), recoveryId)));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock)
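Stripped of the MiniDFSCluster setup, the inter-datanode recovery handshake the test walks through is two calls: initReplicaRecovery fences the replica under a new generation stamp, and updateReplicaUnderRecovery commits the agreed length. A condensed sketch, reusing idp, b, and locatedblock from the test above:

// Condensed restatement of the handshake in checkBlockMetaDataInfo;
// idp, b, and locatedblock are the variables from the test above.
long recoveryId = b.getGenerationStamp() + 1; // new GS fences stale writers
ReplicaRecoveryInfo rri = idp.initReplicaRecovery( // step 1: freeze the replica
        new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));
if (rri != null) { // null means this datanode has no replica of the block
    idp.updateReplicaUnderRecovery(b, recoveryId, b.getBlockId(),
            rri.getNumBytes()); // step 2: commit the length under the new GS
}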

Example 8 with InterDatanodeProtocol

use of org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol in project hadoop by apache.

the class TestInterDatanodeProtocol method testInterDNProtocolTimeout.

/** Test to verify that the InterDatanode RPC times out as expected when
   *  the server DN does not respond.
   */
@Test(expected = SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
    final Server server = new TestServer(1, true);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
    DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId).build();
    InterDatanodeProtocol proxy = null;
    try {
        proxy = DataNode.createInterDataNodeProtocolProxy(dInfo, conf, 500, false);
        proxy.initReplicaRecovery(new RecoveringBlock(new ExtendedBlock("bpid", 1), null, 100));
        fail("Expected SocketTimeoutException exception, but did not get.");
    } finally {
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
        server.stop();
    }
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Server(org.apache.hadoop.ipc.Server) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) InetSocketAddress(java.net.InetSocketAddress) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol) Test(org.junit.Test)
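The mechanism under test is an ordinary socket read timeout rather than anything HDFS-specific. The same behaviour can be reproduced with plain java.net, as in this self-contained sketch: a ServerSocket that accepts connections but never responds, and a client read with the same 500 ms budget the test gives the proxy:

import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;

// Framework-free illustration: connect to a server that never answers
// and let the blocking read hit its timeout.
public class ReadTimeoutSketch {
    public static void main(String[] args) throws IOException {
        try (ServerSocket silent = new ServerSocket(0); // accepts, never writes
             Socket s = new Socket("localhost", silent.getLocalPort())) {
            s.setSoTimeout(500); // same 500 ms budget as the proxy in the test
            try {
                s.getInputStream().read(); // blocks until the timeout fires
                throw new AssertionError("expected a timeout");
            } catch (SocketTimeoutException expected) {
                System.out.println("timed out as expected");
            }
        }
    }
}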

Example 9 with InterDatanodeProtocol

use of org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol in project hadoop by apache.

the class DataNode method createInterDataNodeProtocolProxy.

public static InterDatanodeProtocol createInterDataNodeProtocolProxy(DatanodeID datanodeid, final Configuration conf, final int socketTimeout, final boolean connectToDnViaHostname) throws IOException {
    final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
    final InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
    }
    final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
    try {
        return loginUgi.doAs(new PrivilegedExceptionAction<InterDatanodeProtocol>() {

            @Override
            public InterDatanodeProtocol run() throws IOException {
                return new InterDatanodeProtocolTranslatorPB(addr, loginUgi, conf, NetUtils.getDefaultSocketFactory(conf), socketTimeout);
            }
        });
    } catch (InterruptedException ie) {
        throw new IOException(ie.getMessage());
    }
}
Also used : InterDatanodeProtocolTranslatorPB(org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB) InetSocketAddress(java.net.InetSocketAddress) InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol) IOException(java.io.IOException) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)
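A proxy returned by this factory holds an open RPC client until it is released, so call sites pair it with RPC.stopProxy in a finally block, as the timeout test in Example 8 does. A hypothetical call site, with all inputs assumed to be supplied by the caller:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.ipc.RPC;

// Hypothetical call site showing the proxy lifecycle; dn, conf, and
// rBlock are assumed to come from the caller's context.
public final class InterDnProxyUsage {
    static void recoverOn(DatanodeID dn, Configuration conf,
                          RecoveringBlock rBlock) throws IOException {
        InterDatanodeProtocol proxy =
                DataNode.createInterDataNodeProtocolProxy(dn, conf, 5000, false);
        try {
            proxy.initReplicaRecovery(rBlock); // any InterDatanodeProtocol call
        } finally {
            RPC.stopProxy(proxy); // always release the underlying RPC client
        }
    }
}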

Aggregations

InterDatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol): 9 uses
Test (org.junit.Test): 7 uses
ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo): 6 uses
IOException (java.io.IOException): 2 uses
InetSocketAddress (java.net.InetSocketAddress): 2 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2 uses
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 2 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 2 uses
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 2 uses
Path (org.apache.hadoop.fs.Path): 1 use
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1 use
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1 use
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 1 use
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 1 use
InterDatanodeProtocolTranslatorPB (org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB): 1 use
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 1 use
Server (org.apache.hadoop.ipc.Server): 1 use
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 1 use