
Example 41 with DatanodeID

use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

the class TestDataNodeVolumeFailure method accessBlock.

/**
   * Try to access a block on a data node; throws an IOException if the read fails.
   * @param datanode the data node hosting the replica to read
   * @param lblock the located block to access
   * @throws IOException if the block cannot be read from the given data node
   */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock) throws IOException {
    ExtendedBlock block = lblock.getBlock();
    InetSocketAddress targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
    BlockReader blockReader = new BlockReaderFactory(new DfsClientConf(conf))
        .setInetSocketAddress(targetAddr)
        .setBlock(block)
        .setFileName(BlockReaderFactory.getFileName(targetAddr, "test-blockpoolid", block.getBlockId()))
        .setBlockToken(lblock.getBlockToken())
        .setStartOffset(0)
        .setLength(0)
        .setVerifyChecksum(true)
        .setClientName("TestDataNodeVolumeFailure")
        .setDatanodeInfo(datanode)
        .setCachingStrategy(CachingStrategy.newDefaultStrategy())
        .setClientCacheContext(ClientContext.getFromConf(conf))
        .setConfiguration(conf)
        .setTracer(FsTracer.get(conf))
        .setRemotePeerFactory(new RemotePeerFactory() {

        @Override
        public Peer newConnectedPeer(InetSocketAddress addr, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
                sock.connect(addr, HdfsConstants.READ_TIMEOUT);
                sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                peer = DFSUtilClient.peerFromSocket(sock);
            } finally {
                if (peer == null) {
                    IOUtils.closeSocket(sock);
                }
            }
            return peer;
        }
    }).build();
    blockReader.close();
}
Also used : DfsClientConf(org.apache.hadoop.hdfs.client.impl.DfsClientConf) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) InetSocketAddress(java.net.InetSocketAddress) BlockReader(org.apache.hadoop.hdfs.BlockReader) Peer(org.apache.hadoop.hdfs.net.Peer) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) BlockReaderFactory(org.apache.hadoop.hdfs.client.impl.BlockReaderFactory) Token(org.apache.hadoop.security.token.Token) RemotePeerFactory(org.apache.hadoop.hdfs.RemotePeerFactory) Socket(java.net.Socket)
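The builder above only opens a reader and immediately closes it; a caller typically enumerates the located blocks of a file and probes each replica. Below is a minimal, hedged sketch of such a driver, assuming the test class's MiniDFSCluster field (cluster) and the accessBlock method shown above; filePath and fileLen are hypothetical placeholders, not names from the original test.

private void accessAllBlocks(String filePath, long fileLen) throws IOException {
    // Hedged sketch, not part of the quoted test: probe every replica of a file
    // by reusing accessBlock() above.
    DistributedFileSystem fs = cluster.getFileSystem();
    List<LocatedBlock> blocks = fs.getClient().getNamenode()
        .getBlockLocations(filePath, 0, fileLen).getLocatedBlocks();
    for (LocatedBlock lb : blocks) {
        for (DatanodeInfo dn : lb.getLocations()) {
            // accessBlock() throws an IOException if this replica cannot be read.
            accessBlock(dn, lb);
        }
    }
}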

Example 42 with DatanodeID

use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

the class TestInterDatanodeProtocol method testInterDNProtocolTimeout.

/** Test to verify that InterDatanode RPC times out as expected when
   *  the server DN does not respond.
   */
@Test(expected = SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
    final Server server = new TestServer(1, true);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
    DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId).build();
    InterDatanodeProtocol proxy = null;
    try {
        proxy = DataNode.createInterDataNodeProtocolProxy(dInfo, conf, 500, false);
        proxy.initReplicaRecovery(new RecoveringBlock(new ExtendedBlock("bpid", 1), null, 100));
        fail("Expected SocketTimeoutException exception, but did not get.");
    } finally {
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
        server.stop();
    }
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Server(org.apache.hadoop.ipc.Server) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) InetSocketAddress(java.net.InetSocketAddress) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol) Test(org.junit.Test)
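DFSTestUtil.getLocalDatanodeID(port) hides how the fake datanode identity is assembled. If a test needs full control over the individual fields, the ID can be constructed directly; the sketch below assumes the seven-argument DatanodeID constructor (ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort, ipcPort), requires java.util.UUID, and uses placeholder port values.

// Hedged sketch: hand-built equivalent of DFSTestUtil.getLocalDatanodeID(port).
// All literal values are placeholders chosen for illustration.
DatanodeID fakeDnId = new DatanodeID(
    "127.0.0.1",                  // ipAddr
    "localhost",                  // hostName
    UUID.randomUUID().toString(), // datanodeUuid
    addr.getPort(),               // xferPort, pointed at the dummy TestServer
    0,                            // infoPort
    0,                            // infoSecurePort
    0);                           // ipcPort
DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId).build();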

Example 43 with DatanodeID

use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

the class TestDeleteRace method testDeleteAndCommitBlockSynchronizationRace.

/**
   * Test the race between the delete operation and the commitBlockSynchronization
   * method. See HDFS-6825.
   * @param hasSnapshot whether a snapshot is taken before the delete
   * @throws Exception
   */
private void testDeleteAndCommitBlockSynchronizationRace(boolean hasSnapshot) throws Exception {
    LOG.info("Start testing, hasSnapshot: " + hasSnapshot);
    ArrayList<AbstractMap.SimpleImmutableEntry<String, Boolean>> testList = new ArrayList<AbstractMap.SimpleImmutableEntry<String, Boolean>>();
    testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>("/test-file", false));
    testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>("/test-file1", true));
    testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>("/testdir/testdir1/test-file", false));
    testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>("/testdir/testdir1/test-file1", true));
    final Path rootPath = new Path("/");
    final Configuration conf = new Configuration();
    // Disable permissions so that another user can recover the lease.
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    FSDataOutputStream stm = null;
    Map<DataNode, DatanodeProtocolClientSideTranslatorPB> dnMap = new HashMap<DataNode, DatanodeProtocolClientSideTranslatorPB>();
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        int stId = 0;
        for (AbstractMap.SimpleImmutableEntry<String, Boolean> stest : testList) {
            String testPath = stest.getKey();
            Boolean mkSameDir = stest.getValue();
            LOG.info("test on " + testPath + " mkSameDir: " + mkSameDir + " snapshot: " + hasSnapshot);
            Path fPath = new Path(testPath);
            // find the topmost non-root parent of the test path
            Path grandestNonRootParent = fPath;
            while (!grandestNonRootParent.getParent().equals(rootPath)) {
                grandestNonRootParent = grandestNonRootParent.getParent();
            }
            stm = fs.create(fPath);
            LOG.info("test on " + testPath + " created " + fPath);
            // write a half block
            AppendTestUtil.write(stm, 0, BLOCK_SIZE / 2);
            stm.hflush();
            if (hasSnapshot) {
                SnapshotTestHelper.createSnapshot(fs, rootPath, "st" + String.valueOf(stId));
                ++stId;
            }
            // Look into the block manager on the active node for the block
            // under construction.
            NameNode nn = cluster.getNameNode();
            ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, fPath);
            DatanodeDescriptor expectedPrimary = DFSTestUtil.getExpectedPrimaryNode(nn, blk);
            LOG.info("Expecting block recovery to be triggered on DN " + expectedPrimary);
            // Find the corresponding DN daemon, and spy on its connection to the
            // active.
            DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
            DatanodeProtocolClientSideTranslatorPB nnSpy = dnMap.get(primaryDN);
            if (nnSpy == null) {
                nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(primaryDN, nn);
                dnMap.put(primaryDN, nnSpy);
            }
            // Delay the commitBlockSynchronization call
            DelayAnswer delayer = new DelayAnswer(LOG);
            Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
                Mockito.eq(blk),
                Mockito.anyInt(), // new genstamp
                Mockito.anyLong(), // new length
                Mockito.eq(true), // close file
                Mockito.eq(false), // delete block
                (DatanodeID[]) Mockito.anyObject(), // new targets
                (String[]) Mockito.anyObject()); // new target storages
            fs.recoverLease(fPath);
            LOG.info("Waiting for commitBlockSynchronization call from primary");
            delayer.waitForCall();
            LOG.info("Deleting recursively " + grandestNonRootParent);
            fs.delete(grandestNonRootParent, true);
            if (mkSameDir && !grandestNonRootParent.toString().equals(testPath)) {
                LOG.info("Recreate dir " + grandestNonRootParent + " testpath: " + testPath);
                fs.mkdirs(grandestNonRootParent);
            }
            delayer.proceed();
            LOG.info("Now wait for result");
            delayer.waitForResult();
            Throwable t = delayer.getThrown();
            if (t != null) {
                LOG.info("Result exception (snapshot: " + hasSnapshot + "): " + t);
            }
        }
        // end of loop over each fPath
        LOG.info("Now check we can restart");
        cluster.restartNameNodes();
        LOG.info("Restart finished");
    } finally {
        if (stm != null) {
            IOUtils.closeStream(stm);
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) DelayAnswer(org.apache.hadoop.test.GenericTestUtils.DelayAnswer) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) AbstractMap(java.util.AbstractMap) DatanodeProtocolClientSideTranslatorPB(org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
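The race only reproduces because DelayAnswer parks the spied commitBlockSynchronization RPC until the test has issued the delete. DelayAnswer comes from GenericTestUtils (see the import list above); the hypothetical, stripped-down equivalent below illustrates the blocking-Answer pattern it relies on and is not the real implementation, which also tracks results and thrown exceptions.

import java.util.concurrent.CountDownLatch;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

// Hedged sketch of the blocking-Answer pattern used by DelayAnswer.
class BlockingAnswer implements Answer<Object> {
    private final CountDownLatch fired = new CountDownLatch(1);   // call has arrived
    private final CountDownLatch allowed = new CountDownLatch(1); // test says proceed

    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
        fired.countDown(); // tell the test the RPC reached the spy
        allowed.await();   // block until the test calls proceed()
        return invocation.callRealMethod();
    }

    void waitForCall() throws InterruptedException { fired.await(); }
    void proceed() { allowed.countDown(); }
}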

Example 44 with DatanodeID

use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

the class TestCommitBlockSynchronization method testCommitBlockSynchronization2.

@Test
public void testCommitBlockSynchronization2() throws IOException {
    INodeFile file = mockFileUnderConstruction();
    Block block = new Block(blockId, length, genStamp);
    FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
    DatanodeID[] newTargets = new DatanodeID[0];
    ExtendedBlock lastBlock = new ExtendedBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, false, false, newTargets, null);
    // Make sure the call fails if the generation stamp does not match
    // the block recovery ID.
    try {
        namesystemSpy.commitBlockSynchronization(lastBlock, genStamp - 1, length, false, false, newTargets, null);
        fail("Failed to get expected IOException on generation stamp/" + "recovery ID mismatch");
    } catch (IOException ioe) {
    // Expected exception.
    }
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) IOException(java.io.IOException) Test(org.junit.Test)
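The positional boolean arguments are easy to misread. Annotated below, as a hedged aid only, is the call that is expected to fail; the parameter roles are inferred from the order implied by the mocked call in Example 43 (oldBlock, new genstamp, new length, close file, delete block, new targets, new target storages), not quoted from the source.

// Hedged annotation of the failing call; parameter roles are inferred.
namesystemSpy.commitBlockSynchronization(
    lastBlock,    // oldBlock
    genStamp - 1, // new generation stamp: stale, so it mismatches the recovery ID
    length,       // new length
    false,        // close file
    false,        // delete block
    newTargets,   // new targets (empty DatanodeID[])
    null);        // new target storages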

Example 45 with DatanodeID

use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

the class TestCommitBlockSynchronization method testCommitBlockSynchronizationWithClose.

@Test
public void testCommitBlockSynchronizationWithClose() throws IOException {
    INodeFile file = mockFileUnderConstruction();
    Block block = new Block(blockId, length, genStamp);
    FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
    DatanodeID[] newTargets = new DatanodeID[0];
    ExtendedBlock lastBlock = new ExtendedBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, null);
    // Repeat the call to make sure a second, identical synchronization succeeds
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, null);
    BlockInfo completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
    completedBlockInfo.setBlockCollectionId(file.getId());
    completedBlockInfo.setGenerationStamp(genStamp);
    doReturn(completedBlockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
    doReturn(completedBlockInfo).when(file).getLastBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, null);
}
Also used : BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
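The third commitBlockSynchronization call succeeds only because the spy now reports a completed replica for the block. A hedged follow-up, not present in the original test, could make that precondition explicit (assertSame and assertTrue from org.junit.Assert):

// Hedged follow-up assertions, not in the original test.
BlockInfo stored = namesystemSpy.getStoredBlock(block);
assertSame(completedBlockInfo, stored); // the stubbed getStoredBlock answer
assertTrue(stored.isComplete());        // no under-construction feature attached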

Aggregations

DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 51 usages
Test (org.junit.Test): 36 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 18 usages
Configuration (org.apache.hadoop.conf.Configuration): 13 usages
Path (org.apache.hadoop.fs.Path): 12 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 10 usages
IOException (java.io.IOException): 8 usages
InetSocketAddress (java.net.InetSocketAddress): 8 usages
Peer (org.apache.hadoop.hdfs.net.Peer): 8 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 usages
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 8 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 7 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 6 usages
Socket (java.net.Socket): 5 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 5 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 5 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 5 usages
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 5 usages