Search in sources:

Example 1 with RemotePeerFactory

Use of org.apache.hadoop.hdfs.RemotePeerFactory in project hadoop by apache.

Source: class BlockReaderTestUtil, method getBlockReader.

/**
   * Get a BlockReader for the given block.
   */
public static BlockReader getBlockReader(final DistributedFileSystem fs, LocatedBlock testBlock, int offset, long lenToRead) throws IOException {
    InetSocketAddress targetAddr = null;
    ExtendedBlock block = testBlock.getBlock();
    DatanodeInfo[] nodes = testBlock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
    return new BlockReaderFactory(fs.getClient().getConf()).
        setInetSocketAddress(targetAddr).
        setBlock(block).
        setFileName(targetAddr.toString() + ":" + block.getBlockId()).
        setBlockToken(testBlock.getBlockToken()).
        setStartOffset(offset).
        setLength(lenToRead).
        setVerifyChecksum(true).
        setClientName("BlockReaderTestUtil").
        setDatanodeInfo(nodes[0]).
        setClientCacheContext(ClientContext.getFromConf(fs.getConf())).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setConfiguration(fs.getConf()).
        setAllowShortCircuitLocalReads(true).
        setTracer(FsTracer.get(fs.getConf())).
        setRemotePeerFactory(new RemotePeerFactory() {

        @Override
        public Peer newConnectedPeer(InetSocketAddress addr, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(fs.getConf()).createSocket();
            try {
                sock.connect(addr, HdfsConstants.READ_TIMEOUT);
                sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                peer = DFSUtilClient.peerFromSocket(sock);
            } finally {
                if (peer == null) {
                    IOUtils.closeQuietly(sock);
                }
            }
            return peer;
        }
    }).build();
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), InetSocketAddress (java.net.InetSocketAddress), Peer (org.apache.hadoop.hdfs.net.Peer), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Token (org.apache.hadoop.security.token.Token), RemotePeerFactory (org.apache.hadoop.hdfs.RemotePeerFactory), Socket (java.net.Socket)
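
A minimal usage sketch of the utility above (illustrative, not from the indexed sources): it assumes a running MiniDFSCluster, creates a one-block file with DFSTestUtil, and reads the first kilobyte of that block through getBlockReader. The path /testfile and the sizes are assumptions for the example; BlockReaderTestUtil is the test class shown above, which lives in the org.apache.hadoop.hdfs test package.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class BlockReaderSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            cluster.waitActive();
            DistributedFileSystem fs = cluster.getFileSystem();
            // Write a small single-block file so there is a block to read.
            Path path = new Path("/testfile");
            DFSTestUtil.createFile(fs, path, 4096, (short) 1, 0L);
            LocatedBlock lblock = DFSTestUtil.getAllBlocks(fs, path).get(0);
            // Open a reader over the first kilobyte of the block.
            BlockReader reader = BlockReaderTestUtil.getBlockReader(fs, lblock, 0, 1024);
            byte[] buf = new byte[1024];
            int n = reader.read(buf, 0, buf.length);
            System.out.println("read " + n + " bytes");
            reader.close();
        } finally {
            cluster.shutdown();
        }
    }
}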

Example 2 with RemotePeerFactory

Use of org.apache.hadoop.hdfs.RemotePeerFactory in project hadoop by apache.

Source: class NamenodeFsck, method copyBlock.

/*
   * XXX (ab) Bulk of this method is copied verbatim from {@link DFSClient}, which is
   * bad. Both places should be refactored to provide a method to copy blocks
   * around.
   */
private void copyBlock(final DFSClient dfs, LocatedBlock lblock, OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();
    while (blockReader == null) {
        DatanodeInfo chosenNode;
        try {
            chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
            targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
        } catch (IOException ie) {
            if (failures >= HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
                throw new IOException("Could not obtain block " + lblock, ie);
            }
            LOG.info("Could not obtain block from any node:  " + ie);
            try {
                Thread.sleep(10000);
            } catch (InterruptedException iex) {
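                // Ignore the interrupt; the loop retries after counting this failure.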
            }
            deadNodes.clear();
            failures++;
            continue;
        }
        try {
            String file = BlockReaderFactory.getFileName(targetAddr, block.getBlockPoolId(), block.getBlockId());
            blockReader = new BlockReaderFactory(dfs.getConf()).
                setFileName(file).
                setBlock(block).
                setBlockToken(lblock.getBlockToken()).
                setStartOffset(0).
                setLength(block.getNumBytes()).
                setVerifyChecksum(true).
                setClientName("fsck").
                setDatanodeInfo(chosenNode).
                setInetSocketAddress(targetAddr).
                setCachingStrategy(CachingStrategy.newDropBehind()).
                setClientCacheContext(dfs.getClientContext()).
                setConfiguration(namenode.getConf()).
                setTracer(tracer).
                setRemotePeerFactory(new RemotePeerFactory() {

                @Override
                public Peer newConnectedPeer(InetSocketAddress addr, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws IOException {
                    Peer peer = null;
                    Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
                    try {
                        s.connect(addr, HdfsConstants.READ_TIMEOUT);
                        s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                        peer = DFSUtilClient.peerFromSocketAndKey(dfs.getSaslDataTransferClient(), s, NamenodeFsck.this, blockToken, datanodeId, HdfsConstants.READ_TIMEOUT);
                    } finally {
                        if (peer == null) {
                            IOUtils.closeQuietly(s);
                        }
                    }
                    return peer;
                }
            }).build();
        } catch (IOException ex) {
            // Put chosen node into dead list, continue
            LOG.info("Failed to connect to " + targetAddr + ":" + ex);
            deadNodes.add(chosenNode);
        }
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
        while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
            fos.write(buf, 0, cnt);
            bytesRead += cnt;
        }
        if (bytesRead != block.getNumBytes()) {
            throw new IOException("Recorded block size is " + block.getNumBytes() + ", but datanode returned " + bytesRead + " bytes");
        }
    } catch (Exception e) {
        LOG.error("Error reading block", e);
        success = false;
    } finally {
        blockReader.close();
    }
    if (!success) {
        throw new Exception("Could not copy block data for " + lblock.getBlock());
    }
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), InetSocketAddress (java.net.InetSocketAddress), BlockReader (org.apache.hadoop.hdfs.BlockReader), Peer (org.apache.hadoop.hdfs.net.Peer), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), IOException (java.io.IOException), UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException), FileNotFoundException (java.io.FileNotFoundException), AccessControlException (org.apache.hadoop.security.AccessControlException), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier), TreeSet (java.util.TreeSet), BlockReaderFactory (org.apache.hadoop.hdfs.client.impl.BlockReaderFactory), RemotePeerFactory (org.apache.hadoop.hdfs.RemotePeerFactory), Socket (java.net.Socket)
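
The while loop in copyBlock is a bounded-retry pattern worth noting: replicas that fail go into a dead set; when every replica has failed, the set is cleared, the thread backs off for ten seconds, and a failure round is counted against DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT. A stripped-down skeleton of that pattern (illustrative only; the Node and Connector types are stand-ins, not Hadoop APIs):

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

public class RetrySketch {
    interface Node {}
    interface Connector {
        AutoCloseable connect(Node n) throws IOException;
    }

    static AutoCloseable connectWithRetries(Node[] replicas, Connector connector,
            int maxFailures) throws IOException, InterruptedException {
        Set<Node> dead = new HashSet<>();
        int failures = 0;
        while (true) {
            // Pick the first replica not yet marked dead in this round.
            Node candidate = null;
            for (Node n : replicas) {
                if (!dead.contains(n)) {
                    candidate = n;
                    break;
                }
            }
            if (candidate == null) {
                // Every replica failed this round: give up, or back off and retry.
                if (++failures > maxFailures) {
                    throw new IOException("No live replica after " + failures + " rounds");
                }
                dead.clear();
                Thread.sleep(10000); // same 10s backoff copyBlock uses
                continue;
            }
            try {
                return connector.connect(candidate);
            } catch (IOException e) {
                dead.add(candidate); // mark this replica dead, try the next one
            }
        }
    }
}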

Example 3 with RemotePeerFactory

Use of org.apache.hadoop.hdfs.RemotePeerFactory in project hadoop by apache.

Source: class TestBlockTokenWithDFS, method tryRead.

// try reading a block using a BlockReader directly
protected void tryRead(final Configuration conf, LocatedBlock lblock, boolean shouldSucceed) {
    InetSocketAddress targetAddr = null;
    IOException ioe = null;
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();
    try {
        DatanodeInfo[] nodes = lblock.getLocations();
        targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
        blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
            setFileName(BlockReaderFactory.getFileName(targetAddr, "test-blockpoolid", block.getBlockId())).
            setBlock(block).
            setBlockToken(lblock.getBlockToken()).
            setInetSocketAddress(targetAddr).
            setStartOffset(0).
            setLength(0).
            setVerifyChecksum(true).
            setClientName("TestBlockTokenWithDFS").
            setDatanodeInfo(nodes[0]).
            setCachingStrategy(CachingStrategy.newDefaultStrategy()).
            setClientCacheContext(ClientContext.getFromConf(conf)).
            setConfiguration(conf).
            setTracer(FsTracer.get(conf)).
            setRemotePeerFactory(new RemotePeerFactory() {

            @Override
            public Peer newConnectedPeer(InetSocketAddress addr, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws IOException {
                Peer peer = null;
                Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
                try {
                    sock.connect(addr, HdfsConstants.READ_TIMEOUT);
                    sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                    peer = DFSUtilClient.peerFromSocket(sock);
                } finally {
                    if (peer == null) {
                        IOUtils.closeSocket(sock);
                    }
                }
                return peer;
            }
        }).build();
    } catch (IOException ex) {
        ioe = ex;
    } finally {
        if (blockReader != null) {
            try {
                blockReader.close();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    }
    if (shouldSucceed) {
        Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, " + "when it is expected to be valid", blockReader);
    } else {
        Assert.assertNotNull("OP_READ_BLOCK: access token is valid, " + "when it is expected to be invalid", ioe);
        Assert.assertTrue("OP_READ_BLOCK failed due to reasons other than access token: ", ioe instanceof InvalidBlockTokenException);
    }
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), InetSocketAddress (java.net.InetSocketAddress), BlockReader (org.apache.hadoop.hdfs.BlockReader), Peer (org.apache.hadoop.hdfs.net.Peer), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), IOException (java.io.IOException), DfsClientConf (org.apache.hadoop.hdfs.client.impl.DfsClientConf), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier), InvalidBlockTokenException (org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException), BlockReaderFactory (org.apache.hadoop.hdfs.client.impl.BlockReaderFactory), RemotePeerFactory (org.apache.hadoop.hdfs.RemotePeerFactory), Socket (java.net.Socket)
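
A hedged sketch of how tryRead is driven: the test calls it once while the block token is valid and once after the token has been invalidated. These call sites are illustrative; the token-expiry step is elided because it is test-specific.

    // Hypothetical call sites; `conf` and `lblock` come from the surrounding test.
    tryRead(conf, lblock, true);   // valid token: a BlockReader must be created
    // ... invalidate or expire the block token here (elided) ...
    tryRead(conf, lblock, false);  // invalid token: expect InvalidBlockTokenException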

Example 4 with RemotePeerFactory

Use of org.apache.hadoop.hdfs.RemotePeerFactory in project hadoop by apache.

Source: class TestDataNodeVolumeFailure, method accessBlock.

/**
   * Try to access a block on a datanode; throws an exception on failure.
   * @param datanode the datanode hosting a replica of the block
   * @param lblock the located block to access
   * @throws IOException if the block cannot be read
   */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock) throws IOException {
    InetSocketAddress targetAddr = null;
    ExtendedBlock block = lblock.getBlock();
    targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
    BlockReader blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
        setInetSocketAddress(targetAddr).
        setBlock(block).
        setFileName(BlockReaderFactory.getFileName(targetAddr, "test-blockpoolid", block.getBlockId())).
        setBlockToken(lblock.getBlockToken()).
        setStartOffset(0).
        setLength(0).
        setVerifyChecksum(true).
        setClientName("TestDataNodeVolumeFailure").
        setDatanodeInfo(datanode).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setTracer(FsTracer.get(conf)).
        setRemotePeerFactory(new RemotePeerFactory() {

        @Override
        public Peer newConnectedPeer(InetSocketAddress addr, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
                sock.connect(addr, HdfsConstants.READ_TIMEOUT);
                sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                peer = DFSUtilClient.peerFromSocket(sock);
            } finally {
                if (peer == null) {
                    IOUtils.closeSocket(sock);
                }
            }
            return peer;
        }
    }).build();
    blockReader.close();
}
Also used: DfsClientConf (org.apache.hadoop.hdfs.client.impl.DfsClientConf), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), InetSocketAddress (java.net.InetSocketAddress), BlockReader (org.apache.hadoop.hdfs.BlockReader), Peer (org.apache.hadoop.hdfs.net.Peer), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), BlockReaderFactory (org.apache.hadoop.hdfs.client.impl.BlockReaderFactory), Token (org.apache.hadoop.security.token.Token), RemotePeerFactory (org.apache.hadoop.hdfs.RemotePeerFactory), Socket (java.net.Socket)
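
A hedged sketch of how a helper like accessBlock is typically driven, probing every replica of every block of a file. The fs, path, and conf variables are assumptions from the surrounding test, and DFSTestUtil.getAllBlocks stands in for however the test actually fetches its located blocks.

    // Illustrative call pattern, not from the indexed source.
    List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs, path);
    for (LocatedBlock lb : blocks) {
        for (DatanodeInfo dn : lb.getLocations()) {
            accessBlock(dn, lb); // throws IOException if the replica is unreadable
        }
    }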

Aggregations

InetSocketAddress (java.net.InetSocketAddress): 4 uses
Socket (java.net.Socket): 4 uses
RemotePeerFactory (org.apache.hadoop.hdfs.RemotePeerFactory): 4 uses
Peer (org.apache.hadoop.hdfs.net.Peer): 4 uses
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 4 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 4 uses
BlockReader (org.apache.hadoop.hdfs.BlockReader): 3 uses
BlockReaderFactory (org.apache.hadoop.hdfs.client.impl.BlockReaderFactory): 3 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 3 uses
IOException (java.io.IOException): 2 uses
DfsClientConf (org.apache.hadoop.hdfs.client.impl.DfsClientConf): 2 uses
BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier): 2 uses
Token (org.apache.hadoop.security.token.Token): 2 uses
FileNotFoundException (java.io.FileNotFoundException): 1 use
TreeSet (java.util.TreeSet): 1 use
UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException): 1 use
InvalidBlockTokenException (org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException): 1 use
AccessControlException (org.apache.hadoop.security.AccessControlException): 1 use