Example 6 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestClientReportBadBlock method createAFileWithCorruptedBlockReplicas.

/**
   * Create a file with one block and corrupt some/all of the block replicas.
   */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl, int corruptBlockCount) throws IOException, AccessControlException, FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
    DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
    DFSTestUtil.waitReplication(dfs, filePath, repl);
    // Locate the file blocks by asking the NameNode
    final LocatedBlocks locatedblocks = dfs.dfs.getNamenode().getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
    Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
    // The file only has one block
    LocatedBlock lblock = locatedblocks.get(0);
    DatanodeInfo[] datanodeinfos = lblock.getLocations();
    ExtendedBlock block = lblock.getBlock();
    // corrupt some/all of the block replicas
    for (int i = 0; i < corruptBlockCount; i++) {
        DatanodeInfo dninfo = datanodeinfos[i];
        final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
        cluster.corruptReplica(dn, block);
        LOG.debug("Corrupted block " + block.getBlockName() + " on data node " + dninfo);
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
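
A minimal invocation sketch for this helper (the path, replication factor, and corrupt-replica count below are illustrative assumptions, not from the Hadoop test):

Path filePath = new Path("/tmp/corrupt-replica-test.dat");
// corrupt one of three replicas; the block should still be readable from the other two
createAFileWithCorruptedBlockReplicas(filePath, (short) 3, 1);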

Example 7 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestCrcCorruption method doTestEntirelyCorruptFile.

private void doTestEntirelyCorruptFile(int numDataNodes) throws Exception {
    long fileSize = 4096;
    Path file = new Path("/testFile");
    short replFactor = (short) numDataNodes;
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, file, fileSize, replFactor, 12345L);
        DFSTestUtil.waitReplication(fs, file, replFactor);
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
        int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
        assertEquals("All replicas not corrupted", replFactor, blockFilesCorrupted);
        try {
            IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(), conf, true);
            fail("Didn't get exception");
        } catch (IOException ioe) {
            DFSClient.LOG.info("Got expected exception", ioe);
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) IOUtils(org.apache.hadoop.io.IOUtils) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) IOException(java.io.IOException)
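
For reference, a caller of this helper exercises one replication level per test; a hedged sketch of such a caller (the test method name here is an assumption):

@Test
public void testEntirelyCorruptFileOneNode() throws Exception {
    // a single datanode means a single replica, so corrupting it corrupts every copy
    doTestEntirelyCorruptFile(1);
}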

Example 8 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestDFSClientRetries method testClientDNProtocolTimeout.

/** Tests that a timeout occurs when a DN does not respond to an RPC.
   * Start a server and ask it to sleep for n seconds, then make an
   * RPC to the server with rpcTimeout set to less than n and verify
   * that a SocketTimeoutException is raised.
   */
@Test
public void testClientDNProtocolTimeout() throws IOException {
    final Server server = new TestServer(1, true);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
    ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
    ClientDatanodeProtocol proxy = null;
    try {
        proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf, 500, false, fakeBlock);
        proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
        fail("Did not get expected exception: SocketTimeoutException");
    } catch (SocketTimeoutException e) {
        LOG.info("Got the expected Exception: SocketTimeoutException");
    } finally {
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
        server.stop();
    }
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) SocketTimeoutException(java.net.SocketTimeoutException) Server(org.apache.hadoop.ipc.Server) InetSocketAddress(java.net.InetSocketAddress) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) Test(org.junit.Test)
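
Two forms of the ExtendedBlock constructor appear in this test; a minimal sketch of both, taken directly from the calls above:

// block pool ID plus an existing Block object
ExtendedBlock fromBlock = new ExtendedBlock("fake-pool", new Block(12345L));
// block pool ID plus a raw block ID (the other block fields are defaulted)
ExtendedBlock fromId = new ExtendedBlock("bpid", 1);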

Example 9 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestReconstructStripedFile method testProcessErasureCodingTasksSubmitionShouldSucceed.

/*
   * Tests that processErasureCodingTasks does not throw exceptions due to
   * invalid ECTask submissions.
   */
@Test
public void testProcessErasureCodingTasksSubmitionShouldSucceed() throws Exception {
    DataNode dataNode = cluster.dataNodes.get(0).datanode;
    // Pack invalid (dummy) parameters into ecTasks. Regardless of the parameters,
    // each task's thread pool submission should succeed, so that an exception from
    // one task does not prevent the remaining tasks in the list from being processed.
    int size = cluster.dataNodes.size();
    byte[] liveIndices = new byte[size];
    DatanodeInfo[] dataDNs = new DatanodeInfo[size + 1];
    DatanodeStorageInfo targetDnInfos_1 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s01"));
    DatanodeStorageInfo[] dnStorageInfo = new DatanodeStorageInfo[] { targetDnInfos_1 };
    BlockECReconstructionInfo invalidECInfo = new BlockECReconstructionInfo(new ExtendedBlock("bp-id", 123456), dataDNs, dnStorageInfo, liveIndices, StripedFileTestUtil.getDefaultECPolicy());
    List<BlockECReconstructionInfo> ecTasks = new ArrayList<>();
    ecTasks.add(invalidECInfo);
    dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
}
Also used : BlockECReconstructionInfo(org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ArrayList(java.util.ArrayList) Test(org.junit.Test)
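
The property under test is per-task fault isolation; conceptually, the worker's submission loop follows the pattern below (a paraphrase of the behavior being asserted, not the actual ErasureCodingWorker code; submitReconstruction is a hypothetical stand-in):

for (BlockECReconstructionInfo task : ecTasks) {
    try {
        // submit this reconstruction task to the worker's thread pool
        submitReconstruction(task);
    } catch (Throwable t) {
        // log and continue, so one bad task cannot block the rest of the list
        LOG.warn("Failed to submit reconstruction task", t);
    }
}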

Example 10 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class BlockReaderTestUtil method getBlockReader.

/**
   * Get a BlockReader for the given block.
   */
public static BlockReader getBlockReader(final DistributedFileSystem fs, LocatedBlock testBlock, int offset, long lenToRead) throws IOException {
    ExtendedBlock block = testBlock.getBlock();
    DatanodeInfo[] nodes = testBlock.getLocations();
    InetSocketAddress targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
    return new BlockReaderFactory(fs.getClient().getConf())
        .setInetSocketAddress(targetAddr)
        .setBlock(block)
        .setFileName(targetAddr.toString() + ":" + block.getBlockId())
        .setBlockToken(testBlock.getBlockToken())
        .setStartOffset(offset)
        .setLength(lenToRead)
        .setVerifyChecksum(true)
        .setClientName("BlockReaderTestUtil")
        .setDatanodeInfo(nodes[0])
        .setClientCacheContext(ClientContext.getFromConf(fs.getConf()))
        .setCachingStrategy(CachingStrategy.newDefaultStrategy())
        .setConfiguration(fs.getConf())
        .setAllowShortCircuitLocalReads(true)
        .setTracer(FsTracer.get(fs.getConf()))
        .setRemotePeerFactory(new RemotePeerFactory() {

        @Override
        public Peer newConnectedPeer(InetSocketAddress addr, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(fs.getConf()).createSocket();
            try {
                sock.connect(addr, HdfsConstants.READ_TIMEOUT);
                sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                peer = DFSUtilClient.peerFromSocket(sock);
            } finally {
                if (peer == null) {
                    IOUtils.closeQuietly(sock);
                }
            }
            return peer;
        }
    }).build();
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) InetSocketAddress(java.net.InetSocketAddress) Peer(org.apache.hadoop.hdfs.net.Peer) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Token(org.apache.hadoop.security.token.Token) RemotePeerFactory(org.apache.hadoop.hdfs.RemotePeerFactory) Socket(java.net.Socket)
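
A hedged usage sketch for this utility (the file path and read length are illustrative; fs is a DistributedFileSystem, and DFSTestUtil.getAllBlocks is assumed to be on the test classpath):

// fetch the first located block of an existing test file
LocatedBlock lb = DFSTestUtil.getAllBlocks(fs, new Path("/testFile")).get(0);
// read the first 1024 bytes of that block with checksum verification enabled
BlockReader reader = BlockReaderTestUtil.getBlockReader(fs, lb, 0, 1024);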

Aggregations

ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 208 uses
Test (org.junit.Test): 124 uses
Path (org.apache.hadoop.fs.Path): 91 uses
Configuration (org.apache.hadoop.conf.Configuration): 71 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 63 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 62 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 55 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 53 uses
IOException (java.io.IOException): 41 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 41 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 38 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 34 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 32 uses
File (java.io.File): 22 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 20 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20 uses
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 18 uses
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 18 uses
InetSocketAddress (java.net.InetSocketAddress): 17 uses
ArrayList (java.util.ArrayList): 17 uses