
Example 11 with DatanodeInfoBuilder

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in the Apache Hadoop project.

From the class DFSStripedOutputStream, the method updatePipeline:

private void updatePipeline(ExtendedBlock newBG) throws IOException {
    final DatanodeInfo[] newNodes = new DatanodeInfo[numAllBlocks];
    final String[] newStorageIDs = new String[numAllBlocks];
    for (int i = 0; i < numAllBlocks; i++) {
        final StripedDataStreamer streamer = getStripedDataStreamer(i);
        final DatanodeInfo[] nodes = streamer.getNodes();
        final String[] storageIDs = streamer.getStorageIDs();
        if (streamer.isHealthy() && nodes != null && storageIDs != null) {
            // Healthy streamer: report its current datanode and storage.
            newNodes[i] = nodes[0];
            newStorageIDs[i] = storageIDs[0];
        } else {
            // Failed streamer: fill the slot with an empty placeholder node.
            newNodes[i] = new DatanodeInfoBuilder().setNodeID(DatanodeID.EMPTY_DATANODE_ID).build();
            newStorageIDs[i] = "";
        }
    }
    dfsClient.namenode.updatePipeline(dfsClient.clientName, currentBlockGroup, newBG, newNodes, newStorageIDs);
    currentBlockGroup = newBG;
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder)
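
The fallback branch is the part specific to DatanodeInfoBuilder: when a streamer has failed, its pipeline slot is filled with an empty placeholder node. A minimal sketch of just that pattern, pulled out of context (the class and helper method names are illustrative, not part of Hadoop):

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;

public class PlaceholderNodeExample {
    // Build a placeholder DatanodeInfo for a pipeline slot whose streamer is
    // unhealthy, exactly as the else-branch above does.
    static DatanodeInfo emptyPlaceholderNode() {
        return new DatanodeInfoBuilder()
            .setNodeID(DatanodeID.EMPTY_DATANODE_ID)
            .build();
    }
}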

Example 12 with DatanodeInfoBuilder

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in the Apache Hadoop project.

From the class FSNamesystem, the method getDatanodeStorageReport:

DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type) throws AccessControlException, StandbyException {
    checkSuperuserPrivilege();
    checkOperation(OperationCategory.UNCHECKED);
    readLock();
    try {
        checkOperation(OperationCategory.UNCHECKED);
        final DatanodeManager dm = getBlockManager().getDatanodeManager();
        final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);
        DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
        for (int i = 0; i < reports.length; i++) {
            final DatanodeDescriptor d = datanodes.get(i);
            reports[i] = new DatanodeStorageReport(new DatanodeInfoBuilder().setFrom(d).build(), d.getStorageReports());
        }
        return reports;
    } finally {
        readUnlock("getDatanodeStorageReport");
    }
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) DatanodeStorageReport(org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport)
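
Here the builder is used with setFrom(...) rather than setNodeID(...): each DatanodeDescriptor held by the DatanodeManager is copied into a plain DatanodeInfo before being packed into a DatanodeStorageReport. A minimal sketch of that loop body as a standalone helper (the class and method names are illustrative, not part of Hadoop):

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;

public class StorageReportExample {
    // Copy the descriptor into a plain DatanodeInfo and pair it with that
    // node's storage reports, mirroring the loop body above.
    static DatanodeStorageReport toStorageReport(DatanodeDescriptor d) {
        DatanodeInfo info = new DatanodeInfoBuilder().setFrom(d).build();
        return new DatanodeStorageReport(info, d.getStorageReports());
    }
}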

Example 13 with DatanodeInfoBuilder

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in the Apache Hadoop project.

From the class TestDFSClientSocketSize, the method getSendBufferSize:

private int getSendBufferSize(Configuration conf) throws IOException {
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
        cluster.waitActive();
        LOG.info("MiniDFSCluster started.");
        try (Socket socket = DataStreamer.createSocketForPipeline(
                new DatanodeInfoBuilder()
                    .setNodeID(cluster.dataNodes.get(0).datanode.getDatanodeId())
                    .build(),
                1, cluster.getFileSystem().getClient())) {
            return socket.getSendBufferSize();
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) Socket(java.net.Socket)
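
A short usage sketch for the helper above. The configuration key, the chosen buffer size, and the JUnit static imports are assumptions for illustration; they are not shown in this excerpt, and the actual key used by the real test class may differ:

@Test
public void testConfiguredSendBufferSize() throws IOException {
    Configuration conf = new HdfsConfiguration();
    // Assumed key: "dfs.client.socket.send.buffer.size" (client-side SO_SNDBUF hint).
    conf.setInt("dfs.client.socket.send.buffer.size", 128 * 1024);
    final int actual = getSendBufferSize(conf);
    LOG.info("Negotiated send buffer size: " + actual);
    // The OS may round the requested size, so only assert that a positive
    // value was negotiated.
    assertTrue("Expected a positive send buffer size", actual > 0);
}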

Example 14 with DatanodeInfoBuilder

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in the Apache Hadoop project.

From the class TestFileCorruption, the method testArrayOutOfBoundsException:

/** Test the case that a replica is reported corrupt while it is not
   * in blocksMap. Make sure that ArrayIndexOutOfBounds is not thrown.
   * See HADOOP-4351.
   */
@Test
public void testArrayOutOfBoundsException() throws Exception {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        final Path FILE_PATH = new Path("/tmp.txt");
        final long FILE_LEN = 1L;
        DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short) 2, 1L);
        // get the block
        final String bpid = cluster.getNamesystem().getBlockPoolId();
        ExtendedBlock blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
        assertFalse("Data directory does not contain any blocks or there was an " + "IO error", blk == null);
        // start a third datanode
        cluster.startDataNodes(conf, 1, true, null, null);
        ArrayList<DataNode> datanodes = cluster.getDataNodes();
        assertEquals(datanodes.size(), 3);
        DataNode dataNode = datanodes.get(2);
        // report corrupted block by the third datanode
        DatanodeRegistration dnR = InternalDataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
        FSNamesystem ns = cluster.getNamesystem();
        ns.writeLock();
        try {
            cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
                blk, new DatanodeInfoBuilder().setNodeID(dnR).build(),
                "TEST", "STORAGE_ID");
        } finally {
            ns.writeUnlock();
        }
        // open the file
        fs.open(FILE_PATH);
        //clean up
        fs.delete(FILE_PATH, false);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)

Example 15 with DatanodeInfoBuilder

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in the Apache Hadoop project.

From the class TestInterDatanodeProtocol, the method testInterDNProtocolTimeout:

/** Test to verify that InterDatanode RPC timesout as expected when
   *  the server DN does not respond.
   */
@Test(expected = SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
    final Server server = new TestServer(1, true);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
    DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId).build();
    InterDatanodeProtocol proxy = null;
    try {
        proxy = DataNode.createInterDataNodeProtocolProxy(dInfo, conf, 500, false);
        proxy.initReplicaRecovery(new RecoveringBlock(new ExtendedBlock("bpid", 1), null, 100));
        fail("Expected SocketTimeoutException exception, but did not get.");
    } finally {
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
        server.stop();
    }
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Server(org.apache.hadoop.ipc.Server) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) InetSocketAddress(java.net.InetSocketAddress) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) InterDatanodeProtocol(org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol) Test(org.junit.Test)

Aggregations

DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 15
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 11
Test (org.junit.Test): 6
IOException (java.io.IOException): 5
Configuration (org.apache.hadoop.conf.Configuration): 4
Path (org.apache.hadoop.fs.Path): 3
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
PerDatanodeVisitorInfo (org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo): 3
Visitor (org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor): 3
TemporarySocketDirectory (org.apache.hadoop.net.unix.TemporarySocketDirectory): 3
InetSocketAddress (java.net.InetSocketAddress): 2
Socket (java.net.Socket): 2
HashMap (java.util.HashMap): 2
MutableBoolean (org.apache.commons.lang.mutable.MutableBoolean): 2
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 2
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 2
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 2
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 2
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 2