Example 21 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestGetBlocks, method testGetBlocks.

/** test getBlocks */
@Test
public void testGetBlocks() throws Exception {
    final Configuration CONF = new HdfsConfiguration();
    final short REPLICATION_FACTOR = (short) 2;
    final int DEFAULT_BLOCK_SIZE = 1024;
    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    CONF.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR).build();
    try {
        cluster.waitActive();
        // the third block will not be visible to getBlocks
        long fileLen = 2 * DEFAULT_BLOCK_SIZE + 1;
        DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/tmp.txt"), fileLen, REPLICATION_FACTOR, 0L);
        // get blocks & data nodes
        List<LocatedBlock> locatedBlocks;
        DatanodeInfo[] dataNodes = null;
        boolean notWritten;
        do {
            final DFSClient dfsclient = new DFSClient(DFSUtilClient.getNNAddress(CONF), CONF);
            locatedBlocks = dfsclient.getNamenode().getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
            assertEquals(3, locatedBlocks.size());
            notWritten = false;
            for (int i = 0; i < 2; i++) {
                dataNodes = locatedBlocks.get(i).getLocations();
                if (dataNodes.length != REPLICATION_FACTOR) {
                    notWritten = true;
                    try {
                        Thread.sleep(10);
                    } catch (InterruptedException e) {
                    }
                    break;
                }
            }
        } while (notWritten);
        // get RPC client to namenode
        InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
        NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF, DFSUtilClient.getNNUri(addr), NamenodeProtocol.class).getProxy();
        // get blocks of size fileLen from dataNodes[0]
        BlockWithLocations[] locs;
        locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
        assertEquals(locs.length, 2);
        assertEquals(locs[0].getStorageIDs().length, 2);
        assertEquals(locs[1].getStorageIDs().length, 2);
        // get blocks of size BlockSize from dataNodes[0]
        locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
        assertEquals(locs.length, 1);
        assertEquals(locs[0].getStorageIDs().length, 2);
        // get blocks of size 1 from dataNodes[0]
        locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
        assertEquals(locs.length, 1);
        assertEquals(locs[0].getStorageIDs().length, 2);
        // get blocks of size 0 from dataNodes[0]
        getBlocksWithException(namenode, dataNodes[0], 0);
        // get blocks of size -1 from dataNodes[0]
        getBlocksWithException(namenode, dataNodes[0], -1);
        // get blocks from a non-existent datanode
        DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
        getBlocksWithException(namenode, info, 2);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Configuration (org.apache.hadoop.conf.Configuration), InetSocketAddress (java.net.InetSocketAddress), BlockWithLocations (org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), NamenodeProtocol (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol), Test (org.junit.Test)
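
The test calls a getBlocksWithException helper that is not shown in this snippet. A minimal sketch of what such a helper might look like, assuming it only verifies that NamenodeProtocol#getBlocks rejects the request; the RemoteException handling and the assertion message are assumptions, not the test's actual code:

// Hypothetical sketch of the getBlocksWithException helper used above.
// Assumption: NamenodeProtocol#getBlocks fails with a RemoteException for an
// invalid size or an unknown datanode, and the helper only asserts that failure.
private static void getBlocksWithException(NamenodeProtocol namenode,
        DatanodeInfo datanode, long size) throws IOException {
    boolean threw = false;
    try {
        namenode.getBlocks(datanode, size);
    } catch (RemoteException e) {
        // org.apache.hadoop.ipc.RemoteException wraps the server-side error.
        threw = true;
    }
    assertTrue("expected getBlocks(size=" + size + ") to fail", threw);
}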

Example 22 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestFileCreation, method testLeaseExpireHardLimit.

/**
   * Create a file, write something, hflush but not close.
   * Then change lease period and wait for lease recovery.
   * Finally, read the block directly from each Datanode and verify the content.
   */
@Test
public void testLeaseExpireHardLimit() throws Exception {
    System.out.println("testLeaseExpireHardLimit start");
    final long leasePeriod = 1000;
    final int DATANODE_NUM = 3;
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
    DistributedFileSystem dfs = null;
    try {
        cluster.waitActive();
        dfs = cluster.getFileSystem();
        // create a new file.
        final String f = DIR + "foo";
        final Path fpath = new Path(f);
        HdfsDataOutputStream out = create(dfs, fpath, DATANODE_NUM);
        out.write("something".getBytes());
        out.hflush();
        int actualRepl = out.getCurrentBlockReplication();
        assertTrue(f + " should be replicated to " + DATANODE_NUM + " datanodes.", actualRepl == DATANODE_NUM);
        // set the soft and hard limit to be 1 second so that the
        // namenode triggers lease recovery
        cluster.setLeasePeriod(leasePeriod, leasePeriod);
        // wait for the lease to expire
        try {
            Thread.sleep(5 * leasePeriod);
        } catch (InterruptedException e) {
        }
        LocatedBlocks locations = dfs.dfs.getNamenode().getBlockLocations(f, 0, Long.MAX_VALUE);
        assertEquals(1, locations.locatedBlockCount());
        LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
        int successcount = 0;
        for (DatanodeInfo datanodeinfo : locatedblock.getLocations()) {
            DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
            ExtendedBlock blk = locatedblock.getBlock();
            try (BufferedReader in = new BufferedReader(new InputStreamReader(datanode.getFSDataset().getBlockInputStream(blk, 0)))) {
                assertEquals("something", in.readLine());
                successcount++;
            }
        }
        System.out.println("successcount=" + successcount);
        assertTrue(successcount > 0);
    } finally {
        IOUtils.closeStream(dfs);
        cluster.shutdown();
    }
    System.out.println("testLeaseExpireHardLimit successful");
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Configuration (org.apache.hadoop.conf.Configuration), InputStreamReader (java.io.InputStreamReader), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), BufferedReader (java.io.BufferedReader), HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream), Test (org.junit.Test)
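
The create(dfs, fpath, DATANODE_NUM) call uses a helper defined elsewhere in TestFileCreation. A minimal sketch under the assumption that the helper simply opens the file with the requested replication and returns the stream as an HdfsDataOutputStream; the permission, create flags, buffer size, and block size below are illustrative assumptions, as are the extra imports (java.util.EnumSet, org.apache.hadoop.fs.CreateFlag, org.apache.hadoop.fs.permission.FsPermission):

// Hypothetical sketch of the create(...) helper. Assumption: it only needs to
// create the file with the given replication so the test can later call
// getCurrentBlockReplication() on the returned HdfsDataOutputStream.
static HdfsDataOutputStream create(DistributedFileSystem dfs, Path f, int repl)
        throws IOException {
    return (HdfsDataOutputStream) dfs.create(f, FsPermission.getFileDefault(),
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
            /* bufferSize */ 4096, (short) repl, /* blockSize */ 64L * 1024 * 1024,
            /* progress */ null);
}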

Example 23 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestPBHelper, method testDataNodeInfoPBHelper.

@Test
public void testDataNodeInfoPBHelper() {
    DatanodeID id = DFSTestUtil.getLocalDatanodeID();
    DatanodeInfo dnInfos0 = new DatanodeInfoBuilder().setNodeID(id).build();
    dnInfos0.setCapacity(3500L);
    dnInfos0.setDfsUsed(1000L);
    dnInfos0.setNonDfsUsed(2000L);
    dnInfos0.setRemaining(500L);
    HdfsProtos.DatanodeInfoProto dnproto = PBHelperClient.convert(dnInfos0);
    DatanodeInfo dnInfos1 = PBHelperClient.convert(dnproto);
    compare(dnInfos0, dnInfos1);
    assertEquals(dnInfos0.getNonDfsUsed(), dnInfos1.getNonDfsUsed());
    // Testing without the nonDfsUsed field
    HdfsProtos.DatanodeInfoProto.Builder b = HdfsProtos.DatanodeInfoProto.newBuilder();
    b.setId(PBHelperClient.convert(id)).setCapacity(3500L).setDfsUsed(1000L).setRemaining(500L);
    DatanodeInfo dnInfos3 = PBHelperClient.convert(b.build());
    assertEquals(dnInfos0.getNonDfsUsed(), dnInfos3.getNonDfsUsed());
}
Also used: DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), HdfsProtos (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), Test (org.junit.Test)
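
The compare(dnInfos0, dnInfos1) helper is defined elsewhere in TestPBHelper. A minimal sketch, assuming it asserts field-by-field equality between the original DatanodeInfo and the instance round-tripped through protobuf; the exact set of fields compared here is an assumption:

// Hypothetical sketch of the compare(...) helper. Assumption: it checks that the
// datanode identity and usage counters survive the protobuf round trip.
private static void compare(DatanodeInfo expected, DatanodeInfo actual) {
    assertEquals(expected.getIpAddr(), actual.getIpAddr());
    assertEquals(expected.getHostName(), actual.getHostName());
    assertEquals(expected.getDatanodeUuid(), actual.getDatanodeUuid());
    assertEquals(expected.getXferPort(), actual.getXferPort());
    assertEquals(expected.getCapacity(), actual.getCapacity());
    assertEquals(expected.getDfsUsed(), actual.getDfsUsed());
    assertEquals(expected.getRemaining(), actual.getRemaining());
}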

Example 24 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestPBHelper, method testConvertBlockCommand.

@Test
public void testConvertBlockCommand() {
    Block[] blocks = new Block[] { new Block(21), new Block(22) };
    DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1], new DatanodeInfo[2] };
    dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
    dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
    dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
    String[][] storageIDs = { { "s00" }, { "s10", "s11" } };
    StorageType[][] storageTypes = { { StorageType.DEFAULT }, { StorageType.DEFAULT, StorageType.DEFAULT } };
    BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1", blocks, dnInfos, storageTypes, storageIDs);
    BlockCommandProto bcProto = PBHelper.convert(bc);
    BlockCommand bc2 = PBHelper.convert(bcProto);
    assertEquals(bc.getAction(), bc2.getAction());
    assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
    Block[] blocks2 = bc2.getBlocks();
    for (int i = 0; i < blocks.length; i++) {
        assertEquals(blocks[i], blocks2[i]);
    }
    DatanodeInfo[][] dnInfos2 = bc2.getTargets();
    assertEquals(dnInfos.length, dnInfos2.length);
    for (int i = 0; i < dnInfos.length; i++) {
        DatanodeInfo[] d1 = dnInfos[i];
        DatanodeInfo[] d2 = dnInfos2[i];
        assertEquals(d1.length, d2.length);
        for (int j = 0; j < d1.length; j++) {
            compare(d1[j], d2[j]);
        }
    }
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Block (org.apache.hadoop.hdfs.protocol.Block), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), BlockCommandProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto), BlockCommand (org.apache.hadoop.hdfs.server.protocol.BlockCommand), Test (org.junit.Test)
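
The round-tripped BlockCommand keeps its blocks, target datanodes, storage types, and storage IDs as parallel arrays, which is what the nested loops above verify. An illustrative sketch (not part of the test) of how a receiver could walk those arrays after decoding; getBlocks() and getTargets() appear in the test itself, while getTargetStorageIDs() is assumed here:

// Illustrative sketch: walk the parallel arrays carried by a decoded BlockCommand.
// Assumption: getTargetStorageIDs() exists alongside getBlocks() and getTargets().
static void logTransferTargets(BlockCommand cmd) {
    Block[] blocks = cmd.getBlocks();
    DatanodeInfo[][] targets = cmd.getTargets();
    String[][] storageIDs = cmd.getTargetStorageIDs();
    for (int i = 0; i < blocks.length; i++) {
        for (int j = 0; j < targets[i].length; j++) {
            System.out.println("transfer " + blocks[i] + " to " + targets[i][j]
                    + " (storage " + storageIDs[i][j] + ")");
        }
    }
}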

Example 25 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestPBHelper, method testBlockECRecoveryCommand.

@Test
public void testBlockECRecoveryCommand() {
    DatanodeInfo[] dnInfos0 = new DatanodeInfo[] { DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
    DatanodeStorageInfo targetDnInfos_0 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s00"));
    DatanodeStorageInfo targetDnInfos_1 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s01"));
    DatanodeStorageInfo[] targetDnInfos0 = new DatanodeStorageInfo[] { targetDnInfos_0, targetDnInfos_1 };
    byte[] liveBlkIndices0 = new byte[2];
    BlockECReconstructionInfo blkECRecoveryInfo0 = new BlockECReconstructionInfo(new ExtendedBlock("bp1", 1234), dnInfos0, targetDnInfos0, liveBlkIndices0, StripedFileTestUtil.getDefaultECPolicy());
    DatanodeInfo[] dnInfos1 = new DatanodeInfo[] { DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
    DatanodeStorageInfo targetDnInfos_2 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s02"));
    DatanodeStorageInfo targetDnInfos_3 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s03"));
    DatanodeStorageInfo[] targetDnInfos1 = new DatanodeStorageInfo[] { targetDnInfos_2, targetDnInfos_3 };
    byte[] liveBlkIndices1 = new byte[2];
    BlockECReconstructionInfo blkECRecoveryInfo1 = new BlockECReconstructionInfo(new ExtendedBlock("bp2", 3256), dnInfos1, targetDnInfos1, liveBlkIndices1, StripedFileTestUtil.getDefaultECPolicy());
    List<BlockECReconstructionInfo> blkRecoveryInfosList = new ArrayList<BlockECReconstructionInfo>();
    blkRecoveryInfosList.add(blkECRecoveryInfo0);
    blkRecoveryInfosList.add(blkECRecoveryInfo1);
    BlockECReconstructionCommand blkECReconstructionCmd = new BlockECReconstructionCommand(DatanodeProtocol.DNA_ERASURE_CODING_RECONSTRUCTION, blkRecoveryInfosList);
    BlockECReconstructionCommandProto blkECRecoveryCmdProto = PBHelper.convert(blkECReconstructionCmd);
    blkECReconstructionCmd = PBHelper.convert(blkECRecoveryCmdProto);
    Iterator<BlockECReconstructionInfo> iterator = blkECReconstructionCmd.getECTasks().iterator();
    assertBlockECRecoveryInfoEquals(blkECRecoveryInfo0, iterator.next());
    assertBlockECRecoveryInfoEquals(blkECRecoveryInfo1, iterator.next());
}
Also used: BlockECReconstructionInfo (org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), ArrayList (java.util.ArrayList), BlockECReconstructionCommand (org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand), BlockECReconstructionCommandProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), Test (org.junit.Test)
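
assertBlockECRecoveryInfoEquals(...) is another helper defined elsewhere in TestPBHelper. A minimal sketch, assuming it compares the block, the source and target node counts, the live block indices, and the erasure coding policy; the accessor names on BlockECReconstructionInfo used below are assumptions:

// Hypothetical sketch of assertBlockECRecoveryInfoEquals(...). Assumption: the
// accessors getExtendedBlock, getSourceDnInfos, getTargetDnInfos,
// getLiveBlockIndices, and getErasureCodingPolicy exist on BlockECReconstructionInfo.
private static void assertBlockECRecoveryInfoEquals(
        BlockECReconstructionInfo expected, BlockECReconstructionInfo actual) {
    assertEquals(expected.getExtendedBlock(), actual.getExtendedBlock());
    assertEquals(expected.getSourceDnInfos().length, actual.getSourceDnInfos().length);
    assertEquals(expected.getTargetDnInfos().length, actual.getTargetDnInfos().length);
    assertArrayEquals(expected.getLiveBlockIndices(), actual.getLiveBlockIndices());
    assertEquals(expected.getErasureCodingPolicy().getName(),
            actual.getErasureCodingPolicy().getName());
}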

Aggregations

DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 214 usages
Test (org.junit.Test): 103 usages
Path (org.apache.hadoop.fs.Path): 91 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 73 usages
IOException (java.io.IOException): 47 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 44 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 43 usages
ArrayList (java.util.ArrayList): 39 usages
Configuration (org.apache.hadoop.conf.Configuration): 38 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 37 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 32 usages
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 32 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 29 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 27 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 25 usages
InetSocketAddress (java.net.InetSocketAddress): 20 usages
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 20 usages
StorageType (org.apache.hadoop.fs.StorageType): 18 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14 usages
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 14 usages