
Example 6 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

The class TestPBHelper, method testConvertDatanodeRegistration.

@Test
public void testConvertDatanodeRegistration() {
    DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
    // Build a registration carrying a fake set of block keys and a software version.
    BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
    ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10, getBlockKey(1), keys);
    DatanodeRegistration reg = new DatanodeRegistration(dnId, new StorageInfo(NodeType.DATA_NODE), expKeys, "3.0.0");
    // Round-trip through the protobuf form, then compare the pieces field by field.
    DatanodeRegistrationProto proto = PBHelper.convert(reg);
    DatanodeRegistration reg2 = PBHelper.convert(proto);
    compare(reg.getStorageInfo(), reg2.getStorageInfo());
    compare(reg.getExportedKeys(), reg2.getExportedKeys());
    compare(reg, reg2);
    assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion());
}
Also used: DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeRegistrationProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys), BlockKey (org.apache.hadoop.hdfs.security.token.block.BlockKey), Test (org.junit.Test)
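The getBlockKey and compare helpers are defined elsewhere in TestPBHelper and are not shown on this page. A minimal sketch of what they plausibly look like, assuming the usual BlockKey constructor (keyId, expiryDate, encodedKey) and the standard accessor names on DatanodeID:

private static BlockKey getBlockKey(int keyId) {
    // Only the round trip matters here, so any fixed expiry and key bytes do.
    return new BlockKey(keyId, 10, "encodedKey".getBytes());
}

void compare(DatanodeID dn, DatanodeID dn2) {
    // Spell the fields out: DatanodeID.equals() only looks at the transfer
    // address and the datanode UUID, which would mask a lossy conversion.
    assertEquals(dn.getIpAddr(), dn2.getIpAddr());
    assertEquals(dn.getHostName(), dn2.getHostName());
    assertEquals(dn.getDatanodeUuid(), dn2.getDatanodeUuid());
    assertEquals(dn.getXferPort(), dn2.getXferPort());
    assertEquals(dn.getInfoPort(), dn2.getInfoPort());
    assertEquals(dn.getIpcPort(), dn2.getIpcPort());
}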

Example 7 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

The class TestBlockToken, method testBlockTokenRpcLeak.

/**
   * Test that fast repeated invocations of createClientDatanodeProtocolProxy
   * will not end up using up thousands of sockets. This is a regression test
   * for HDFS-1965.
   */
private void testBlockTokenRpcLeak(boolean enableProtobuf) throws Exception {
    Configuration conf = new Configuration();
    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    Assume.assumeTrue(FD_DIR.exists());
    BlockTokenSecretManager sm = new BlockTokenSecretManager(blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null, enableProtobuf);
    Token<BlockTokenIdentifier> token = sm.generateToken(block3, EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
    final Server server = createMockDatanode(sm, token, conf);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
    ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
    fakeBlock.setBlockToken(token);
    // Create another RPC proxy with the same configuration - this will never
    // attempt to connect anywhere -- but it causes the refcount on the
    // RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
    // actually close the TCP connections to the real target DN.
    ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID, new InetSocketAddress("1.1.1.1", 1), UserGroupInformation.createRemoteUser("junk"), conf, NetUtils.getDefaultSocketFactory(conf));
    ClientDatanodeProtocol proxy = null;
    int fdsAtStart = countOpenFileDescriptors();
    try {
        long endTime = Time.now() + 3000;
        while (Time.now() < endTime) {
            proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000, false, fakeBlock);
            assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
            // Stop each proxy as soon as it has been used; the point of the test
            // is that this must also release the underlying TCP connection.
            RPC.stopProxy(proxy);
            LOG.info("Num open fds:" + countOpenFileDescriptors());
        }
        int fdsAtEnd = countOpenFileDescriptors();
        if (fdsAtEnd - fdsAtStart > 50) {
            fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
        }
    } finally {
        server.stop();
    }
    RPC.stopProxy(proxyToNoWhere);
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), SaslRpcServer (org.apache.hadoop.security.SaslRpcServer), Server (org.apache.hadoop.ipc.Server), InetSocketAddress (java.net.InetSocketAddress), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), Block (org.apache.hadoop.hdfs.protocol.Block)
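FD_DIR and countOpenFileDescriptors come from the surrounding test class and are not shown here. The Assume.assumeTrue(FD_DIR.exists()) guard suggests the descriptors are counted by listing /proc/self/fd, which only works on Linux; a sketch under that assumption (FD_DIR is a java.io.File):

private static final File FD_DIR = new File("/proc/self/fd");

private static int countOpenFileDescriptors() {
    // Each entry under /proc/self/fd is one descriptor currently open in this
    // JVM process, so the length of the listing is the fd count.
    return FD_DIR.list().length;
}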

Example 8 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

The class TestPBHelper, method testDataNodeInfoPBHelper.

@Test
public void testDataNodeInfoPBHelper() {
    DatanodeID id = DFSTestUtil.getLocalDatanodeID();
    DatanodeInfo dnInfos0 = new DatanodeInfoBuilder().setNodeID(id).build();
    dnInfos0.setCapacity(3500L);
    dnInfos0.setDfsUsed(1000L);
    dnInfos0.setNonDfsUsed(2000L);
    dnInfos0.setRemaining(500L);
    HdfsProtos.DatanodeInfoProto dnproto = PBHelperClient.convert(dnInfos0);
    DatanodeInfo dnInfos1 = PBHelperClient.convert(dnproto);
    compare(dnInfos0, dnInfos1);
    assertEquals(dnInfos0.getNonDfsUsed(), dnInfos1.getNonDfsUsed());
    // Convert a proto that omits the optional nonDfsUsed field and check the
    // default the converter fills in.
    HdfsProtos.DatanodeInfoProto.Builder b = HdfsProtos.DatanodeInfoProto.newBuilder();
    b.setId(PBHelperClient.convert(id)).setCapacity(3500L).setDfsUsed(1000L).setRemaining(500L);
    DatanodeInfo dnInfos3 = PBHelperClient.convert(b.build());
    assertEquals(dnInfos0.getNonDfsUsed(), dnInfos3.getNonDfsUsed());
}
Also used: DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), HdfsProtos (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), Test (org.junit.Test)
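The last assertion passes because, when the proto carries no nonDfsUsed value, the converter presumably derives it from the remaining counters rather than leaving it at zero, and with the numbers in this test the derivation lands exactly on the value set explicitly on dnInfos0:

    // Assumed fallback when the optional nonDfsUsed field is absent:
    long capacity = 3500L, dfsUsed = 1000L, remaining = 500L;
    long derived = capacity - dfsUsed - remaining;  // 3500 - 1000 - 500 = 2000
    // 2000 equals dnInfos0.setNonDfsUsed(2000L), so the two assertEquals agree.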

Example 9 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

The class TestPBHelper, method testConvertDatanodeID.

@Test
public void testConvertDatanodeID() {
    // The simplest round trip on this page: DatanodeID -> DatanodeIDProto -> DatanodeID.
    DatanodeID dn = DFSTestUtil.getLocalDatanodeID();
    DatanodeIDProto dnProto = PBHelperClient.convert(dn);
    DatanodeID dn2 = PBHelperClient.convert(dnProto);
    compare(dn, dn2);
}
Also used: DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DatanodeIDProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto), Test (org.junit.Test)
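Every example on this page starts from DFSTestUtil.getLocalDatanodeID(), which simply fabricates an ID for a datanode on the loopback address. A hedged sketch of such a factory; the exact constructor argument order (ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort, ipcPort) and the Hadoop 3 default ports are assumptions:

public static DatanodeID getLocalDatanodeID() {
    // java.util.UUID gives each fake datanode a distinct identity; the
    // concrete ports are irrelevant to a pure conversion round trip.
    return new DatanodeID("127.0.0.1", "localhost",
        UUID.randomUUID().toString(), 9866, 9864, 9865, 9867);
}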

Example 10 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

The class TestFsck, method testFsckMissingECFile.

@Test(timeout = 300000)
public void testFsckMissingECFile() throws Exception {
    DistributedFileSystem fs = null;
    int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
    int parityBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
    int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
    int totalSize = dataBlocks + parityBlocks;
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
    fs = cluster.getFileSystem();
    // create file
    Path ecDirPath = new Path("/striped");
    fs.mkdir(ecDirPath, FsPermission.getDirDefault());
    fs.getClient().setErasureCodingPolicy(ecDirPath.toString(), StripedFileTestUtil.getDefaultECPolicy().getName());
    Path file = new Path(ecDirPath, "missing");
    final int length = cellSize * dataBlocks;
    final byte[] bytes = StripedFileTestUtil.generateBytes(length);
    DFSTestUtil.writeFile(fs, file, bytes);
    // Make the file unrecoverable: the EC policy tolerates at most parityBlocks
    // missing units per block group, so stopping parityBlocks + 1 datanodes is fatal.
    ArrayList<DataNode> dns = cluster.getDataNodes();
    DatanodeID dnId;
    for (int i = 0; i < parityBlocks + 1; i++) {
        dnId = dns.get(i).getDatanodeId();
        cluster.stopDataNode(dnId.getXferAddr());
        cluster.setDataNodeDead(dnId);
    }
    waitForUnrecoverableBlockGroup(conf);
    String outStr = runFsck(conf, 1, true, "/", "-files", "-blocks", "-locations");
    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
    assertTrue(outStr.contains("Live_repl=" + (dataBlocks - 1)));
    assertTrue(outStr.contains("Under-erasure-coded block groups:\t0"));
    outStr = runFsck(conf, -1, true, "/", "-list-corruptfileblocks");
    assertTrue(outStr.contains("has 1 CORRUPT files"));
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
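The arithmetic behind the expected corruption: assuming the default erasure coding policy is RS-6-3-1024k (so dataBlocks = 6 and parityBlocks = 3), the cluster starts with totalSize = 9 datanodes and the loop stops parityBlocks + 1 = 4 of them:

    // Assuming the default EC policy RS-6-3-1024k:
    int dataBlocks = 6, parityBlocks = 3;                            // totalSize = 9
    int survivors = dataBlocks + parityBlocks - (parityBlocks + 1);  // 9 - 4 = 5
    // survivors (5) < dataBlocks (6): the block group cannot be reconstructed,
    // so fsck reports CORRUPT with Live_repl = 5, i.e. dataBlocks - 1.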

Aggregations

DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 51 uses
Test (org.junit.Test): 36 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 18 uses
Configuration (org.apache.hadoop.conf.Configuration): 13 uses
Path (org.apache.hadoop.fs.Path): 12 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 10 uses
IOException (java.io.IOException): 8 uses
InetSocketAddress (java.net.InetSocketAddress): 8 uses
Peer (org.apache.hadoop.hdfs.net.Peer): 8 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 uses
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 8 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 7 uses
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 6 uses
Socket (java.net.Socket): 5 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 5 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 5 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 5 uses
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 5 uses