
Example 21 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestBlockToken method testBlockTokenRpcLeak.

/**
   * Test that fast repeated invocations of createClientDatanodeProtocolProxy
   * do not end up consuming thousands of sockets. This is a regression test
   * for HDFS-1965.
   */
private void testBlockTokenRpcLeak(boolean enableProtobuf) throws Exception {
    Configuration conf = new Configuration();
    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    Assume.assumeTrue(FD_DIR.exists());
    BlockTokenSecretManager sm = new BlockTokenSecretManager(
        blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
        enableProtobuf);
    Token<BlockTokenIdentifier> token = sm.generateToken(block3,
        EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
    final Server server = createMockDatanode(sm, token, conf);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
    ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
    fakeBlock.setBlockToken(token);
    // Create another RPC proxy with the same configuration - this will never
    // attempt to connect anywhere -- but it causes the refcount on the
    // RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
    // actually close the TCP connections to the real target DN.
    ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(
        ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
        new InetSocketAddress("1.1.1.1", 1),
        UserGroupInformation.createRemoteUser("junk"), conf,
        NetUtils.getDefaultSocketFactory(conf));
    ClientDatanodeProtocol proxy = null;
    int fdsAtStart = countOpenFileDescriptors();
    try {
        long endTime = Time.now() + 3000;
        while (Time.now() < endTime) {
            proxy = DFSUtilClient.createClientDatanodeProtocolProxy(
                fakeDnId, conf, 1000, false, fakeBlock);
            assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
            if (proxy != null) {
                RPC.stopProxy(proxy);
            }
            LOG.info("Num open fds:" + countOpenFileDescriptors());
        }
        int fdsAtEnd = countOpenFileDescriptors();
        if (fdsAtEnd - fdsAtStart > 50) {
            fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
        }
    } finally {
        server.stop();
    }
    RPC.stopProxy(proxyToNoWhere);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) SaslRpcServer(org.apache.hadoop.security.SaslRpcServer) Server(org.apache.hadoop.ipc.Server) InetSocketAddress(java.net.InetSocketAddress) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) Block(org.apache.hadoop.hdfs.protocol.Block)
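
The token-protected block handed to the proxy above is built in three layers: a Block carries the raw block id, an ExtendedBlock scopes it to a block pool, and a LocatedBlock adds replica locations plus the access token. A minimal sketch of that composition, using only the constructors seen in the test (the FakeBlockFactory/buildFakeBlock names are hypothetical):

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

class FakeBlockFactory {
    // Builds a LocatedBlock with no replica locations, mirroring the test above.
    static LocatedBlock buildFakeBlock(String poolId, long blockId,
                                       Token<BlockTokenIdentifier> token) {
        // Raw block identity: id only, length and generation stamp defaulted.
        Block raw = new Block(blockId);
        // Scope the block to a block pool.
        ExtendedBlock eb = new ExtendedBlock(poolId, raw);
        // No known replica locations; attach the block access token.
        LocatedBlock located = new LocatedBlock(eb, new DatanodeInfo[0]);
        located.setBlockToken(token);
        return located;
    }
}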

Example 22 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestPBHelper method testConvertBlockCommand.

@Test
public void testConvertBlockCommand() {
    Block[] blocks = new Block[] { new Block(21), new Block(22) };
    DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1], new DatanodeInfo[2] };
    dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
    dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
    dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
    String[][] storageIDs = { { "s00" }, { "s10", "s11" } };
    StorageType[][] storageTypes = { { StorageType.DEFAULT }, { StorageType.DEFAULT, StorageType.DEFAULT } };
    BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1", blocks, dnInfos, storageTypes, storageIDs);
    BlockCommandProto bcProto = PBHelper.convert(bc);
    BlockCommand bc2 = PBHelper.convert(bcProto);
    assertEquals(bc.getAction(), bc2.getAction());
    assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
    Block[] blocks2 = bc2.getBlocks();
    for (int i = 0; i < blocks.length; i++) {
        assertEquals(blocks[i], blocks2[i]);
    }
    DatanodeInfo[][] dnInfos2 = bc2.getTargets();
    assertEquals(dnInfos.length, dnInfos2.length);
    for (int i = 0; i < dnInfos.length; i++) {
        DatanodeInfo[] d1 = dnInfos[i];
        DatanodeInfo[] d2 = dnInfos2[i];
        assertEquals(d1.length, d2.length);
        for (int j = 0; j < d1.length; j++) {
            compare(d1[j], d2[j]);
        }
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Block(org.apache.hadoop.hdfs.protocol.Block) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) BlockCommandProto(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto) BlockCommand(org.apache.hadoop.hdfs.server.protocol.BlockCommand) Test(org.junit.Test)

Example 23 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestReplicaMap method testRemove.

@Test
public void testRemove() {
    // Test 1: null argument throws invalid argument exception
    try {
        map.remove(bpid, null);
        fail("Expected exception not thrown");
    } catch (IllegalArgumentException expected) {
    }
    // Test 2: remove failure - generation stamp mismatch 
    Block b = new Block(block);
    b.setGenerationStamp(0);
    assertNull(map.remove(bpid, b));
    // Test 3: remove failure - blockID mismatch
    b.setGenerationStamp(block.getGenerationStamp());
    b.setBlockId(0);
    assertNull(map.remove(bpid, b));
    // Test 4: remove success
    assertNotNull(map.remove(bpid, block));
    // Test 5: remove failure - invalid blockID
    assertNull(map.remove(bpid, 0));
    // Test 6: remove success
    map.add(bpid, new FinalizedReplica(block, null, null));
    assertNotNull(map.remove(bpid, block.getBlockId()));
}
Also used : Block(org.apache.hadoop.hdfs.protocol.Block) FinalizedReplica(org.apache.hadoop.hdfs.server.datanode.FinalizedReplica) Test(org.junit.Test)

Example 24 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestReplicaMap method testGet.

/**
   * Tests for ReplicaMap.get(Block) and ReplicaMap.get(long).
   */
@Test
public void testGet() {
    // Test 1: null argument throws invalid argument exception
    try {
        map.get(bpid, null);
        fail("Expected exception not thrown");
    } catch (IllegalArgumentException expected) {
    }
    // Test 2: successful lookup based on block
    assertNotNull(map.get(bpid, block));
    // Test 3: Lookup failure - generation stamp mismatch 
    Block b = new Block(block);
    b.setGenerationStamp(0);
    assertNull(map.get(bpid, b));
    // Test 4: Lookup failure - blockID mismatch
    b.setGenerationStamp(block.getGenerationStamp());
    b.setBlockId(0);
    assertNull(map.get(bpid, b));
    // Test 5: successful lookup based on block ID
    assertNotNull(map.get(bpid, block.getBlockId()));
    // Test 6: failed lookup for invalid block ID
    assertNull(map.get(bpid, 0));
}
Also used : Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
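
Both ReplicaMap tests above rely on the same trick for building a non-matching lookup key: copy the stored block with Block's copy constructor, then perturb a single field so either the generation stamp or the block id no longer matches. A minimal sketch of that pattern (the helper names are hypothetical), using only the Block constructors and setters shown above:

import org.apache.hadoop.hdfs.protocol.Block;

class BlockKeys {
    // Same block id, different generation stamp: get/remove should miss.
    static Block withGenerationStamp(Block original, long genStamp) {
        Block copy = new Block(original);  // copies id, length, and generation stamp
        copy.setGenerationStamp(genStamp);
        return copy;
    }

    // Same generation stamp, different block id: get/remove should miss.
    static Block withBlockId(Block original, long blockId) {
        Block copy = new Block(original);
        copy.setBlockId(blockId);
        return copy;
    }
}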

Example 25 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestSpaceReservation method testTmpSpaceReserve.

@Test(timeout = 300000)
public void testTmpSpaceReserve() throws Exception {
    final short replication = 2;
    startCluster(BLOCK_SIZE, replication, -1);
    final int byteCount1 = 100;
    final int byteCount2 = 200;
    final String methodName = GenericTestUtils.getMethodName();
    // Test positive scenario
    {
        final Path file = new Path("/" + methodName + ".01.dat");
        try (FSDataOutputStream os = fs.create(file, (short) 1)) {
            // Write test data to the file
            os.write(new byte[byteCount1]);
            os.hsync();
        }
        BlockLocation[] blockLocations = fs.getFileBlockLocations(file, 0, 10);
        String firstReplicaNode = blockLocations[0].getNames()[0];
        int newReplicaDNIndex = 0;
        if (firstReplicaNode.equals(cluster.getDataNodes().get(0).getDisplayName())) {
            newReplicaDNIndex = 1;
        }
        FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) cluster.getDataNodes()
            .get(newReplicaDNIndex).getFSDataset().getFsVolumeReferences().get(0);
        performReReplication(file, true);
        assertEquals("Wrong reserve space for Tmp ", byteCount1, fsVolumeImpl.getRecentReserved());
        assertEquals("Reserved Tmp space is not released", 0, fsVolumeImpl.getReservedForReplicas());
    }
    // Test when file creation fails
    {
        final Path file = new Path("/" + methodName + ".01.dat");
        try (FSDataOutputStream os = fs.create(file, (short) 1)) {
            // Write test data to the file
            os.write(new byte[byteCount2]);
            os.hsync();
        }
        BlockLocation[] blockLocations = fs.getFileBlockLocations(file, 0, 10);
        String firstReplicaNode = blockLocations[0].getNames()[0];
        int newReplicaDNIndex = 0;
        if (firstReplicaNode.equals(cluster.getDataNodes().get(0).getDisplayName())) {
            newReplicaDNIndex = 1;
        }
        BlockPoolSlice blockPoolSlice = Mockito.mock(BlockPoolSlice.class);
        Mockito.when(blockPoolSlice.createTmpFile((Block) Mockito.any()))
            .thenThrow(new IOException("Synthetic IO Exception Through MOCK"));
        final FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) cluster.getDataNodes()
            .get(newReplicaDNIndex).getFSDataset().getFsVolumeReferences().get(0);
        // Reserve some bytes to verify that reserved space is not cleared twice
        fsVolumeImpl.reserveSpaceForReplica(1000);
        Field field = FsVolumeImpl.class.getDeclaredField("bpSlices");
        field.setAccessible(true);
        @SuppressWarnings("unchecked") Map<String, BlockPoolSlice> bpSlices = (Map<String, BlockPoolSlice>) field.get(fsVolumeImpl);
        bpSlices.put(fsVolumeImpl.getBlockPoolList()[0], blockPoolSlice);
        performReReplication(file, false);
        assertEquals("Wrong reserve space for Tmp ", byteCount2, fsVolumeImpl.getRecentReserved());
        assertEquals("Tmp space is not released OR released twice", 1000, fsVolumeImpl.getReservedForReplicas());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Field(java.lang.reflect.Field) Block(org.apache.hadoop.hdfs.protocol.Block) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) Map(java.util.Map) Test(org.junit.Test)
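
The failure-injection step in this test swaps FsVolumeImpl's private bpSlices map for one containing a Mockito stub, using plain Java reflection. A standalone sketch of that field-access seam (Volume, slices, and lookup are hypothetical names, not Hadoop classes):

import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;

public class ReflectionSeamSketch {
    static class Volume {
        // Private state that a test cannot normally reach.
        private final Map<String, String> slices = new HashMap<>();
        String lookup(String key) { return slices.get(key); }
    }

    public static void main(String[] args) throws Exception {
        Volume v = new Volume();
        // Open the private field and fetch the live map behind it.
        Field field = Volume.class.getDeclaredField("slices");
        field.setAccessible(true);
        @SuppressWarnings("unchecked")
        Map<String, String> slices = (Map<String, String>) field.get(v);
        // Inject the test double; later reads through Volume see it.
        slices.put("bp1", "stubbed-slice");
        System.out.println(v.lookup("bp1"));  // prints "stubbed-slice"
    }
}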

Aggregations

Block (org.apache.hadoop.hdfs.protocol.Block) 155
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 79
Test (org.junit.Test) 77
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 74
Path (org.apache.hadoop.fs.Path) 28
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) 26
IOException (java.io.IOException) 24
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) 22
Configuration (org.apache.hadoop.conf.Configuration) 20
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 17
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) 17
CachedBlock (org.apache.hadoop.hdfs.server.namenode.CachedBlock) 17
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) 15
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 14
ArrayList (java.util.ArrayList) 12
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) 11
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) 11
FsPermission (org.apache.hadoop.fs.permission.FsPermission) 10
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) 10