Example 76 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

From the class TestReplicaMap, method testRemove.

@Test
public void testRemove() {
    // Test 1: null argument throws invalid argument exception
    try {
        map.remove(bpid, null);
        fail("Expected exception not thrown");
    } catch (IllegalArgumentException expected) {
    }
    // Test 2: remove failure - generation stamp mismatch 
    Block b = new Block(block);
    b.setGenerationStamp(0);
    assertNull(map.remove(bpid, b));
    // Test 3: remove failure - blockID mismatch
    b.setGenerationStamp(block.getGenerationStamp());
    b.setBlockId(0);
    assertNull(map.remove(bpid, b));
    // Test 4: remove success
    assertNotNull(map.remove(bpid, block));
    // Test 5: remove failure - invalid blockID
    assertNull(map.remove(bpid, 0));
    // Test 6: remove success
    map.add(bpid, new FinalizedReplica(block, null, null));
    assertNotNull(map.remove(bpid, block.getBlockId()));
}
Also used: Block (org.apache.hadoop.hdfs.protocol.Block), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), Test (org.junit.Test)
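
Both this testRemove example and the testGet example that follows rely on fixture fields (map, bpid, block) declared outside the snippet. A minimal sketch of such a setup is shown below; the ReplicaMap constructor argument and the concrete field values are assumptions (the constructor signature has varied across Hadoop versions), not the project's verbatim code.

// Minimal fixture sketch (assumed): a ReplicaMap keyed by block pool ID,
// pre-populated with one finalized replica so lookups and removals can succeed.
// Relies on Block, FinalizedReplica, ReplicaMap and org.apache.hadoop.util.AutoCloseableLock.
private final ReplicaMap map = new ReplicaMap(new AutoCloseableLock());
private final String bpid = "BP-TEST";
private final Block block = new Block(1234L, 1234L, 1234L);

@Before
public void setup() {
    // Register the block under the block pool so get()/remove() by Block or by block ID can find it.
    map.add(bpid, new FinalizedReplica(block, null, null));
}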

Example 77 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

From the class TestReplicaMap, method testGet.

/**
   * Tests for ReplicaMap.get(Block) and ReplicaMap.get(long).
   */
@Test
public void testGet() {
    // Test 1: null argument throws invalid argument exception
    try {
        map.get(bpid, null);
        fail("Expected exception not thrown");
    } catch (IllegalArgumentException expected) {
    }
    // Test 2: successful lookup based on block
    assertNotNull(map.get(bpid, block));
    // Test 3: Lookup failure - generation stamp mismatch 
    Block b = new Block(block);
    b.setGenerationStamp(0);
    assertNull(map.get(bpid, b));
    // Test 4: Lookup failure - blockID mismatch
    b.setGenerationStamp(block.getGenerationStamp());
    b.setBlockId(0);
    assertNull(map.get(bpid, b));
    // Test 5: successful lookup based on block ID
    assertNotNull(map.get(bpid, block.getBlockId()));
    // Test 6: failed lookup for invalid block ID
    assertNull(map.get(bpid, 0));
}
Also used: Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)

Example 78 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

From the class TestSpaceReservation, method testTmpSpaceReserve.

@Test(timeout = 300000)
public void testTmpSpaceReserve() throws Exception {
    final short replication = 2;
    startCluster(BLOCK_SIZE, replication, -1);
    final int byteCount1 = 100;
    final int byteCount2 = 200;
    final String methodName = GenericTestUtils.getMethodName();
    // Test positive scenario
    {
        final Path file = new Path("/" + methodName + ".01.dat");
        try (FSDataOutputStream os = fs.create(file, (short) 1)) {
            // Write test data to the file
            os.write(new byte[byteCount1]);
            os.hsync();
        }
        BlockLocation[] blockLocations = fs.getFileBlockLocations(file, 0, 10);
        String firstReplicaNode = blockLocations[0].getNames()[0];
        int newReplicaDNIndex = 0;
        if (firstReplicaNode.equals(cluster.getDataNodes().get(0).getDisplayName())) {
            newReplicaDNIndex = 1;
        }
        FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) cluster.getDataNodes().get(newReplicaDNIndex).getFSDataset().getFsVolumeReferences().get(0);
        performReReplication(file, true);
        assertEquals("Wrong reserve space for Tmp ", byteCount1, fsVolumeImpl.getRecentReserved());
        assertEquals("Reserved Tmp space is not released", 0, fsVolumeImpl.getReservedForReplicas());
    }
    // Test when file creation fails
    {
        final Path file = new Path("/" + methodName + ".01.dat");
        try (FSDataOutputStream os = fs.create(file, (short) 1)) {
            // Write test data to the file
            os.write(new byte[byteCount2]);
            os.hsync();
        }
        BlockLocation[] blockLocations = fs.getFileBlockLocations(file, 0, 10);
        String firstReplicaNode = blockLocations[0].getNames()[0];
        int newReplicaDNIndex = 0;
        if (firstReplicaNode.equals(cluster.getDataNodes().get(0).getDisplayName())) {
            newReplicaDNIndex = 1;
        }
        BlockPoolSlice blockPoolSlice = Mockito.mock(BlockPoolSlice.class);
        Mockito.when(blockPoolSlice.createTmpFile((Block) Mockito.any())).thenThrow(new IOException("Synthetic IO Exception Through MOCK"));
        final FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) cluster.getDataNodes().get(newReplicaDNIndex).getFSDataset().getFsVolumeReferences().get(0);
        // Reserve some bytes to verify that double clearing of space shouldn't happen
        fsVolumeImpl.reserveSpaceForReplica(1000);
        Field field = FsVolumeImpl.class.getDeclaredField("bpSlices");
        field.setAccessible(true);
        @SuppressWarnings("unchecked") Map<String, BlockPoolSlice> bpSlices = (Map<String, BlockPoolSlice>) field.get(fsVolumeImpl);
        bpSlices.put(fsVolumeImpl.getBlockPoolList()[0], blockPoolSlice);
        performReReplication(file, false);
        assertEquals("Wrong reserve space for Tmp ", byteCount2, fsVolumeImpl.getRecentReserved());
        assertEquals("Tmp space is not released OR released twice", 1000, fsVolumeImpl.getReservedForReplicas());
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Field (java.lang.reflect.Field), Block (org.apache.hadoop.hdfs.protocol.Block), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), IOException (java.io.IOException), Map (java.util.Map), Test (org.junit.Test)
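
The testTmpSpaceReserve example above calls a private helper, performReReplication(file, expectSuccess), whose body is not shown here. As a hedged sketch (an assumption, not the project's exact code), such a helper would typically raise the replication factor of the file so that a second DataNode must reserve temporary space for the incoming replica, optionally waiting for the re-replication to complete:

// Hypothetical helper sketch (assumption): trigger re-replication of the file.
// fs.setReplication and DFSTestUtil.waitReplication are standard Hadoop test APIs
// already used elsewhere in these examples.
private void performReReplication(Path filePath, boolean waitForSuccess) throws Exception {
    // Ask the NameNode for a second replica; the chosen DataNode reserves tmp space for it.
    fs.setReplication(filePath, (short) 2);
    if (waitForSuccess) {
        // Block until the NameNode reports the file at the requested replication.
        DFSTestUtil.waitReplication(fs, filePath, (short) 2);
    }
}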

Example 79 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

From the class TestInterDatanodeProtocol, method checkMetaInfo.

public static void checkMetaInfo(ExtendedBlock b, DataNode dn) throws IOException {
    Block metainfo = DataNodeTestUtils.getFSDataset(dn).getStoredBlock(b.getBlockPoolId(), b.getBlockId());
    Assert.assertEquals(b.getBlockId(), metainfo.getBlockId());
    Assert.assertEquals(b.getNumBytes(), metainfo.getNumBytes());
}
Also used: ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Block (org.apache.hadoop.hdfs.protocol.Block)

Example 80 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

From the class TestLeaseRecovery, method testBlockSynchronization.

/**
   * The following test first creates a file with a few blocks.
   * It randomly truncates the replica of the last block stored in each datanode.
   * Finally, it triggers block synchronization to synchronize all stored blocks.
   */
@Test
public void testBlockSynchronization() throws Exception {
    final int ORG_FILE_SIZE = 3000;
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
    cluster.waitActive();
    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
    assertTrue(dfs.exists(filepath));
    DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);
    //get block info for the last block
    LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(), filestr);
    DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
    assertEquals(REPLICATION_NUM, datanodeinfos.length);
    //connect to data nodes
    DataNode[] datanodes = new DataNode[REPLICATION_NUM];
    for (int i = 0; i < REPLICATION_NUM; i++) {
        datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
        assertTrue(datanodes[i] != null);
    }
    //verify Block Info
    ExtendedBlock lastblock = locatedblock.getBlock();
    DataNode.LOG.info("newblocks=" + lastblock);
    for (int i = 0; i < REPLICATION_NUM; i++) {
        checkMetaInfo(lastblock, datanodes[i]);
    }
    DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
    cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName, new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
    // expire lease to trigger block recovery.
    waitLeaseRecovery(cluster);
    Block[] updatedmetainfo = new Block[REPLICATION_NUM];
    long oldSize = lastblock.getNumBytes();
    lastblock = TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(), filestr).getBlock();
    long currentGS = lastblock.getGenerationStamp();
    for (int i = 0; i < REPLICATION_NUM; i++) {
        updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(lastblock.getBlockPoolId(), lastblock.getBlockId());
        assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
        assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
        assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
    }
    // verify that lease recovery does not occur when namenode is in safemode
    System.out.println("Testing that lease recovery cannot happen during safemode.");
    filestr = "/foo.safemode";
    filepath = new Path(filestr);
    dfs.create(filepath, (short) 1);
    cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
    assertTrue(dfs.dfs.exists(filestr));
    DFSTestUtil.waitReplication(dfs, filepath, (short) 1);
    waitLeaseRecovery(cluster);
    // verify that we still cannot recover the lease
    LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
    assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
    cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Configuration (org.apache.hadoop.conf.Configuration), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), LeaseManager (org.apache.hadoop.hdfs.server.namenode.LeaseManager), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)
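
waitLeaseRecovery(cluster) is another helper defined elsewhere in TestLeaseRecovery. A minimal sketch of the usual pattern is given below; the concrete lease period and sleep duration are assumptions, while MiniDFSCluster.setLeasePeriod is the standard hook for shrinking lease timeouts in tests:

// Hedged sketch (assumption): expire the client's lease quickly so the
// NameNode's lease monitor kicks off block recovery on the open file.
private void waitLeaseRecovery(MiniDFSCluster cluster) throws InterruptedException {
    final long shortLeasePeriod = 1000L;                 // soft and hard limits of 1 second
    cluster.setLeasePeriod(shortLeasePeriod, shortLeasePeriod);
    Thread.sleep(5 * shortLeasePeriod);                  // give the lease monitor time to run
}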

Aggregations

Block (org.apache.hadoop.hdfs.protocol.Block): 155 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 79 usages
Test (org.junit.Test): 77 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 74 usages
Path (org.apache.hadoop.fs.Path): 28 usages
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 26 usages
IOException (java.io.IOException): 24 usages
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 22 usages
Configuration (org.apache.hadoop.conf.Configuration): 20 usages
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 18 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 17 usages
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 17 usages
CachedBlock (org.apache.hadoop.hdfs.server.namenode.CachedBlock): 17 usages
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 15 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 14 usages
ArrayList (java.util.ArrayList): 12 usages
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 11 usages
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 11 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10 usages
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 10 usages