Example 11 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

The class TestReplication, method testReplicationWhenBlockCorruption.

/**
   * Test that blocks are replicated when corrupt replicas exist and the
   * number of good replicas is at least the minimum replication.
   *
   * Simulates RBW blocks by creating dummy copies, then restarts the DNs so
   * the corrupt blocks are detected as soon as possible.
   */
@Test(timeout = 30000)
public void testReplicationWhenBlockCorruption() throws Exception {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new HdfsConfiguration();
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 1);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).storagesPerDatanode(1).build();
        FileSystem fs = cluster.getFileSystem();
        Path filePath = new Path("/test");
        FSDataOutputStream create = fs.create(filePath);
        fs.setReplication(filePath, (short) 1);
        create.write(new byte[1024]);
        create.close();
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
        int numReplicaCreated = 0;
        for (final DataNode dn : cluster.getDataNodes()) {
            if (!dn.getFSDataset().contains(block)) {
                cluster.getFsDatasetTestUtils(dn).injectCorruptReplica(block);
                numReplicaCreated++;
            }
        }
        assertEquals(2, numReplicaCreated);
        fs.setReplication(filePath, (short) 3);
        // Restart the DNs so they detect the dummy (corrupt) copies quickly
        cluster.restartDataNodes();
        cluster.waitActive();
        cluster.triggerBlockReports();
        DFSTestUtil.waitReplication(fs, filePath, (short) 3);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) FileSystem(org.apache.hadoop.fs.FileSystem) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
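
An ExtendedBlock is essentially a block pool ID paired with a plain Block (block ID, length, generation stamp). As a minimal standalone sketch, with a made-up pool ID and block values for illustration only:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class ExtendedBlockSketch {
    public static void main(String[] args) {
        // Hypothetical pool ID, block ID, length, and generation stamp.
        ExtendedBlock eb = new ExtendedBlock("BP-1234-127.0.0.1-42", 1L, 1024L, 1001L);
        System.out.println(eb.getBlockPoolId());      // BP-1234-127.0.0.1-42
        System.out.println(eb.getBlockId());          // 1
        System.out.println(eb.getNumBytes());         // 1024
        System.out.println(eb.getGenerationStamp());  // 1001
        // getLocalBlock() strips the pool ID, leaving the plain Block.
        Block local = eb.getLocalBlock();
        System.out.println(local);
    }
}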

Example 12 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

The class TestFsDatasetImpl, method testRemoveVolumes.

@Test(timeout = 30000)
public void testRemoveVolumes() throws IOException {
    // Feed FsDataset with block metadata.
    final int NUM_BLOCKS = 100;
    for (int i = 0; i < NUM_BLOCKS; i++) {
        // Note: NUM_BLOCKS % BLOCK_POOL_IDS.length is constant, so every
        // block lands in the same block pool.
        String bpid = BLOCK_POOL_IDS[NUM_BLOCKS % BLOCK_POOL_IDS.length];
        ExtendedBlock eb = new ExtendedBlock(bpid, i);
        try (ReplicaHandler replica = dataset.createRbw(StorageType.DEFAULT, eb, false)) {
            // Create the RBW replica; the handler is released on close.
        }
    }
    final String[] dataDirs = conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
    final String volumePathToRemove = dataDirs[0];
    Set<StorageLocation> volumesToRemove = new HashSet<>();
    volumesToRemove.add(StorageLocation.parse(volumePathToRemove));
    FsVolumeReferences volReferences = dataset.getFsVolumeReferences();
    FsVolumeImpl volumeToRemove = null;
    for (FsVolumeSpi vol : volReferences) {
        if (vol.getStorageLocation().equals(volumesToRemove.iterator().next())) {
            volumeToRemove = (FsVolumeImpl) vol;
        }
    }
    assertTrue(volumeToRemove != null);
    volReferences.close();
    dataset.removeVolumes(volumesToRemove, true);
    int expectedNumVolumes = dataDirs.length - 1;
    assertEquals("The volume has been removed from the volumeList.", expectedNumVolumes, getNumVolumes());
    assertEquals("The volume has been removed from the storageMap.", expectedNumVolumes, dataset.storageMap.size());
    try {
        dataset.asyncDiskService.execute(volumeToRemove, new Runnable() {

            @Override
            public void run() {
            }
        });
        fail("Expect RuntimeException: the volume has been removed from the " + "AsyncDiskService.");
    } catch (RuntimeException e) {
        GenericTestUtils.assertExceptionContains("Cannot find volume", e);
    }
    int totalNumReplicas = 0;
    for (String bpid : dataset.volumeMap.getBlockPoolList()) {
        totalNumReplicas += dataset.volumeMap.size(bpid);
    }
    assertEquals("The replica infos on this volume has been removed from the " + "volumeMap.", NUM_BLOCKS / NUM_INIT_VOLUMES, totalNumReplicas);
}
Also used : ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) Matchers.anyString(org.mockito.Matchers.anyString) ReplicaHandler(org.apache.hadoop.hdfs.server.datanode.ReplicaHandler) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) HashSet(java.util.HashSet) Test(org.junit.Test)
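
A side note: the manual volReferences.close() above is easy to forget. Since FsVolumeReferences is Closeable, the volume scan can use try-with-resources instead, as Example 13 below does. A sketch under that assumption, where locationToRemove stands in for volumesToRemove.iterator().next():

FsVolumeImpl volumeToRemove = null;
try (FsDatasetSpi.FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
    // References are released automatically when the block exits.
    for (FsVolumeSpi vol : refs) {
        if (vol.getStorageLocation().equals(locationToRemove)) {
            volumeToRemove = (FsVolumeImpl) vol;
        }
    }
}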

Example 13 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

The class TestFsDatasetImpl, method testDeletingBlocks.

@Test
public void testDeletingBlocks() throws IOException {
    HdfsConfiguration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetSpi<?> ds = DataNodeTestUtils.getFSDataset(dn);
        ds.addBlockPool(BLOCKPOOL, conf);
        FsVolumeImpl vol;
        try (FsDatasetSpi.FsVolumeReferences volumes = ds.getFsVolumeReferences()) {
            vol = (FsVolumeImpl) volumes.get(0);
        }
        ExtendedBlock eb;
        ReplicaInfo info;
        List<Block> blockList = new ArrayList<>();
        for (int i = 1; i <= 63; i++) {
            eb = new ExtendedBlock(BLOCKPOOL, i, 1, 1000 + i);
            cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
            blockList.add(eb.getLocalBlock());
        }
        ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
        // Nothing to do
        }
        assertTrue(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
        blockList.clear();
        eb = new ExtendedBlock(BLOCKPOOL, 64, 1, 1064);
        cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
        blockList.add(eb.getLocalBlock());
        ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
        // Nothing to do
        }
        assertFalse(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
    } finally {
        cluster.shutdown();
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ArrayList(java.util.ArrayList) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Block(org.apache.hadoop.hdfs.protocol.Block) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Test(org.junit.Test)
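
The fixed Thread.sleep(1000) pauses above make the test timing-sensitive. One alternative, sketched here against the same ds handle, is to poll with GenericTestUtils.waitFor; a lambda satisfies either the Guava or java.util Supplier parameter, depending on the Hadoop version:

import org.apache.hadoop.test.GenericTestUtils;

// Poll every 100 ms, fail after 10 s if deletion is never observed.
// (waitFor declares TimeoutException and InterruptedException.)
final long blockId = blockList.get(0).getBlockId();
GenericTestUtils.waitFor(() -> ds.isDeletingBlock(BLOCKPOOL, blockId), 100, 10000);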

Example 14 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

The class TestFsDatasetImpl, method testReportBadBlocks.

@Test(timeout = 30000)
public void testReportBadBlocks() throws Exception {
    boolean threwException = false;
    MiniDFSCluster cluster = null;
    try {
        Configuration config = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());
        DataNode dataNode = cluster.getDataNodes().get(0);
        ExtendedBlock block = new ExtendedBlock(cluster.getNamesystem().getBlockPoolId(), 0);
        try {
            // Test reportBadBlocks when the volume is null
            dataNode.reportBadBlocks(block);
        } catch (NullPointerException npe) {
            threwException = true;
        }
        Thread.sleep(3000);
        Assert.assertFalse(threwException);
        Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());
        FileSystem fs = cluster.getFileSystem();
        Path filePath = new Path("testData");
        DFSTestUtil.createFile(fs, filePath, 1, (short) 1, 0);
        block = DFSTestUtil.getFirstBlock(fs, filePath);
        // Test for the overloaded method reportBadBlocks
        dataNode.reportBadBlocks(block, dataNode.getFSDataset().getFsVolumeReferences().get(0));
        Thread.sleep(3000);
        BlockManagerTestUtil.updateState(cluster.getNamesystem().getBlockManager());
        // Verify the bad block has been reported to namenode
        Assert.assertEquals(1, cluster.getNamesystem().getCorruptReplicaBlocks());
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Test(org.junit.Test)
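
DFSTestUtil.getFirstBlock is a test convenience; roughly the same ExtendedBlock can be fetched through the client API. A sketch, assuming fs is backed by a DistributedFileSystem (as it is for a MiniDFSCluster):

DistributedFileSystem dfs = (DistributedFileSystem) fs;
LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(filePath.toString(), 0, Long.MAX_VALUE);
// The first located block carries the ExtendedBlock for block 0 of the file.
ExtendedBlock firstBlock = lbs.get(0).getBlock();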

Example 15 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

The class FsDatasetImplTestUtils, method createRBW.

@Override
public Replica createRBW(FsVolumeSpi volume, ExtendedBlock eb) throws IOException {
    FsVolumeImpl vol = (FsVolumeImpl) volume;
    final String bpid = eb.getBlockPoolId();
    final Block block = eb.getLocalBlock();
    // Place the RBW replica under the volume's rbw directory for this block pool.
    ReplicaBeingWritten rbw = new ReplicaBeingWritten(eb.getLocalBlock(), volume, vol.createRbwFile(bpid, block).getParentFile(), null);
    // Materialize empty block and meta files, then register the replica.
    rbw.getBlockFile().createNewFile();
    rbw.getMetaFile().createNewFile();
    dataset.volumeMap.add(bpid, rbw);
    return rbw;
}
Also used : ReplicaBeingWritten(org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block)
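
In tests this helper is usually reached through MiniDFSCluster rather than called directly; the one-arg createRBW overload of FsDatasetTestUtils picks a volume itself. A hypothetical usage sketch (block ID, length, and generation stamp values are illustrative):

// Assumes a running MiniDFSCluster named cluster.
String bpid = cluster.getNamesystem().getBlockPoolId();
ExtendedBlock eb = new ExtendedBlock(bpid, 1, 0, 1001);
cluster.getFsDatasetTestUtils(0).createRBW(eb);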

Aggregations

ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 208
Test (org.junit.Test): 124
Path (org.apache.hadoop.fs.Path): 91
Configuration (org.apache.hadoop.conf.Configuration): 71
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 63
FileSystem (org.apache.hadoop.fs.FileSystem): 62
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 55
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 53
IOException (java.io.IOException): 41
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 41
Block (org.apache.hadoop.hdfs.protocol.Block): 38
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 34
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 32
File (java.io.File): 22
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 20
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 18
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 18
InetSocketAddress (java.net.InetSocketAddress): 17
ArrayList (java.util.ArrayList): 17