
Example 6 with BlockListAsLongs

use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

the class TestAddStripedBlocks method testAddUCReplica.

/**
   * Test BlockInfoStripedUnderConstruction#addReplicaIfNotPresent in different
   * scenarios.
   */
@Test
public void testAddUCReplica() throws Exception {
    final Path file = new Path("/file1");
    final List<String> storageIDs = new ArrayList<>();
    // create an empty file
    FSDataOutputStream out = null;
    try {
        out = dfs.create(file, (short) 1);
        // 1. create the UC striped block
        FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
        INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
        cluster.getNamesystem().getAdditionalBlock(file.toString(), fileNode.getId(), dfs.getClient().getClientName(), null, null, null, null);
        BlockInfo lastBlock = fileNode.getLastBlock();
        DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
        byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
        Assert.assertEquals(groupSize, locs.length);
        Assert.assertEquals(groupSize, indices.length);
        // 2. mimic incremental block reports and make sure the uc-replica list in
        // the BlockInfoUCStriped is correct
        int i = 0;
        for (DataNode dn : cluster.getDataNodes()) {
            final Block block = new Block(lastBlock.getBlockId() + i++, 0, lastBlock.getGenerationStamp());
            DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
            storageIDs.add(storage.getStorageID());
            StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(block, BlockStatus.RECEIVING_BLOCK, storage);
            for (StorageReceivedDeletedBlocks report : reports) {
                cluster.getNamesystem().processIncrementalBlockReport(dn.getDatanodeId(), report);
            }
        }
        // make sure lastBlock is correct and the storages have been updated
        locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
        indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
        Assert.assertEquals(groupSize, locs.length);
        Assert.assertEquals(groupSize, indices.length);
        for (DatanodeStorageInfo newstorage : locs) {
            Assert.assertTrue(storageIDs.contains(newstorage.getStorageID()));
        }
    } finally {
        IOUtils.cleanup(null, out);
    }
    // 3. restart the namenode. mimic the full block reports and check the
    // uc-replica list again
    cluster.restartNameNode(true);
    final String bpId = cluster.getNamesystem().getBlockPoolId();
    INodeFile fileNode = cluster.getNamesystem().getFSDirectory().getINode4Write(file.toString()).asFile();
    BlockInfo lastBlock = fileNode.getLastBlock();
    int i = groupSize - 1;
    for (DataNode dn : cluster.getDataNodes()) {
        String storageID = storageIDs.get(i);
        final Block block = new Block(lastBlock.getBlockId() + i--, lastBlock.getGenerationStamp(), 0);
        DatanodeStorage storage = new DatanodeStorage(storageID);
        List<ReplicaBeingWritten> blocks = new ArrayList<>();
        ReplicaBeingWritten replica = new ReplicaBeingWritten(block, null, null, null);
        blocks.add(replica);
        BlockListAsLongs bll = BlockListAsLongs.encode(blocks);
        StorageBlockReport[] reports = { new StorageBlockReport(storage, bll) };
        cluster.getNameNodeRpc().blockReport(dn.getDNRegistrationForBP(bpId), bpId, reports, new BlockReportContext(1, 0, System.nanoTime(), 0, true));
    }
    DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
    byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
    Assert.assertEquals(groupSize, locs.length);
    Assert.assertEquals(groupSize, indices.length);
    for (i = 0; i < groupSize; i++) {
        Assert.assertEquals(storageIDs.get(i), locs[groupSize - 1 - i].getStorageID());
        Assert.assertEquals(groupSize - i - 1, indices[i]);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) ReplicaBeingWritten(org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten) ArrayList(java.util.ArrayList) StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) BlockReportContext(org.apache.hadoop.hdfs.server.protocol.BlockReportContext) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
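
For reference, the full-block-report step (step 3 above) can be isolated into a smaller sketch. The snippet below is a minimal, hypothetical example, not part of the Hadoop test suite; it reuses only the calls that appear in the test above (BlockListAsLongs.encode, the ReplicaBeingWritten and DatanodeStorage constructors, StorageBlockReport, BlockReportContext), and the block id, generation stamp and storage ID are made up for illustration.

import java.util.ArrayList;
import java.util.List;
import java.util.UUID;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

public class BlockReportSketch {
    public static void main(String[] args) {
        // One fake under-construction replica; Block(blockId, numBytes, generationStamp).
        // Volume, directory and writer thread are left null, exactly as in the test,
        // which is enough for encoding purposes.
        List<ReplicaBeingWritten> replicas = new ArrayList<>();
        replicas.add(new ReplicaBeingWritten(new Block(1, 0, 1001), null, null, null));

        // Encode the replica list into the compact long-based report format.
        BlockListAsLongs bll = BlockListAsLongs.encode(replicas);
        System.out.println("blocks in report: " + bll.getNumberOfBlocks());

        // Wrap the report with the storage it is reported from, as in step 3 above.
        DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
        StorageBlockReport[] reports = { new StorageBlockReport(storage, bll) };

        // A context like the one above would accompany the blockReport RPC:
        // new BlockReportContext(1, 0, System.nanoTime(), 0, true)
        System.out.println(reports.length + " storage report(s) ready to send");
    }
}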

Example 7 with BlockListAsLongs

use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

the class TestInjectionForSimulatedStorage method testInjection.

/* This test makes sure that the NameNode retries replication of
   * under-replicated blocks using all the available replicas. It uses
   * simulated storage and its block-injection feature.
   * 
   * It creates a file with several blocks and a replication factor of 4. 
   * The cluster is then shut down - the NN retains its state but the DNs are 
   * all simulated and hence lose their blocks. 
   * The blocks are then injected into one of the DNs. The expected behaviour is
   * that the NN will arrange for the missing replicas to be copied from a valid source.
   */
@Test
public void testInjection() throws IOException {
    MiniDFSCluster cluster = null;
    String testFile = "/replication-test-file";
    Path testPath = new Path(testFile);
    byte[] buffer = new byte[1024];
    for (int i = 0; i < buffer.length; i++) {
        buffer[i] = '1';
    }
    try {
        Configuration conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
        conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, checksumSize);
        SimulatedFSDataset.setFactory(conf);
        //first time format
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        String bpid = cluster.getNamesystem().getBlockPoolId();
        DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
        DFSTestUtil.createFile(cluster.getFileSystem(), testPath, filesize, filesize, blockSize, (short) numDataNodes, 0L);
        waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
        List<Map<DatanodeStorage, BlockListAsLongs>> blocksList = cluster.getAllBlockReports(bpid);
        cluster.shutdown();
        cluster = null;
        /* Start the MiniDFSCluster with more datanodes since once a writeBlock
         * to a datanode fails, the same block cannot be written to it
         * immediately. In our case some replication attempts will fail.
         */
        LOG.info("Restarting minicluster");
        conf = new HdfsConfiguration();
        SimulatedFSDataset.setFactory(conf);
        conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes * 2).format(false).build();
        cluster.waitActive();
        Set<Block> uniqueBlocks = new HashSet<Block>();
        for (Map<DatanodeStorage, BlockListAsLongs> map : blocksList) {
            for (BlockListAsLongs blockList : map.values()) {
                for (Block b : blockList) {
                    uniqueBlocks.add(new Block(b));
                }
            }
        }
        // Insert all the blocks in the first data node
        LOG.info("Inserting " + uniqueBlocks.size() + " blocks");
        cluster.injectBlocks(0, uniqueBlocks, null);
        dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
        waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) Map(java.util.Map) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 8 with BlockListAsLongs

use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

the class TestSimulatedFSDataset method testInjectionEmpty.

@Test
public void testInjectionEmpty() throws IOException {
    SimulatedFSDataset fsdataset = getSimulatedFSDataset();
    BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
    assertEquals(0, blockReport.getNumberOfBlocks());
    int bytesAdded = addSomeBlocks(fsdataset);
    blockReport = fsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
    for (Block b : blockReport) {
        assertNotNull(b);
        assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
    }
    // Inject blocks into an empty fsdataset
    //  - injecting the blocks we got above.
    SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
    sfsdataset.injectBlocks(bpid, blockReport);
    blockReport = sfsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
    for (Block b : blockReport) {
        assertNotNull(b);
        assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
        assertEquals(blockIdToLen(b.getBlockId()), sfsdataset.getLength(new ExtendedBlock(bpid, b)));
    }
    assertEquals(bytesAdded, sfsdataset.getDfsUsed());
    assertEquals(sfsdataset.getCapacity() - bytesAdded, sfsdataset.getRemaining());
}
Also used : BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)

Example 9 with BlockListAsLongs

use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

the class TestSimulatedFSDataset method testInjectionNonEmpty.

@Test
public void testInjectionNonEmpty() throws IOException {
    SimulatedFSDataset fsdataset = getSimulatedFSDataset();
    BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
    assertEquals(0, blockReport.getNumberOfBlocks());
    int bytesAdded = addSomeBlocks(fsdataset);
    blockReport = fsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
    for (Block b : blockReport) {
        assertNotNull(b);
        assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
    }
    fsdataset = null;
    // Inject blocks into a non-empty fsdataset
    //  - injecting the blocks we got above.
    SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
    // Add some blocks whose block ids do not conflict with
    // the ones we are going to inject.
    bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS + 1, false);
    sfsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
    sfsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
    sfsdataset.injectBlocks(bpid, blockReport);
    blockReport = sfsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS * 2, blockReport.getNumberOfBlocks());
    for (Block b : blockReport) {
        assertNotNull(b);
        assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
        assertEquals(blockIdToLen(b.getBlockId()), sfsdataset.getLength(new ExtendedBlock(bpid, b)));
    }
    assertEquals(bytesAdded, sfsdataset.getDfsUsed());
    assertEquals(sfsdataset.getCapacity() - bytesAdded, sfsdataset.getRemaining());
    // Now test that the dataset cannot be created if it does not have sufficient capacity
    conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, 10);
    try {
        sfsdataset = getSimulatedFSDataset();
        sfsdataset.addBlockPool(bpid, conf);
        sfsdataset.injectBlocks(bpid, blockReport);
        assertTrue("Expected an IO exception", false);
    } catch (IOException e) {
    // ok - as expected
    }
}
Also used : BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) IOException(java.io.IOException) Test(org.junit.Test)

Example 10 with BlockListAsLongs

use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs in project hadoop by apache.

the class TestSimulatedFSDataset method testGetBlockReport.

@Test
public void testGetBlockReport() throws IOException {
    SimulatedFSDataset fsdataset = getSimulatedFSDataset();
    BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
    assertEquals(0, blockReport.getNumberOfBlocks());
    addSomeBlocks(fsdataset);
    blockReport = fsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
    for (Block b : blockReport) {
        assertNotNull(b);
        assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
    }
}
Also used : BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
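
The iteration pattern in the three TestSimulatedFSDataset examples above can also surface the per-replica state carried by the report. The sketch below is hypothetical: it assumes BlockListAsLongs iterates BlockReportReplica entries (the nested class listed in the aggregations below) and that each entry exposes getState(); only encode(), getNumberOfBlocks() and the ReplicaBeingWritten constructor are taken directly from the examples, and the block values are invented.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;

public class InspectBlockReport {
    public static void main(String[] args) {
        // Two fake replicas; Block(blockId, numBytes, generationStamp).
        List<ReplicaBeingWritten> replicas = Arrays.asList(
            new ReplicaBeingWritten(new Block(1, 1024, 1001), null, null, null),
            new ReplicaBeingWritten(new Block(2, 2048, 1001), null, null, null));

        BlockListAsLongs report = BlockListAsLongs.encode(replicas);
        System.out.println("blocks in report: " + report.getNumberOfBlocks());

        // Each entry carries the block id, length, generation stamp and the
        // replica state (assumption: BlockReportReplica#getState() is available).
        for (BlockReportReplica r : report) {
            System.out.println(r.getBlockId() + " len=" + r.getNumBytes()
                + " state=" + r.getState());
        }
    }
}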

Aggregations

BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 23 usages
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 12 usages
Test (org.junit.Test): 11 usages
ArrayList (java.util.ArrayList): 8 usages
Map (java.util.Map): 8 usages
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 8 usages
Path (org.apache.hadoop.fs.Path): 7 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 7 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 5 usages
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 4 usages
Matchers.anyString (org.mockito.Matchers.anyString): 4 usages
IOException (java.io.IOException): 3 usages
BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica): 3 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3 usages
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 3 usages
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 3 usages
ServiceException (com.google.protobuf.ServiceException): 2 usages
HashMap (java.util.HashMap): 2 usages
HashSet (java.util.HashSet): 2 usages
Configuration (org.apache.hadoop.conf.Configuration): 2 usages