Example 61 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestBalancerWithMultipleNameNodes method unevenDistribution.

/**
   * First start a cluster and fill the cluster up to a certain size. Then
   * redistribute blocks according to the required distribution. Finally, balance
   * the cluster.
   *
   * @param nNameNodes Number of NameNodes
   * @param nNameNodesToBalance Number of NameNodes to run the balancer on
   * @param distributionPerNN The distribution for each NameNode.
   * @param capacities Capacities of the datanodes
   * @param racks Rack names
   * @param conf Configuration
   */
private void unevenDistribution(final int nNameNodes,
        final int nNameNodesToBalance, long[] distributionPerNN,
        long[] capacities, String[] racks, Configuration conf) throws Exception {
    LOG.info("UNEVEN 0");
    final int nDataNodes = distributionPerNN.length;
    if (capacities.length != nDataNodes || racks.length != nDataNodes) {
        throw new IllegalArgumentException("Array length is not the same");
    }
    if (nNameNodesToBalance > nNameNodes) {
        throw new IllegalArgumentException("Number of namenodes to balance is " + "greater than the number of namenodes.");
    }
    // calculate the total space that needs to be filled
    final long usedSpacePerNN = TestBalancer.sum(distributionPerNN);
    // fill the cluster
    final ExtendedBlock[][] blocks;
    {
        LOG.info("UNEVEN 1");
        final MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration(conf))
            .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
            .numDataNodes(nDataNodes)
            .racks(racks)
            .simulatedCapacities(capacities)
            .build();
        LOG.info("UNEVEN 2");
        try {
            cluster.waitActive();
            DFSTestUtil.setFederatedConfiguration(cluster, conf);
            LOG.info("UNEVEN 3");
            final Suite s = new Suite(cluster, nNameNodes, nDataNodes, null, conf);
            blocks = generateBlocks(s, usedSpacePerNN);
            LOG.info("UNEVEN 4");
        } finally {
            cluster.shutdown();
        }
    }
    conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
    {
        LOG.info("UNEVEN 10");
        final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
            .numDataNodes(nDataNodes)
            .racks(racks)
            .simulatedCapacities(capacities)
            .format(false)
            .build();
        LOG.info("UNEVEN 11");
        try {
            cluster.waitActive();
            LOG.info("UNEVEN 12");
            Set<String> blockpools = new HashSet<String>();
            for (int i = 0; i < nNameNodesToBalance; i++) {
                blockpools.add(cluster.getNamesystem(i).getBlockPoolId());
            }
            BalancerParameters.Builder b = new BalancerParameters.Builder();
            b.setBlockpools(blockpools);
            BalancerParameters params = b.build();
            final Suite s = new Suite(cluster, nNameNodes, nDataNodes, params, conf);
            for (int n = 0; n < nNameNodes; n++) {
                // redistribute blocks
                final Block[][] blocksDN = TestBalancer.distributeBlocks(blocks[n], s.replication, distributionPerNN);
                for (int d = 0; d < blocksDN.length; d++) {
                    cluster.injectBlocks(n, d, Arrays.asList(blocksDN[d]));
                }
                LOG.info("UNEVEN 13: n=" + n);
            }
            final long totalCapacity = TestBalancer.sum(capacities);
            final long totalUsed = nNameNodes * usedSpacePerNN;
            LOG.info("UNEVEN 14");
            runBalancer(s, totalUsed, totalCapacity);
            LOG.info("UNEVEN 15");
        } finally {
            cluster.shutdown();
        }
        LOG.info("UNEVEN 16");
    }
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), HashSet(java.util.HashSet), Set(java.util.Set), Configuration(org.apache.hadoop.conf.Configuration), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), BalancerParameters(org.apache.hadoop.hdfs.server.balancer.BalancerParameters), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock), Block(org.apache.hadoop.hdfs.protocol.Block)
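
As the test shows, BalancerParameters.Builder.setBlockpools is what scopes a balancer run to a subset of federated block pools. A minimal standalone sketch of that pattern follows; the pool IDs are invented placeholders, whereas the test reads them from cluster.getNamesystem(i).getBlockPoolId().

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hdfs.server.balancer.BalancerParameters;

public class BlockpoolScopedBalancerParams {
    public static void main(String[] args) {
        // Hypothetical block pool IDs; a real run would obtain these from
        // the namenodes of the federated cluster.
        Set<String> blockpools = new HashSet<String>();
        blockpools.add("BP-1111111111-127.0.0.1-1000000000001");
        blockpools.add("BP-2222222222-127.0.0.1-1000000000002");

        // Only the pools in this set are balanced; other namespaces that
        // share the same datanodes are left untouched.
        BalancerParameters.Builder b = new BalancerParameters.Builder();
        b.setBlockpools(blockpools);
        BalancerParameters params = b.build();

        System.out.println("Balancer scoped to " + blockpools.size() + " block pool(s)");
    }
}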

Example 62 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestOverReplicatedBlocks method testInvalidateOverReplicatedBlock.

/**
   * Test that an over-replicated block is invalidated when the replication
   * factor is decreased for a partially written block.
   */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
        final FSNamesystem namesystem = cluster.getNamesystem();
        final BlockManager bm = namesystem.getBlockManager();
        FileSystem fs = cluster.getFileSystem();
        Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
        FSDataOutputStream out = fs.create(p, (short) 2);
        out.writeBytes("HDFS-3119: " + p);
        out.hsync();
        fs.setReplication(p, (short) 1);
        out.close();
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
        assertEquals("Expected only one live replica for the block", 1, bm.countNodes(bm.getStoredBlock(block.getLocalBlock())).liveReplicas());
    } finally {
        cluster.shutdown();
    }
}
Also used: Path(org.apache.hadoop.fs.Path), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), Configuration(org.apache.hadoop.conf.Configuration), FileSystem(org.apache.hadoop.fs.FileSystem), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test(org.junit.Test)
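
One detail worth noting above is bm.countNodes(bm.getStoredBlock(block.getLocalBlock())): the BlockManager tracks replicas by the pool-local Block, while ExtendedBlock adds the block pool ID that makes block IDs unique across a federated cluster. A small sketch of that wrapper relationship, using invented identifiers:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class LocalBlockDemo {
    public static void main(String[] args) {
        // Hypothetical block identity: block ID, length, generation stamp.
        Block local = new Block(1073741825L, 11L, 1001L);

        // An ExtendedBlock qualifies the block with its block pool ID.
        ExtendedBlock extended =
            new ExtendedBlock("BP-1111111111-127.0.0.1-1000000000001", local);

        // getLocalBlock() strips the pool qualifier again, which is the form
        // BlockManager lookups such as bm.getStoredBlock(...) expect.
        Block unwrapped = extended.getLocalBlock();
        System.out.println(unwrapped.getBlockId() == local.getBlockId()); // true
    }
}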

Example 63 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestCommitBlockSynchronization method testCommitBlockSynchronizationWithCloseAndNonExistantTarget.

@Test
public void testCommitBlockSynchronizationWithCloseAndNonExistantTarget() throws IOException {
    INodeFile file = mockFileUnderConstruction();
    Block block = new Block(blockId, length, genStamp);
    FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
    DatanodeID[] newTargets = new DatanodeID[] { new DatanodeID("0.0.0.0", "nonexistantHost", "1", 0, 0, 0, 0) };
    String[] storageIDs = new String[] { "fake-storage-ID" };
    ExtendedBlock lastBlock = new ExtendedBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, storageIDs);
    // Repeat the call to make sure the second invocation is idempotent and
    // does not throw
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, storageIDs);
}
Also used: DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block(org.apache.hadoop.hdfs.protocol.Block), Test(org.junit.Test)
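
Both commitBlockSynchronization calls above pass a default-constructed ExtendedBlock, i.e. one with no block pool ID and zeroed block identity. For contrast, here is a sketch of a fully specified ExtendedBlock built with the four-argument constructor used elsewhere on this page; every value is hypothetical:

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class ExtendedBlockConstruction {
    public static void main(String[] args) {
        // Empty identity, as used in the test above.
        ExtendedBlock empty = new ExtendedBlock();

        // Pool ID, block ID, length, and generation stamp (all invented).
        ExtendedBlock full = new ExtendedBlock(
            "BP-1111111111-127.0.0.1-1000000000001", // block pool ID
            1073741825L,                             // block ID
            11L,                                     // number of bytes
            1001L);                                  // generation stamp

        System.out.println(empty + " vs " + full);
    }
}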

Example 64 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestCommitBlockSynchronization method testCommitBlockSynchronizationWithDelete.

@Test
public void testCommitBlockSynchronizationWithDelete() throws IOException {
    INodeFile file = mockFileUnderConstruction();
    Block block = new Block(blockId, length, genStamp);
    FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
    DatanodeID[] newTargets = new DatanodeID[0];
    ExtendedBlock lastBlock = new ExtendedBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, false, true, newTargets, null);
    // Simulate removing the last block from the file.
    doReturn(null).when(file).removeLastBlock(any(Block.class));
    // Repeat the call to make sure it does not throw
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, false, true, newTargets, null);
}
Also used: DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block(org.apache.hadoop.hdfs.protocol.Block), Test(org.junit.Test)
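
The doReturn(null).when(file).removeLastBlock(...) line above stubs the mocked INodeFile so the repeated call sees the last block as already removed. The same Mockito stubbing pattern in isolation, on a toy interface (the names below are illustrative, not taken from the test):

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;

public class DoReturnStubbingDemo {
    // Toy stand-in for the mocked INodeFile used in the test.
    interface FileLike {
        Object removeLastBlock(Object block);
    }

    public static void main(String[] args) {
        FileLike file = mock(FileLike.class);

        // doReturn(...).when(...) stubs the call without invoking the real
        // method, which is why it is the safe choice on spied objects.
        doReturn(null).when(file).removeLastBlock(any(Object.class));

        // Prints "null": the stub answers instead of a real implementation.
        System.out.println(file.removeLastBlock(new Object()));
    }
}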

Example 65 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestUpdatePipelineWithSnapshots method testUpdatePipelineAfterDelete.

// Regression test for HDFS-6647.
@Test
public void testUpdatePipelineAfterDelete() throws Exception {
    Configuration conf = new HdfsConfiguration();
    Path file = new Path("/test-file");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        FileSystem fs = cluster.getFileSystem();
        NamenodeProtocols namenode = cluster.getNameNodeRpc();
        DFSOutputStream out = null;
        try {
            // Create a file and make sure a block is allocated for it.
            out = (DFSOutputStream) (fs.create(file).getWrappedStream());
            out.write(1);
            out.hflush();
            // Create a snapshot that includes the file.
            SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs, new Path("/"), "s1");
            // Grab the block info of this file for later use.
            FSDataInputStream in = null;
            ExtendedBlock oldBlock = null;
            try {
                in = fs.open(file);
                oldBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
            } finally {
                IOUtils.closeStream(in);
            }
            // Allocate a new block ID/gen stamp so we can simulate pipeline
            // recovery.
            String clientName = ((DistributedFileSystem) fs).getClient().getClientName();
            LocatedBlock newLocatedBlock = namenode.updateBlockForPipeline(oldBlock, clientName);
            ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(), oldBlock.getBlockId(), oldBlock.getNumBytes(), newLocatedBlock.getBlock().getGenerationStamp());
            // Delete the file from the present FS. It will still exist in the
            // previously-created snapshot. This will log an OP_DELETE for the
            // file in question.
            fs.delete(file, true);
            // Simulate a pipeline recovery, where an OP_UPDATE_BLOCKS would be
            // logged for the file in question.
            try {
                namenode.updatePipeline(clientName, oldBlock, newBlock, newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());
            } catch (IOException ioe) {
                // normal
                assertExceptionContains("does not exist or it is not under construction", ioe);
            }
            // Make sure the NN can restart with the edit logs as we have them now.
            cluster.restartNameNode(true);
        } finally {
            IOUtils.closeStream(out);
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: Path(org.apache.hadoop.fs.Path), NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), Configuration(org.apache.hadoop.conf.Configuration), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock), IOException(java.io.IOException), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), FileSystem(org.apache.hadoop.fs.FileSystem), FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream), DFSOutputStream(org.apache.hadoop.hdfs.DFSOutputStream), Test(org.junit.Test)
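
The newBlock construction above is the essence of pipeline recovery bookkeeping: keep the pool ID, block ID, and length, but adopt the newer generation stamp so stale replicas from the failed pipeline can be told apart and discarded. A standalone sketch of that step, with invented values standing in for what the test gets from namenode.updateBlockForPipeline:

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class PipelineRecoveryBlock {
    public static void main(String[] args) {
        // Hypothetical block as it existed before the pipeline failure.
        ExtendedBlock oldBlock = new ExtendedBlock(
            "BP-1111111111-127.0.0.1-1000000000001", 1073741825L, 1L, 1001L);

        // In the test, the new generation stamp comes from the LocatedBlock
        // returned by namenode.updateBlockForPipeline(oldBlock, clientName).
        long newGenStamp = 1002L;

        // Same identity, newer generation stamp.
        ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
            oldBlock.getBlockId(), oldBlock.getNumBytes(), newGenStamp);

        System.out.println(oldBlock + " -> " + newBlock);
    }
}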

Aggregations

ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 208
Test (org.junit.Test): 124
Path (org.apache.hadoop.fs.Path): 91
Configuration (org.apache.hadoop.conf.Configuration): 71
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 63
FileSystem (org.apache.hadoop.fs.FileSystem): 62
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 55
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 53
IOException (java.io.IOException): 41
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 41
Block (org.apache.hadoop.hdfs.protocol.Block): 38
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 34
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 32
File (java.io.File): 22
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 20
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 18
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 18
InetSocketAddress (java.net.InetSocketAddress): 17
ArrayList (java.util.ArrayList): 17