Search in sources :

Example 56 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestBlockManager method addUcBlockToBM.

/**
 * Registers a new under-construction block with the BlockManager under test.
 *
 * @param blkId block id for the new block
 * @return the {@link BlockInfo} that was added to the blocks map
 */
private BlockInfo addUcBlockToBM(long blkId) {
    // Contiguous block with replication factor 3, flipped into UC state.
    final BlockInfo info = new BlockInfoContiguous(new Block(blkId), (short) 3);
    info.convertToBlockUnderConstruction(UNDER_CONSTRUCTION, null);
    // Back the block with a fresh mock INode so lookups through the
    // namesystem spy resolve to a real block collection.
    final long id = ++mockINodeId;
    final INodeFile inode = TestINodeFile.createINodeFile(id);
    info.setBlockCollectionId(id);
    bm.blocksMap.addBlockCollection(info, inode);
    doReturn(inode).when(fsn).getBlockCollection(id);
    return info;
}
Also used : ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) Block(org.apache.hadoop.hdfs.protocol.Block) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) TestINodeFile(org.apache.hadoop.hdfs.server.namenode.TestINodeFile) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile)

Example 57 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestBlockInfo method testAddStorageWithDifferentBlock.

@Test(expected = IllegalArgumentException.class)
public void testAddStorageWithDifferentBlock() throws Exception {
    // Registering a storage while reporting a *different* block than the
    // one being updated must be rejected with IllegalArgumentException.
    final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
    final BlockInfo stored = new BlockInfoContiguous(new Block(1000L), (short) 3);
    final BlockInfo reported = new BlockInfoContiguous(new Block(1001L), (short) 3);
    stored.addStorage(storage, reported);
}
Also used : Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)

Example 58 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestBalancerWithMultipleNameNodes method unevenDistribution.

/**
   * First start a cluster and fill the cluster up to a certain size. Then
   * redistribute blocks according to the required distribution. Finally,
   * balance the cluster.
   *
   * @param nNameNodes Number of NameNodes
   * @param nNameNodesToBalance Number of NameNodes to run the balancer on
   * @param distributionPerNN The distribution for each NameNode.
   * @param capacities Capacities of the datanodes
   * @param racks Rack names
   * @param conf Configuration
   */
private void unevenDistribution(final int nNameNodes, final int nNameNodesToBalance, long[] distributionPerNN, long[] capacities, String[] racks, Configuration conf) throws Exception {
    LOG.info("UNEVEN 0");
    // All three per-datanode arrays must describe the same set of datanodes.
    final int nDataNodes = distributionPerNN.length;
    if (capacities.length != nDataNodes || racks.length != nDataNodes) {
        throw new IllegalArgumentException("Array length is not the same");
    }
    if (nNameNodesToBalance > nNameNodes) {
        throw new IllegalArgumentException("Number of namenodes to balance is " + "greater than the number of namenodes.");
    }
    // calculate total space that need to be filled
    final long usedSpacePerNN = TestBalancer.sum(distributionPerNN);
    // Phase 1: start a throwaway federated cluster just to generate the
    // blocks; it is shut down before the real test cluster comes up.
    final ExtendedBlock[][] blocks;
    {
        LOG.info("UNEVEN 1");
        final MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration(conf)).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes)).numDataNodes(nDataNodes).racks(racks).simulatedCapacities(capacities).build();
        LOG.info("UNEVEN 2");
        try {
            cluster.waitActive();
            // Copies the cluster's federated namenode addresses into conf so
            // the second cluster (format=false) can reuse the same namespaces.
            DFSTestUtil.setFederatedConfiguration(cluster, conf);
            LOG.info("UNEVEN 3");
            final Suite s = new Suite(cluster, nNameNodes, nDataNodes, null, conf);
            blocks = generateBlocks(s, usedSpacePerNN);
            LOG.info("UNEVEN 4");
        } finally {
            cluster.shutdown();
        }
    }
    // Disable the safemode threshold so the restarted cluster leaves
    // safemode immediately even before blocks are injected.
    conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
    // Phase 2: restart on the existing storage (format=false), inject the
    // skewed block distribution, then run and verify the balancer.
    {
        LOG.info("UNEVEN 10");
        final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes)).numDataNodes(nDataNodes).racks(racks).simulatedCapacities(capacities).format(false).build();
        LOG.info("UNEVEN 11");
        try {
            cluster.waitActive();
            LOG.info("UNEVEN 12");
            // Only the first nNameNodesToBalance block pools are handed to
            // the balancer; the remaining pools must be left untouched.
            Set<String> blockpools = new HashSet<String>();
            for (int i = 0; i < nNameNodesToBalance; i++) {
                blockpools.add(cluster.getNamesystem(i).getBlockPoolId());
            }
            BalancerParameters.Builder b = new BalancerParameters.Builder();
            b.setBlockpools(blockpools);
            BalancerParameters params = b.build();
            final Suite s = new Suite(cluster, nNameNodes, nDataNodes, params, conf);
            for (int n = 0; n < nNameNodes; n++) {
                // redistribute blocks
                final Block[][] blocksDN = TestBalancer.distributeBlocks(blocks[n], s.replication, distributionPerNN);
                for (int d = 0; d < blocksDN.length; d++) cluster.injectBlocks(n, d, Arrays.asList(blocksDN[d]));
                LOG.info("UNEVEN 13: n=" + n);
            }
            final long totalCapacity = TestBalancer.sum(capacities);
            final long totalUsed = nNameNodes * usedSpacePerNN;
            LOG.info("UNEVEN 14");
            runBalancer(s, totalUsed, totalCapacity);
            LOG.info("UNEVEN 15");
        } finally {
            cluster.shutdown();
        }
        LOG.info("UNEVEN 16");
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HashSet(java.util.HashSet) Set(java.util.Set) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) BalancerParameters(org.apache.hadoop.hdfs.server.balancer.BalancerParameters) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block)

Example 59 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestLowRedundancyBlockQueues method genStripedBlockInfo.

/**
 * Builds a striped block of the given size using the test's EC policy.
 *
 * @param id block id
 * @param numBytes size to record on the block
 * @return the populated striped {@link BlockInfo}
 */
private BlockInfo genStripedBlockInfo(long id, long numBytes) {
    final BlockInfoStriped striped = new BlockInfoStriped(new Block(id), ecPolicy);
    striped.setNumBytes(numBytes);
    return striped;
}
Also used : Block(org.apache.hadoop.hdfs.protocol.Block)

Example 60 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestCommitBlockSynchronization method testCommitBlockSynchronizationWithCloseAndNonExistantTarget.

@Test
public void testCommitBlockSynchronizationWithCloseAndNonExistantTarget() throws IOException {
    final INodeFile file = mockFileUnderConstruction();
    final FSNamesystem namesystemSpy =
        makeNameSystemSpy(new Block(blockId, length, genStamp), file);
    // Recovery target that does not resolve to any live datanode.
    final DatanodeID[] newTargets = new DatanodeID[] {
        new DatanodeID("0.0.0.0", "nonexistantHost", "1", 0, 0, 0, 0) };
    final String[] storageIDs = new String[] { "fake-storage-ID" };
    final ExtendedBlock lastBlock = new ExtendedBlock();
    namesystemSpy.commitBlockSynchronization(
        lastBlock, genStamp, length, true, false, newTargets, storageIDs);
    // Repeat the call to make sure it returns true
    namesystemSpy.commitBlockSynchronization(
        lastBlock, genStamp, length, true, false, newTargets, storageIDs);
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)

Aggregations

Block (org.apache.hadoop.hdfs.protocol.Block)155 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)79 Test (org.junit.Test)77 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)74 Path (org.apache.hadoop.fs.Path)28 LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock)26 IOException (java.io.IOException)24 BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)22 Configuration (org.apache.hadoop.conf.Configuration)20 ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo)18 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)17 BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous)17 CachedBlock (org.apache.hadoop.hdfs.server.namenode.CachedBlock)17 BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped)15 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)14 ArrayList (java.util.ArrayList)12 RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock)11 DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage)11 FsPermission (org.apache.hadoop.fs.permission.FsPermission)10 DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration)10