
Example 1 with BlockWithLocations

Use of org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations in project hadoop by apache.

In class BlockManager, method addBlock:

/**
   * Get all valid locations of the block & add the block to results
   * @return the length of the added block; 0 if the block is not added. If the
   * added block is a block group, return its approximate internal block size
   */
private long addBlock(BlockInfo block, List<BlockWithLocations> results) {
    final List<DatanodeStorageInfo> locations = getValidLocations(block);
    if (locations.size() == 0) {
        return 0;
    } else {
        final String[] datanodeUuids = new String[locations.size()];
        final String[] storageIDs = new String[datanodeUuids.length];
        final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
        for (int i = 0; i < locations.size(); i++) {
            final DatanodeStorageInfo s = locations.get(i);
            datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
            storageIDs[i] = s.getStorageID();
            storageTypes[i] = s.getStorageType();
        }
        BlockWithLocations blkWithLocs = new BlockWithLocations(block, datanodeUuids, storageIDs, storageTypes);
        if (block.isStriped()) {
            BlockInfoStriped blockStriped = (BlockInfoStriped) block;
            byte[] indices = new byte[locations.size()];
            for (int i = 0; i < locations.size(); i++) {
                indices[i] = (byte) blockStriped.getStorageBlockIndex(locations.get(i));
            }
            results.add(new StripedBlockWithLocations(blkWithLocs, indices, blockStriped.getDataBlockNum(), blockStriped.getCellSize()));
            // approximate size
            return block.getNumBytes() / blockStriped.getDataBlockNum();
        } else {
            results.add(blkWithLocs);
            return block.getNumBytes();
        }
    }
}
Also used : StorageType(org.apache.hadoop.fs.StorageType) StripedBlockWithLocations(org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations) BlockWithLocations(org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations)
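
For reference, a minimal standalone sketch of the same parallel-array construction that addBlock performs. The block id, length, generation stamp, datanode UUIDs, and storage IDs below are dummy illustrative values, not data from the Hadoop code above.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;

public class BlockWithLocationsSketch {
    public static void main(String[] args) {
        // A dummy block: id 1, 1024 bytes, generation stamp 1001.
        Block block = new Block(1L, 1024L, 1001L);
        // Parallel arrays, one slot per replica location, as built in addBlock above.
        String[] datanodeUuids = { "dn-uuid-1", "dn-uuid-2" };
        String[] storageIDs = { "storage-1", "storage-2" };
        StorageType[] storageTypes = { StorageType.DISK, StorageType.DISK };
        BlockWithLocations blkWithLocs =
            new BlockWithLocations(block, datanodeUuids, storageIDs, storageTypes);
        System.out.println(blkWithLocs);
    }
}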

Example 2 with BlockWithLocations

Use of org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations in project hadoop by apache.

In class BlockManager, method getBlocksWithLocations:

/** Get all blocks with location information from a datanode. */
public BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode, final long size) throws UnregisteredNodeException {
    final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
    if (node == null) {
        blockLog.warn("BLOCK* getBlocks: Asking for blocks from an" + " unrecorded node {}", datanode);
        throw new HadoopIllegalArgumentException("Datanode " + datanode + " not found.");
    }
    int numBlocks = node.numBlocks();
    if (numBlocks == 0) {
        return new BlocksWithLocations(new BlockWithLocations[0]);
    }
    Iterator<BlockInfo> iter = node.getBlockIterator();
    // starting from a random block
    int startBlock = ThreadLocalRandom.current().nextInt(numBlocks);
    // skip the first startBlock blocks
    for (int i = 0; i < startBlock; i++) {
        iter.next();
    }
    List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
    long totalSize = 0;
    BlockInfo curBlock;
    while (totalSize < size && iter.hasNext()) {
        curBlock = iter.next();
        if (!curBlock.isComplete())
            continue;
        if (curBlock.getNumBytes() < getBlocksMinBlockSize) {
            continue;
        }
        totalSize += addBlock(curBlock, results);
    }
    if (totalSize < size) {
        // start from the beginning
        iter = node.getBlockIterator();
        for (int i = 0; i < startBlock && totalSize < size; i++) {
            curBlock = iter.next();
            if (!curBlock.isComplete())
                continue;
            if (curBlock.getNumBytes() < getBlocksMinBlockSize) {
                continue;
            }
            totalSize += addBlock(curBlock, results);
        }
    }
    return new BlocksWithLocations(results.toArray(new BlockWithLocations[results.size()]));
}
Also used : HadoopIllegalArgumentException(org.apache.hadoop.HadoopIllegalArgumentException) BlocksWithLocations(org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations) ReportedBlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) ArrayList(java.util.ArrayList) StripedBlockWithLocations(org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations) BlockWithLocations(org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations)
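
The traversal above starts at a random block and wraps around to the skipped prefix, presumably so repeated calls do not keep returning the same blocks. The pattern can be shown in isolation; the List<Long> of sizes and the budget below are stand-ins for the sketch, not Hadoop API.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

public class RandomStartScanSketch {
    /** Sum entries from a random start index, wrapping around, until the budget is met. */
    static long scan(List<Long> sizes, long budget) {
        int start = ThreadLocalRandom.current().nextInt(sizes.size());
        long total = 0;
        // First pass: from the random start to the end of the list.
        for (int i = start; i < sizes.size() && total < budget; i++) {
            total += sizes.get(i);
        }
        // Second pass: cover the skipped prefix, mirroring getBlocksWithLocations above.
        for (int i = 0; i < start && total < budget; i++) {
            total += sizes.get(i);
        }
        return total;
    }

    public static void main(String[] args) {
        System.out.println(scan(Arrays.asList(512L, 1024L, 2048L, 4096L), 3000L));
    }
}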

Example 3 with BlockWithLocations

Use of org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations in project hadoop by apache.

In class PBHelper, method convert:

public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) {
    List<BlockWithLocationsProto> b = blocks.getBlocksList();
    BlockWithLocations[] ret = new BlockWithLocations[b.size()];
    int i = 0;
    for (BlockWithLocationsProto entry : b) {
        ret[i++] = convert(entry);
    }
    return new BlocksWithLocations(ret);
}
Also used : BlocksWithLocations(org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations) StripedBlockWithLocations(org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations) BlockWithLocations(org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations) BlockWithLocationsProto(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto)
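
The loop is an instance of a common protobuf pattern: map each entry of a repeated field to its domain object through a per-entry converter. A generic sketch of that shape, purely illustrative and not part of PBHelper:

import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

public class ProtoListConverterSketch {
    /** Generic form of the convert loop above: one converter call per list entry. */
    static <P, T> T[] convertAll(List<P> entries, Function<P, T> convert, T[] out) {
        int i = 0;
        for (P entry : entries) {
            out[i++] = convert.apply(entry);
        }
        return out;
    }

    public static void main(String[] args) {
        Integer[] result = convertAll(Arrays.asList("1", "2", "3"),
            Integer::valueOf, new Integer[3]);
        System.out.println(Arrays.toString(result));
    }
}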

Example 4 with BlockWithLocations

Use of org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations in project hadoop by apache.

In class TestGetBlocks, method testGetBlocks:

/** test getBlocks */
@Test
public void testGetBlocks() throws Exception {
    final Configuration CONF = new HdfsConfiguration();
    final short REPLICATION_FACTOR = (short) 2;
    final int DEFAULT_BLOCK_SIZE = 1024;
    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    CONF.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR).build();
    try {
        cluster.waitActive();
        // the third block will not be visible to getBlocks
        long fileLen = 2 * DEFAULT_BLOCK_SIZE + 1;
        DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/tmp.txt"), fileLen, REPLICATION_FACTOR, 0L);
        // get blocks & data nodes
        List<LocatedBlock> locatedBlocks;
        DatanodeInfo[] dataNodes = null;
        boolean notWritten;
        do {
            final DFSClient dfsclient = new DFSClient(DFSUtilClient.getNNAddress(CONF), CONF);
            locatedBlocks = dfsclient.getNamenode().getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
            assertEquals(3, locatedBlocks.size());
            notWritten = false;
            for (int i = 0; i < 2; i++) {
                dataNodes = locatedBlocks.get(i).getLocations();
                if (dataNodes.length != REPLICATION_FACTOR) {
                    notWritten = true;
                    try {
                        Thread.sleep(10);
                    } catch (InterruptedException e) {
                        // ignore and re-check the replica locations
                    }
                    break;
                }
            }
        } while (notWritten);
        // get RPC client to namenode
        InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
        NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF, DFSUtilClient.getNNUri(addr), NamenodeProtocol.class).getProxy();
        // get blocks of size fileLen from dataNodes[0]
        BlockWithLocations[] locs;
        locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
        assertEquals(2, locs.length);
        assertEquals(2, locs[0].getStorageIDs().length);
        assertEquals(2, locs[1].getStorageIDs().length);
        // get blocks of size BlockSize from dataNodes[0]
        locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
        assertEquals(1, locs.length);
        assertEquals(2, locs[0].getStorageIDs().length);
        // get blocks of size 1 from dataNodes[0]
        locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
        assertEquals(1, locs.length);
        assertEquals(2, locs[0].getStorageIDs().length);
        // get blocks of size 0 from dataNodes[0]
        getBlocksWithException(namenode, dataNodes[0], 0);
        // get blocks of size -1 from dataNodes[0]
        getBlocksWithException(namenode, dataNodes[0], -1);
        // get blocks (size 2) from a non-existent datanode
        DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
        getBlocksWithException(namenode, info, 2);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) BlockWithLocations(org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) NamenodeProtocol(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol) Test(org.junit.Test)
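
The getBlocksWithException helper is not shown in this excerpt. A plausible sketch, assuming it expects the NameNode to reject the call with a RemoteException whose wrapped class is an IllegalArgumentException variant (consistent with the HadoopIllegalArgumentException thrown in Example 2); the failure message is illustrative.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RemoteException;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

// Hypothetical reconstruction; the real helper lives elsewhere in TestGetBlocks.
private static void getBlocksWithException(NamenodeProtocol namenode,
        DatanodeInfo datanode, long size) throws IOException {
    try {
        namenode.getBlocks(datanode, size);
        fail("getBlocks should have rejected datanode=" + datanode + ", size=" + size);
    } catch (RemoteException e) {
        // Example 2 throws HadoopIllegalArgumentException for an unknown datanode;
        // assume invalid sizes are rejected with an IllegalArgumentException variant too.
        assertTrue(e.getClassName().contains("IllegalArgumentException"));
    }
}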

Example 5 with BlockWithLocations

Use of org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations in project hadoop by apache.

In class TestPBHelper, method testConvertBlockWithLocations:

@Test
public void testConvertBlockWithLocations() {
    // Round-trip both the contiguous (false) and striped (true) variants.
    for (boolean isStriped : new boolean[] { false, true }) {
        BlockWithLocations locs = getBlockWithLocations(1, isStriped);
        BlockWithLocationsProto locsProto = PBHelper.convert(locs);
        BlockWithLocations locs2 = PBHelper.convert(locsProto);
        compare(locs, locs2);
    }
}
Also used : StripedBlockWithLocations(org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations) BlockWithLocations(org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations) BlockWithLocationsProto(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto) Test(org.junit.Test)
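
The getBlockWithLocations and compare helpers are also outside this excerpt. A minimal sketch of the builder under stated assumptions: dummy location arrays, and a StripedBlockWithLocations wrapper when the striped flag is set, following the constructor shapes visible in Example 1. All concrete values are placeholders.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;

// Hypothetical reconstruction; all concrete values are illustrative only.
private static BlockWithLocations getBlockWithLocations(int blockId, boolean isStriped) {
    final String[] datanodeUuids = { "dn1", "dn2", "dn3" };
    final String[] storageIDs = { "s1", "s2", "s3" };
    final StorageType[] storageTypes =
        { StorageType.DISK, StorageType.DISK, StorageType.DISK };
    BlockWithLocations blkLocs = new BlockWithLocations(
            new Block(blockId, 0, 1), datanodeUuids, storageIDs, storageTypes);
    if (isStriped) {
        // Mirrors the StripedBlockWithLocations constructor used in Example 1:
        // (block with locations, storage indices, data block count, cell size).
        final byte[] indices = { 0, 1, 2 };
        final short dataBlockNum = 6;
        // 64 KB is a placeholder cell size.
        blkLocs = new StripedBlockWithLocations(blkLocs, indices, dataBlockNum, 64 * 1024);
    }
    return blkLocs;
}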

Aggregations

BlockWithLocations (org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations): 8 uses
StripedBlockWithLocations (org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations): 7 uses
BlocksWithLocations (org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations): 3 uses
Test (org.junit.Test): 3 uses
ByteString (com.google.protobuf.ByteString): 2 uses
StorageType (org.apache.hadoop.fs.StorageType): 2 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 2 uses
BlockWithLocationsProto (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto): 2 uses
InetSocketAddress (java.net.InetSocketAddress): 1 use
ArrayList (java.util.ArrayList): 1 use
HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException): 1 use
Configuration (org.apache.hadoop.conf.Configuration): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
Block (org.apache.hadoop.hdfs.protocol.Block): 1 use
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1 use
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 1 use
StorageTypeProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto): 1 use
BlocksWithLocationsProto (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto): 1 use
ReportedBlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo): 1 use
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 1 use