Use of org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations in project hadoop by apache.
From class BlockManager, method addBlock.
/**
 * Get all valid locations of the block & add the block to results
 * @return the length of the added block; 0 if the block is not added. If the
 *         added block is a block group, return its approximate internal block size
 */
private long addBlock(BlockInfo block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> locations = getValidLocations(block);
  if (locations.size() == 0) {
    return 0;
  } else {
    final String[] datanodeUuids = new String[locations.size()];
    final String[] storageIDs = new String[datanodeUuids.length];
    final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
    for (int i = 0; i < locations.size(); i++) {
      final DatanodeStorageInfo s = locations.get(i);
      datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
      storageIDs[i] = s.getStorageID();
      storageTypes[i] = s.getStorageType();
    }
    BlockWithLocations blkWithLocs = new BlockWithLocations(
        block, datanodeUuids, storageIDs, storageTypes);
    if (block.isStriped()) {
      BlockInfoStriped blockStriped = (BlockInfoStriped) block;
      byte[] indices = new byte[locations.size()];
      for (int i = 0; i < locations.size(); i++) {
        indices[i] = (byte) blockStriped.getStorageBlockIndex(locations.get(i));
      }
      results.add(new StripedBlockWithLocations(blkWithLocs, indices,
          blockStriped.getDataBlockNum(), blockStriped.getCellSize()));
      // approximate size
      return block.getNumBytes() / blockStriped.getDataBlockNum();
    } else {
      results.add(blkWithLocs);
      return block.getNumBytes();
    }
  }
}
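For a striped block group, the value returned (and later accumulated into totalSize) is not the full group size but an approximation of one internal block: getNumBytes() divided by the number of data blocks. A small self-contained sketch of that arithmetic; the RS-6-3 figures below are hypothetical, not taken from the source:

public class ApproxInternalBlockSize {
  public static void main(String[] args) {
    // Hypothetical RS-6-3 block group: 384 MB of data striped over 6 data blocks.
    long numBytes = 384L * 1024 * 1024; // what block.getNumBytes() would report
    short dataBlockNum = 6;             // what blockStriped.getDataBlockNum() would report
    long approx = numBytes / dataBlockNum;
    System.out.println(approx);         // 67108864, i.e. 64 MB per internal block
  }
}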
Use of org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations in project hadoop by apache.
From class BlockManager, method getBlocksWithLocations.
/** Get all blocks with location information from a datanode. */
public BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
  final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
  if (node == null) {
    blockLog.warn("BLOCK* getBlocks: Asking for blocks from an"
        + " unrecorded node {}", datanode);
    throw new HadoopIllegalArgumentException(
        "Datanode " + datanode + " not found.");
  }
  int numBlocks = node.numBlocks();
  if (numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<BlockInfo> iter = node.getBlockIterator();
  // starting from a random block
  int startBlock = ThreadLocalRandom.current().nextInt(numBlocks);
  // skip blocks
  for (int i = 0; i < startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
  long totalSize = 0;
  BlockInfo curBlock;
  while (totalSize < size && iter.hasNext()) {
    curBlock = iter.next();
    if (!curBlock.isComplete()) {
      continue;
    }
    if (curBlock.getNumBytes() < getBlocksMinBlockSize) {
      continue;
    }
    totalSize += addBlock(curBlock, results);
  }
  if (totalSize < size) {
    // start from the beginning
    iter = node.getBlockIterator();
    for (int i = 0; i < startBlock && totalSize < size; i++) {
      curBlock = iter.next();
      if (!curBlock.isComplete()) {
        continue;
      }
      if (curBlock.getNumBytes() < getBlocksMinBlockSize) {
        continue;
      }
      totalSize += addBlock(curBlock, results);
    }
  }
  return new BlocksWithLocations(
      results.toArray(new BlockWithLocations[results.size()]));
}
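The traversal pattern here — start at a random block, scan forward until the size target is met, then wrap around to the skipped prefix — keeps repeated balancer calls from always returning the same blocks. A self-contained sketch of the same pattern; the names are illustrative, not Hadoop APIs:

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

public class RandomStartScan {
  /** Sum items starting at a random index, wrapping to the front until target is met. */
  static long scan(List<Long> sizes, long target) {
    int start = ThreadLocalRandom.current().nextInt(sizes.size());
    long total = 0;
    Iterator<Long> iter = sizes.listIterator(start); // skip straight to the random start
    while (total < target && iter.hasNext()) {
      total += iter.next();
    }
    if (total < target) { // wrap around, re-scanning only the skipped prefix
      iter = sizes.iterator();
      for (int i = 0; i < start && total < target; i++) {
        total += iter.next();
      }
    }
    return total;
  }

  public static void main(String[] args) {
    System.out.println(scan(Arrays.asList(10L, 20L, 30L, 40L), 60L));
  }
}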
Use of org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations in project hadoop by apache.
From class PBHelper, method convert.
public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) {
  List<BlockWithLocationsProto> b = blocks.getBlocksList();
  BlockWithLocations[] ret = new BlockWithLocations[b.size()];
  int i = 0;
  for (BlockWithLocationsProto entry : b) {
    ret[i++] = convert(entry);
  }
  return new BlocksWithLocations(ret);
}
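PBHelper also converts in the opposite direction. The body below is a sketch inferred from the forward converter above and the standard protobuf builder pattern; the exact code in the Hadoop source may differ:

public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
  BlocksWithLocationsProto.Builder builder = BlocksWithLocationsProto.newBuilder();
  for (BlockWithLocations b : blks.getBlocks()) {
    builder.addBlocks(convert(b)); // per-block converter mirroring convert(BlockWithLocationsProto)
  }
  return builder.build();
}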
Use of org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations in project hadoop by apache.
From class TestGetBlocks, method testGetBlocks.
/** Test getBlocks. */
@Test
public void testGetBlocks() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  final short REPLICATION_FACTOR = (short) 2;
  final int DEFAULT_BLOCK_SIZE = 1024;
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  CONF.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY,
      DEFAULT_BLOCK_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF)
      .numDataNodes(REPLICATION_FACTOR).build();
  try {
    cluster.waitActive();
    // the third block will not be visible to getBlocks
    // (at 1 byte it is below the configured minimum block size)
    long fileLen = 2 * DEFAULT_BLOCK_SIZE + 1;
    DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/tmp.txt"),
        fileLen, REPLICATION_FACTOR, 0L);
    // get blocks & data nodes
    List<LocatedBlock> locatedBlocks;
    DatanodeInfo[] dataNodes = null;
    boolean notWritten;
    do {
      final DFSClient dfsclient = new DFSClient(
          DFSUtilClient.getNNAddress(CONF), CONF);
      locatedBlocks = dfsclient.getNamenode()
          .getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
      assertEquals(3, locatedBlocks.size());
      notWritten = false;
      for (int i = 0; i < 2; i++) {
        dataNodes = locatedBlocks.get(i).getLocations();
        if (dataNodes.length != REPLICATION_FACTOR) {
          notWritten = true;
          try {
            Thread.sleep(10);
          } catch (InterruptedException e) {
          }
          break;
        }
      }
    } while (notWritten);
    // get RPC client to namenode
    InetSocketAddress addr = new InetSocketAddress("localhost",
        cluster.getNameNodePort());
    NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF,
        DFSUtilClient.getNNUri(addr), NamenodeProtocol.class).getProxy();
    // get blocks of size fileLen from dataNodes[0]
    BlockWithLocations[] locs;
    locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
    assertEquals(locs.length, 2);
    assertEquals(locs[0].getStorageIDs().length, 2);
    assertEquals(locs[1].getStorageIDs().length, 2);
    // get blocks of size DEFAULT_BLOCK_SIZE from dataNodes[0]
    locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
    assertEquals(locs.length, 1);
    assertEquals(locs[0].getStorageIDs().length, 2);
    // get blocks of size 1 from dataNodes[0]
    locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
    assertEquals(locs.length, 1);
    assertEquals(locs[0].getStorageIDs().length, 2);
    // get blocks of size 0 from dataNodes[0]
    getBlocksWithException(namenode, dataNodes[0], 0);
    // get blocks of size -1 from dataNodes[0]
    getBlocksWithException(namenode, dataNodes[0], -1);
    // get blocks from a non-existent datanode
    DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
    getBlocksWithException(namenode, info, 2);
  } finally {
    cluster.shutdown();
  }
}
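The getBlocksWithException helper is not part of this snippet. Below is a plausible sketch consistent with how it is called; the exception-matching detail is an assumption based on getBlocksWithLocations throwing HadoopIllegalArgumentException for bad arguments:

private void getBlocksWithException(NamenodeProtocol namenode,
    DatanodeInfo datanode, long size) throws IOException {
  boolean gotException = false;
  try {
    namenode.getBlocks(datanode, size);
  } catch (RemoteException e) {
    gotException = true;
    // size <= 0 and unknown datanodes are both rejected on the NameNode side
    // with HadoopIllegalArgumentException (assumed here)
    assertTrue(e.getClassName().contains("IllegalArgumentException"));
  }
  assertTrue(gotException);
}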
Use of org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations in project hadoop by apache.
From class TestPBHelper, method testConvertBlockWithLocations.
@Test
public void testConvertBlockWithLocations() {
  // round-trip both variants: contiguous (false) and striped (true)
  boolean[] testSuite = new boolean[] { false, true };
  for (int i = 0; i < testSuite.length; i++) {
    BlockWithLocations locs = getBlockWithLocations(1, testSuite[i]);
    BlockWithLocationsProto locsProto = PBHelper.convert(locs);
    BlockWithLocations locs2 = PBHelper.convert(locsProto);
    compare(locs, locs2);
  }
}
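The getBlockWithLocations factory and the compare helper are defined elsewhere in TestPBHelper. A hypothetical sketch of the factory, using only the constructors seen in addBlock above; all field values here are made up for illustration:

private static BlockWithLocations getBlockWithLocations(int bid, boolean isStriped) {
  final String[] datanodeUuids = { "dn1", "dn2", "dn3" };
  final String[] storageIDs = { "s1", "s2", "s3" };
  final StorageType[] storageTypes =
      { StorageType.DISK, StorageType.DISK, StorageType.DISK };
  final byte[] indices = { 0, 1, 2 };
  final short dataBlkNum = 6;
  BlockWithLocations blkLocs = new BlockWithLocations(
      new Block(bid, 0, 1), datanodeUuids, storageIDs, storageTypes);
  if (isStriped) {
    // the cell size constant is an assumption; the real test would read it from an EC policy
    blkLocs = new StripedBlockWithLocations(blkLocs, indices, dataBlkNum, 64 * 1024);
  }
  return blkLocs;
}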