Use of java.util.BitSet in project Hadoop by Apache.
The class StripedReader, method initZeroStrip.
private void initZeroStrip() {
  if (zeroStripeBuffers != null) {
    for (int i = 0; i < zeroStripeBuffers.length; i++) {
      zeroStripeBuffers[i] = reconstructor.allocateBuffer(bufferSize);
    }
  }

  // A block whose bit is clear in the live BitSet is missing; if it also
  // has no data (length <= 0), it is a zero stripe that can be filled
  // with zeros instead of being read.
  BitSet bitset = reconstructor.getLiveBitSet();
  int k = 0;
  for (int i = 0; i < dataBlkNum + parityBlkNum; i++) {
    if (!bitset.get(i)) {
      if (reconstructor.getBlockLen(i) <= 0) {
        zeroStripeIndices[k++] = (short) i;
      }
    }
  }
}
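The scan above treats every index whose bit is clear in the live BitSet as a missing internal block, and any missing block with length <= 0 as a zero stripe. Here is a minimal, self-contained sketch of the same scan; the RS(6,3) layout and the block-length array are made-up stand-ins for the reconstructor's state, not Hadoop API calls:

import java.util.Arrays;
import java.util.BitSet;

public class ZeroStripeDemo {
  public static void main(String[] args) {
    final int dataBlkNum = 6;
    final int parityBlkNum = 3;
    // Hypothetical lengths for the 9 internal blocks of one block group.
    long[] blockLen = {64, 64, 0, 64, 64, 0, 64, 64, 64};

    // Mimic getLiveBitSet(): all blocks live except indices 2 and 5.
    BitSet live = new BitSet(dataBlkNum + parityBlkNum);
    live.set(0, dataBlkNum + parityBlkNum);
    live.clear(2);
    live.clear(5);

    // Same scan as initZeroStrip: a missing block with no data is a zero
    // stripe and can be reconstructed as all zeros without any reads.
    short[] zeroStripeIndices = new short[parityBlkNum];
    int k = 0;
    for (int i = 0; i < dataBlkNum + parityBlkNum; i++) {
      if (!live.get(i) && blockLen[i] <= 0) {
        zeroStripeIndices[k++] = (short) i;
      }
    }
    System.out.println("zero-stripe indices: "
        + Arrays.toString(Arrays.copyOf(zeroStripeIndices, k)));
  }
}

Running it prints "zero-stripe indices: [2, 5]": both missing blocks have zero length, so neither needs to be read.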
Use of java.util.BitSet in project Hadoop by Apache.
The class StripedWriter, method initTargetIndices.
private void initTargetIndices() {
  BitSet bitset = reconstructor.getLiveBitSet();
  int m = 0;
  hasValidTargets = false;
  for (int i = 0; i < dataBlkNum + parityBlkNum; i++) {
    // A block that is missing but has data is a reconstruction target.
    if (!bitset.get(i)) {
      if (reconstructor.getBlockLen(i) > 0) {
        if (m < targets.length) {
          targetIndices[m++] = (short) i;
          hasValidTargets = true;
        }
      }
    }
  }
}
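Both initZeroStrip and initTargetIndices enumerate missing indices by testing !bitset.get(i) for every i. java.util.BitSet also offers nextClearBit, which jumps directly from one clear bit to the next; a small sketch with made-up live bits:

import java.util.BitSet;

public class NextClearBitDemo {
  public static void main(String[] args) {
    final int totalBlkNum = 9;
    BitSet live = new BitSet(totalBlkNum);
    live.set(0, totalBlkNum);
    live.clear(2);
    live.clear(8);

    // Enumerate only the clear (missing) bits instead of testing every
    // index with !live.get(i).
    for (int i = live.nextClearBit(0); i < totalBlkNum;
        i = live.nextClearBit(i + 1)) {
      System.out.println("missing internal block index: " + i);
    }
  }
}

This prints the missing indices 2 and 8 without touching the live ones; the i < totalBlkNum bound is needed because a BitSet is logically followed by an infinite run of clear bits.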
Use of java.util.BitSet in project Hadoop by Apache.
The class BlockManager, method countReplicasForStripedBlock.
/**
 * For a striped block, it is possible that it contains the full number of
 * internal blocks (i.e., 9 by default), but with duplicated replicas of
 * the same internal block. E.g., for the following list of internal blocks
 * b0, b0, b1, b2, b3, b4, b5, b6, b7
 * we have 9 internal blocks, but b8 is actually missing.
 * Use this method to detect that scenario and schedule the necessary
 * reconstruction.
 */
private void countReplicasForStripedBlock(NumberReplicas counters,
    BlockInfoStriped block, Collection<DatanodeDescriptor> nodesCorrupt,
    boolean inStartupSafeMode) {
  BitSet bitSet = new BitSet(block.getTotalBlockNum());
  for (StorageAndBlockIndex si : block.getStorageAndIndexInfos()) {
    StoredReplicaState state = checkReplicaOnStorage(counters, block,
        si.getStorage(), nodesCorrupt, inStartupSafeMode);
    if (state == StoredReplicaState.LIVE) {
      if (!bitSet.get(si.getBlockIndex())) {
        // first live replica seen for this internal block index
        bitSet.set(si.getBlockIndex());
      } else {
        // a duplicate of an internal block already counted as live:
        // reclassify it as redundant
        counters.subtract(StoredReplicaState.LIVE, 1);
        counters.add(StoredReplicaState.REDUNDANT, 1);
      }
    }
  }
}
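The javadoc's scenario can be replayed without any NameNode state. A self-contained sketch of the same BitSet bookkeeping, with the replica states and counters reduced to plain ints (the reported index array below is the b0, b0, b1..b7 example from the comment):

import java.util.BitSet;

public class StripedDuplicateDemo {
  public static void main(String[] args) {
    final int totalBlkNum = 9;
    // The javadoc's example: b0 reported twice, b8 never reported.
    int[] reportedIndices = {0, 0, 1, 2, 3, 4, 5, 6, 7};

    BitSet seen = new BitSet(totalBlkNum);
    int live = 0;
    int redundant = 0;
    for (int idx : reportedIndices) {
      if (!seen.get(idx)) {
        seen.set(idx);   // first replica of this internal block: live
        live++;
      } else {
        redundant++;     // duplicate of an already-counted internal block
      }
    }
    System.out.println("live=" + live + " redundant=" + redundant
        + " missing=" + (totalBlkNum - seen.cardinality()));
    // prints: live=8 redundant=1 missing=1
  }
}

The first replica of each internal block index sets its bit and counts as live; any repeat finds the bit already set and is reclassified as redundant, so the missing b8 still shows up as a deficit of one live internal block.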
Use of java.util.BitSet in project Hadoop by Apache.
The class BlockManager, method chooseSourceDatanodes.
/**
* Parse the data-nodes the block belongs to and choose a certain number
* from them to be the recovery sources.
*
* We prefer nodes that are in DECOMMISSION_INPROGRESS state to other nodes
* since the former do not have write traffic and hence are less busy.
* We do not use already decommissioned nodes as a source.
* Otherwise we randomly choose nodes among those that did not reach their
* replication limits. However, if the recovery work is of the highest
* priority and all nodes have reached their replication limits, we will
* randomly choose the desired number of nodes despite the replication limit.
*
* In addition, form a list of all nodes containing the block
* and calculate its replication numbers.
*
* @param block Block for which a replication source is needed
* @param containingNodes List to be populated with nodes found to contain
* the given block
* @param nodesContainingLiveReplicas List to be populated with nodes found
* to contain live replicas of the given
* block
* @param numReplicas NumberReplicas instance to be initialized with the
* counts of live, corrupt, excess, and decommissioned
* replicas of the given block.
* @param liveBlockIndices List to be populated with indices of healthy
* blocks in a striped block group
* @param priority integer representing replication priority of the given
* block
* @return the array of DatanodeDescriptor of the chosen nodes from which to
* recover the given block
*/
@VisibleForTesting
DatanodeDescriptor[] chooseSourceDatanodes(BlockInfo block,
    List<DatanodeDescriptor> containingNodes,
    List<DatanodeStorageInfo> nodesContainingLiveReplicas,
    NumberReplicas numReplicas, List<Byte> liveBlockIndices, int priority) {
  containingNodes.clear();
  nodesContainingLiveReplicas.clear();
  List<DatanodeDescriptor> srcNodes = new ArrayList<>();
  liveBlockIndices.clear();
  final boolean isStriped = block.isStriped();
  BitSet bitSet = isStriped
      ? new BitSet(((BlockInfoStriped) block).getTotalBlockNum()) : null;
  for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
    final DatanodeDescriptor node = storage.getDatanodeDescriptor();
    final StoredReplicaState state = checkReplicaOnStorage(numReplicas,
        block, storage, corruptReplicas.getNodes(block), false);
    if (state == StoredReplicaState.LIVE) {
      nodesContainingLiveReplicas.add(storage);
    }
    containingNodes.add(node);
    // do not select the replica if it is corrupt or excess
    if (state == StoredReplicaState.CORRUPT
        || state == StoredReplicaState.EXCESS) {
      continue;
    }
    // never select replicas in an unknown state, on already-decommissioned
    // nodes, or on maintenance nodes not suitable for read
    if (state == null || state == StoredReplicaState.DECOMMISSIONED
        || state == StoredReplicaState.MAINTENANCE_NOT_FOR_READ) {
      continue;
    }
    if (priority != LowRedundancyBlocks.QUEUE_HIGHEST_PRIORITY
        && (!node.isDecommissionInProgress() && !node.isEnteringMaintenance())
        && node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams) {
      // already reached replication limit
      continue;
    }
    if (node.getNumberOfBlocksToBeReplicated() >= replicationStreamsHardLimit) {
      continue;
    }
    if (isStriped || srcNodes.isEmpty()) {
      srcNodes.add(node);
      if (isStriped) {
        byte blockIndex = ((BlockInfoStriped) block).getStorageBlockIndex(storage);
        liveBlockIndices.add(blockIndex);
        if (!bitSet.get(blockIndex)) {
          bitSet.set(blockIndex);
        } else if (state == StoredReplicaState.LIVE) {
          // duplicate of an internal block already counted as live
          numReplicas.subtract(StoredReplicaState.LIVE, 1);
          numReplicas.add(StoredReplicaState.REDUNDANT, 1);
        }
      }
      continue;
    }
    // for replicated blocks, randomly switch the chosen source so that we
    // do not deterministically re-select a node that may have failed to
    // replicate the block on previous iterations
    if (ThreadLocalRandom.current().nextBoolean()) {
      srcNodes.set(0, node);
    }
  }
  return srcNodes.toArray(new DatanodeDescriptor[srcNodes.size()]);
}
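The final coin flip deserves a note: for a replicated block only srcNodes.get(0) is kept, and each later eligible node overwrites it with probability 1/2. A toy sketch of just that replacement logic, with made-up node names standing in for DatanodeDescriptors:

import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

public class SourceFlipDemo {
  // Keep a single source, letting each later eligible node overwrite it
  // with probability 1/2, as in the loop above.
  static String pickSource(List<String> eligibleNodes) {
    String src = null;
    for (String node : eligibleNodes) {
      if (src == null || ThreadLocalRandom.current().nextBoolean()) {
        src = node;
      }
    }
    return src;
  }

  public static void main(String[] args) {
    List<String> nodes = List.of("dn1", "dn2", "dn3");
    for (int i = 0; i < 5; i++) {
      System.out.println("chosen source: " + pickSource(nodes));
    }
  }
}

The choice is biased toward later candidates (the last eligible node wins half the time), but the goal is only to avoid deterministically re-selecting a node that failed before, not to pick uniformly.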
Use of java.util.BitSet in project Hadoop by Apache.
The class TestAddOverReplicatedStripedBlocks, method testProcessOverReplicatedAndMissingStripedBlock.
// This test is going to be rewritten in HDFS-10854. Ignoring this test
// temporarily as it fails with the fix for HDFS-10301.
@Ignore
@Test
public void testProcessOverReplicatedAndMissingStripedBlock()
    throws Exception {
  long fileLen = cellSize * dataBlocks;
  DFSTestUtil.createStripedFile(cluster, filePath, null, 1,
      stripesPerBlock, false);
  LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
      filePath.toString(), 0, fileLen);
  LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
  long gs = bg.getBlock().getGenerationStamp();
  String bpid = bg.getBlock().getBlockPoolId();
  long groupId = bg.getBlock().getBlockId();
  Block blk = new Block(groupId, blockSize, gs);
  // only inject GROUP_SIZE - 1 blocks, so there is one block missing
  for (int i = 0; i < groupSize - 1; i++) {
    blk.setBlockId(groupId + i);
    cluster.injectBlocks(i, Arrays.asList(blk), bpid);
  }
  cluster.triggerBlockReports();
  // let an internal block be over-replicated with 2 redundant blocks.
  // Therefore the number of internal blocks exceeds GROUP_SIZE. (5 data
  // blocks + 3 parity blocks + 2 redundant blocks > GROUP_SIZE)
  blk.setBlockId(groupId + 2);
  cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
  cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
  // update blocksMap
  cluster.triggerBlockReports();
  Thread.sleep(2000);
  // add to invalidates
  cluster.triggerHeartbeats();
  // datanodes delete the block
  cluster.triggerHeartbeats();
  // update blocksMap
  cluster.triggerBlockReports();
  // Since one block is missing, the over-replicated blocks will not be
  // deleted until reconstruction happens
  lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0,
      fileLen);
  bg = (LocatedStripedBlock) (lbs.get(0));
  assertEquals(groupSize + 1, bg.getBlockIndices().length);
  assertEquals(groupSize + 1, bg.getLocations().length);
  BitSet set = new BitSet(groupSize);
  for (byte index : bg.getBlockIndices()) {
    set.set(index);
  }
  // every internal block index except the last one should be present
  Assert.assertFalse(set.get(groupSize - 1));
  for (int i = 0; i < groupSize - 1; i++) {
    assertTrue(set.get(i));
  }
}
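The closing assertions rely on the BitSet recording which internal-block indices were reported at all, regardless of how many replicas carry each index. The same check in isolation, with a hypothetical index array in place of bg.getBlockIndices():

import java.util.BitSet;

public class IndexCoverageDemo {
  public static void main(String[] args) {
    final int groupSize = 9;
    // Hypothetical result of bg.getBlockIndices(): index 2 is reported by
    // three datanodes, index 8 by none.
    byte[] blockIndices = {0, 1, 2, 2, 3, 4, 5, 6, 7, 2};

    BitSet set = new BitSet(groupSize);
    for (byte index : blockIndices) {
      set.set(index);
    }
    // Same checks as the test's assertions.
    for (int i = 0; i < groupSize - 1; i++) {
      if (!set.get(i)) {
        throw new AssertionError("index " + i + " should be covered");
      }
    }
    if (set.get(groupSize - 1)) {
      throw new AssertionError("index " + (groupSize - 1) + " should be missing");
    }
    System.out.println("covered=" + set.cardinality() + " of " + groupSize);
  }
}

Duplicates collapse onto the same bit, so cardinality() counts distinct indices and the one clear bit pinpoints the missing internal block.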