Use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState in project hadoop by apache.
Class BlockManager, method processFirstBlockReport.
/**
* processFirstBlockReport is intended only for processing "initial" block
* reports, the first block report received from a DN after it registers.
* It just adds all the valid replicas to the datanode, without calculating
* a toRemove list (since there won't be any). It also silently discards
* any invalid blocks, thereby deferring their processing until
* the next block report.
* @param storageInfo - DatanodeStorageInfo that sent the report
* @param report - the initial block report, to be processed
* @throws IOException
*/
private void processFirstBlockReport(final DatanodeStorageInfo storageInfo,
    final BlockListAsLongs report) throws IOException {
  if (report == null) return;
  assert (namesystem.hasWriteLock());
  assert (storageInfo.getBlockReportCount() == 0);

  for (BlockReportReplica iblk : report) {
    ReplicaState reportedState = iblk.getState();

    if (LOG.isDebugEnabled()) {
      LOG.debug("Initial report of block " + iblk.getBlockName() + " on "
          + storageInfo.getDatanodeDescriptor() + " size " + iblk.getNumBytes()
          + " replicaState = " + reportedState);
    }

    if (shouldPostponeBlocksFromFuture && isGenStampInFuture(iblk)) {
      queueReportedBlock(storageInfo, iblk, reportedState,
          QUEUE_REASON_FUTURE_GENSTAMP);
      continue;
    }

    BlockInfo storedBlock = getStoredBlock(iblk);

    // If the block does not belong to any file, check whether it violates
    // an integrity assumption of the NameNode and skip it.
    if (storedBlock == null) {
      bmSafeMode.checkBlocksWithFutureGS(iblk);
      continue;
    }

    // If block is corrupt, mark it and continue to next block.
    BlockUCState ucState = storedBlock.getBlockUCState();
    BlockToMarkCorrupt c = checkReplicaCorrupt(iblk, reportedState,
        storedBlock, ucState, storageInfo.getDatanodeDescriptor());
    if (c != null) {
      if (shouldPostponeBlocksFromFuture) {
        // In the Standby, we may receive a block report for a file that we
        // just have an out-of-date gen-stamp or state for, for example.
        queueReportedBlock(storageInfo, iblk, reportedState,
            QUEUE_REASON_CORRUPT_STATE);
      } else {
        markBlockAsCorrupt(c, storageInfo, storageInfo.getDatanodeDescriptor());
      }
      continue;
    }

    // If block is under construction, add this replica to its list
    if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
      storedBlock.getUnderConstructionFeature()
          .addReplicaIfNotPresent(storageInfo, iblk, reportedState);
      // refer HDFS-5283
      if (namesystem.isInSnapshot(storedBlock.getBlockCollectionId())) {
        int numOfReplicas = storedBlock.getUnderConstructionFeature()
            .getNumExpectedLocations();
        bmSafeMode.incrementSafeBlockCount(numOfReplicas, storedBlock);
      }
      // and fall through to next clause
    }
    // add replica if appropriate
    if (reportedState == ReplicaState.FINALIZED) {
      addStoredBlockImmediate(storedBlock, iblk, storageInfo);
    }
  }
}
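The per-replica decision above boils down to this: once a reported block has been resolved to a stored block and is neither postponed nor corrupt, a replica of an under-construction block is recorded on the block's UnderConstructionFeature, and, independently, only a FINALIZED replica is added to the storage immediately; all other states are silently dropped until the next block report. The helper below is a hypothetical, self-contained sketch of that rule (FirstReportActions is not part of BlockManager); it assumes only the real ReplicaState enum from HdfsServerConstants.

import java.util.EnumSet;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;

// Hypothetical helper, not part of BlockManager: mirrors how
// processFirstBlockReport treats a reported replica once the stored block
// has been resolved and found neither postponed nor corrupt.
final class FirstReportActions {

  enum Action { ADD_TO_UNDER_CONSTRUCTION, ADD_STORED_IMMEDIATELY }

  static EnumSet<Action> forReplica(ReplicaState reportedState,
      boolean storedBlockUnderConstruction) {
    EnumSet<Action> actions = EnumSet.noneOf(Action.class);
    if (storedBlockUnderConstruction) {
      // the replica is tracked on the block's UnderConstructionFeature ...
      actions.add(Action.ADD_TO_UNDER_CONSTRUCTION);
    }
    // ... and, independently, only FINALIZED replicas are added to the
    // storage right away; other states are ignored until the next report
    if (reportedState == ReplicaState.FINALIZED) {
      actions.add(Action.ADD_STORED_IMMEDIATELY);
    }
    return actions;
  }
}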
Use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState in project hadoop by apache.
Class BlockManager, method reportDiffSorted.
private void reportDiffSorted(DatanodeStorageInfo storageInfo,
    Iterable<BlockReportReplica> newReport,
    Collection<BlockInfoToAdd> toAdd,         // add to DatanodeDescriptor
    Collection<BlockInfo> toRemove,           // remove from DatanodeDescriptor
    Collection<Block> toInvalidate,           // should be removed from DN
    Collection<BlockToMarkCorrupt> toCorrupt, // add to corrupt replicas list
    Collection<StatefulBlockInfo> toUC) {     // add to under-construction list
  // Both the reported blocks and the storage's stored blocks must be
  // sorted by block ID.
  Iterator<BlockInfo> storageBlocksIterator = storageInfo.getBlockIterator();
  DatanodeDescriptor dn = storageInfo.getDatanodeDescriptor();
  BlockInfo storageBlock = null;

  for (BlockReportReplica replica : newReport) {
    long replicaID = replica.getBlockId();
    if (BlockIdManager.isStripedBlockID(replicaID)
        && (!hasNonEcBlockUsingStripedID || !blocksMap.containsBlock(replica))) {
      replicaID = BlockIdManager.convertToStripedID(replicaID);
    }

    ReplicaState reportedState = replica.getState();

    if (LOG.isDebugEnabled()) {
      LOG.debug("Reported block " + replica + " on " + dn + " size "
          + replica.getNumBytes() + " replicaState = " + reportedState);
    }

    if (shouldPostponeBlocksFromFuture && isGenStampInFuture(replica)) {
      queueReportedBlock(storageInfo, replica, reportedState,
          QUEUE_REASON_FUTURE_GENSTAMP);
      continue;
    }

    if (storageBlock == null && storageBlocksIterator.hasNext()) {
      storageBlock = storageBlocksIterator.next();
    }

    do {
      int cmp;
      if (storageBlock == null
          || (cmp = Long.compare(replicaID, storageBlock.getBlockId())) < 0) {
        // Check if block is available in NN but not yet on this storage
        BlockInfo nnBlock = blocksMap.getStoredBlock(new Block(replicaID));
        if (nnBlock != null) {
          reportDiffSortedInner(storageInfo, replica, reportedState, nnBlock,
              toAdd, toCorrupt, toUC);
        } else {
          // Replica not found anywhere, so it should be invalidated
          toInvalidate.add(new Block(replica));
        }
        break;
      } else if (cmp == 0) {
        // Replica matched the current storage block
        reportDiffSortedInner(storageInfo, replica, reportedState, storageBlock,
            toAdd, toCorrupt, toUC);
        storageBlock = null;
      } else {
        // Remove all stored blocks with IDs lower than replica
        do {
          toRemove.add(storageBlock);
          storageBlock = storageBlocksIterator.hasNext()
              ? storageBlocksIterator.next() : null;
        } while (storageBlock != null
            && Long.compare(replicaID, storageBlock.getBlockId()) > 0);
      }
    } while (storageBlock != null);
  }

  // Iterate any remaining stored blocks that have not been reported
  // and remove them.
  while (storageBlocksIterator.hasNext()) {
    toRemove.add(storageBlocksIterator.next());
  }
}
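Stripped of the HDFS-specific handling, reportDiffSorted is a merge-style diff over two block-ID sequences that are both sorted in ascending order. The self-contained sketch below (plain java.util types only, not HDFS API) shows the same walk: IDs seen only in the report correspond to the toAdd/toInvalidate cases, IDs present on both sides go through reportDiffSortedInner, and stored IDs that were never reported end up in toRemove.

import java.util.Iterator;
import java.util.List;

// Self-contained sketch (not HDFS API) of the sorted merge-diff performed by
// reportDiffSorted: both ID sequences are assumed to be sorted ascending.
final class SortedDiffSketch {

  static void diff(Iterator<Long> reported, Iterator<Long> stored,
      List<Long> reportedOnly, List<Long> storedOnly, List<Long> inBoth) {
    Long s = stored.hasNext() ? stored.next() : null;
    while (reported.hasNext()) {
      long r = reported.next();
      // stored IDs smaller than the reported one were not reported at all
      while (s != null && s < r) {
        storedOnly.add(s);
        s = stored.hasNext() ? stored.next() : null;
      }
      if (s != null && s == r) {
        // present on both sides: the reportDiffSortedInner case
        inBoth.add(r);
        s = stored.hasNext() ? stored.next() : null;
      } else {
        // reported but not stored here: the toAdd / toInvalidate case
        reportedOnly.add(r);
      }
    }
    // whatever is left on the stored side was never reported: the toRemove case
    while (s != null) {
      storedOnly.add(s);
      s = stored.hasNext() ? stored.next() : null;
    }
  }
}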
Use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState in project hadoop by apache.
Class TestWriteToReplica, method testEqualityOfReplicaMap.
/**
 * Compare the replica map before and after the restart.
 */
private void testEqualityOfReplicaMap(ReplicaMap oldReplicaMap,
    ReplicaMap newReplicaMap, List<String> bpidList) {
  // Traverse the new replica map and remove the corresponding replicaInfo
  // from oldReplicaMap.
  for (String bpid : bpidList) {
    for (ReplicaInfo info : newReplicaMap.replicas(bpid)) {
      assertNotNull("Volume map before restart didn't contain the "
          + "blockpool: " + bpid, oldReplicaMap.replicas(bpid));

      ReplicaInfo oldReplicaInfo = oldReplicaMap.get(bpid, info.getBlockId());
      // Every block present after the restart must also have been in the
      // volume map before the restart.
      assertNotNull("Old replica map didn't contain block with blockId: "
          + info.getBlockId(), oldReplicaInfo);

      ReplicaState oldState = oldReplicaInfo.getState();
      // After a restart, RWR, RBW and RUR replicas all come back as RWR.
      if (info.getState() == ReplicaState.RWR) {
        if (oldState == ReplicaState.RWR || oldState == ReplicaState.RBW
            || oldState == ReplicaState.RUR) {
          oldReplicaMap.remove(bpid, oldReplicaInfo);
        }
      } else if (info.getState() == ReplicaState.FINALIZED
          && oldState == ReplicaState.FINALIZED) {
        oldReplicaMap.remove(bpid, oldReplicaInfo);
      }
    }
  }

  // TEMPORARY (in-pipeline) replicas are not persisted; any other replica
  // left in the old map means we failed to persist it across the restart.
  for (String bpid : bpidList) {
    for (ReplicaInfo replicaInfo : oldReplicaMap.replicas(bpid)) {
      if (replicaInfo.getState() != ReplicaState.TEMPORARY) {
        Assert.fail("After datanode restart we lost the block with blockId: "
            + replicaInfo.getBlockId());
      }
    }
  }
}
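The test encodes the replica-state mapping expected across a DataNode restart: FINALIZED replicas come back FINALIZED, RBW/RWR/RUR replicas come back as RWR, and TEMPORARY replicas are not persisted at all. A hypothetical helper (not part of the test, assuming only the real ReplicaState enum) stating that mapping directly might look like this:

import java.util.Optional;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;

// Hypothetical helper, not part of TestWriteToReplica: the replica state the
// test expects to find after a DataNode restart, given the state before it.
final class ExpectedStateAfterRestart {

  static Optional<ReplicaState> of(ReplicaState before) {
    switch (before) {
      case FINALIZED:
        // finalized replicas are persisted and survive unchanged
        return Optional.of(ReplicaState.FINALIZED);
      case RBW:
      case RWR:
      case RUR:
        // replicas that were being written or recovered come back as RWR
        return Optional.of(ReplicaState.RWR);
      case TEMPORARY:
      default:
        // temporary replicas are not persisted across a restart
        return Optional.empty();
    }
  }
}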