Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica in project hadoop by apache.
Class BlockReportTestBase, method getBlockReports.
// Generate a block report, optionally corrupting the generation
// stamp and/or length of one block.
private static StorageBlockReport[] getBlockReports(DataNode dn, String bpid,
    boolean corruptOneBlockGs, boolean corruptOneBlockLen) {
  Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
      dn.getFSDataset().getBlockReports(bpid);

  // Send block report
  StorageBlockReport[] reports =
      new StorageBlockReport[perVolumeBlockLists.size()];
  boolean corruptedGs = false;
  boolean corruptedLen = false;

  int reportIndex = 0;
  for (Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair :
      perVolumeBlockLists.entrySet()) {
    DatanodeStorage dnStorage = kvPair.getKey();
    BlockListAsLongs blockList = kvPair.getValue();

    // Walk the list of blocks until we find one each to corrupt the
    // generation stamp and length, if so requested.
    BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
    for (BlockReportReplica block : blockList) {
      if (corruptOneBlockGs && !corruptedGs) {
        long gsOld = block.getGenerationStamp();
        long gsNew;
        do {
          gsNew = rand.nextInt();
        } while (gsNew == gsOld);
        block.setGenerationStamp(gsNew);
        LOG.info("Corrupted the GS for block ID " + block);
        corruptedGs = true;
      } else if (corruptOneBlockLen && !corruptedLen) {
        long lenOld = block.getNumBytes();
        long lenNew;
        do {
          lenNew = rand.nextInt((int) lenOld - 1);
        } while (lenNew == lenOld);
        block.setNumBytes(lenNew);
        LOG.info("Corrupted the length for block ID " + block);
        corruptedLen = true;
      }
      builder.add(new BlockReportReplica(block));
    }

    reports[reportIndex++] =
        new StorageBlockReport(dnStorage, builder.build());
  }
  return reports;
}
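A hedged usage sketch: in BlockReportTestBase the array returned above is delivered to the NameNode through the class's abstract sendBlockReports helper, after fetching the DataNode's registration for the block pool. The MiniDFSCluster wiring (dn, poolId) and the exact sendBlockReports overload are assumptions based on that test base; the StorageBlockReport and BlockListAsLongs getters are the standard accessors.

// Inside a BlockReportTestBase test method (throws IOException):
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
// Corrupt one generation stamp, leave all lengths intact.
StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
for (StorageBlockReport report : reports) {
  LOG.info("Storage " + report.getStorage().getStorageID()
      + " reports " + report.getBlocks().getNumberOfBlocks() + " blocks");
}
// Deliver the (partly corrupted) reports to the NameNode under test.
sendBlockReports(dnR, poolId, reports);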
Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica in project hadoop by apache.
Class BlockManager, method processFirstBlockReport.
/**
* processFirstBlockReport is intended only for processing "initial" block
* reports, the first block report received from a DN after it registers.
* It just adds all the valid replicas to the datanode, without calculating
* a toRemove list (since there won't be any). It also silently discards
* any invalid blocks, thereby deferring their processing until
* the next block report.
* @param storageInfo - DatanodeStorageInfo that sent the report
* @param report - the initial block report, to be processed
* @throws IOException
*/
private void processFirstBlockReport(
    final DatanodeStorageInfo storageInfo,
    final BlockListAsLongs report) throws IOException {
  if (report == null) {
    return;
  }
  assert (namesystem.hasWriteLock());
  assert (storageInfo.getBlockReportCount() == 0);

  for (BlockReportReplica iblk : report) {
    ReplicaState reportedState = iblk.getState();

    if (LOG.isDebugEnabled()) {
      LOG.debug("Initial report of block " + iblk.getBlockName()
          + " on " + storageInfo.getDatanodeDescriptor()
          + " size " + iblk.getNumBytes()
          + " replicaState = " + reportedState);
    }

    if (shouldPostponeBlocksFromFuture && isGenStampInFuture(iblk)) {
      queueReportedBlock(storageInfo, iblk, reportedState,
          QUEUE_REASON_FUTURE_GENSTAMP);
      continue;
    }

    BlockInfo storedBlock = getStoredBlock(iblk);

    // If the block does not belong to any file, check whether it violates
    // an integrity assumption of the NameNode (e.g. a generation stamp
    // from the future) and move on to the next block.
    if (storedBlock == null) {
      bmSafeMode.checkBlocksWithFutureGS(iblk);
      continue;
    }

    // If block is corrupt, mark it and continue to next block.
    BlockUCState ucState = storedBlock.getBlockUCState();
    BlockToMarkCorrupt c = checkReplicaCorrupt(iblk, reportedState,
        storedBlock, ucState, storageInfo.getDatanodeDescriptor());
    if (c != null) {
      if (shouldPostponeBlocksFromFuture) {
        // On the Standby, we may receive a block report for a file for
        // which we only have an out-of-date gen-stamp or state, for example.
        queueReportedBlock(storageInfo, iblk, reportedState,
            QUEUE_REASON_CORRUPT_STATE);
      } else {
        markBlockAsCorrupt(c, storageInfo,
            storageInfo.getDatanodeDescriptor());
      }
      continue;
    }

    // If block is under construction, add this replica to its list.
    if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
      storedBlock.getUnderConstructionFeature()
          .addReplicaIfNotPresent(storageInfo, iblk, reportedState);
      // Refer to HDFS-5283.
      if (namesystem.isInSnapshot(storedBlock.getBlockCollectionId())) {
        int numOfReplicas = storedBlock.getUnderConstructionFeature()
            .getNumExpectedLocations();
        bmSafeMode.incrementSafeBlockCount(numOfReplicas, storedBlock);
      }
      // ...and fall through to the next clause.
    }

    // Add replica if appropriate.
    if (reportedState == ReplicaState.FINALIZED) {
      addStoredBlockImmediate(storedBlock, iblk, storageInfo);
    }
  }
}
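For orientation, the caller chooses between this fast path and the regular diff-based path depending on whether the storage has reported before. A simplified, hedged sketch of that dispatch (the real BlockManager#processReport also handles report leases, contexts and logging, and its exact signatures vary across versions):

// Inside the block report handling path of BlockManager (simplified):
if (storageInfo.getBlockReportCount() == 0) {
  // First report from this storage after registration: just add replicas,
  // nothing can be stale yet.
  processFirstBlockReport(storageInfo, newReport);
} else {
  // Subsequent reports: compute the full diff (toAdd/toRemove/toInvalidate/
  // toCorrupt/toUC) against the blocks already recorded for this storage.
  processReport(storageInfo, newReport);
}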
Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica in project hadoop by apache.
Class BlockManager, method reportDiffSorted.
private void reportDiffSorted(DatanodeStorageInfo storageInfo,
    Iterable<BlockReportReplica> newReport,
    Collection<BlockInfoToAdd> toAdd,          // add to DatanodeDescriptor
    Collection<BlockInfo> toRemove,            // remove from DatanodeDescriptor
    Collection<Block> toInvalidate,            // should be removed from DN
    Collection<BlockToMarkCorrupt> toCorrupt,  // add to corrupt replicas list
    Collection<StatefulBlockInfo> toUC) {      // add to under-construction list
  // Both the reported replicas and this storage's stored blocks must be
  // sorted by block ID.
  Iterator<BlockInfo> storageBlocksIterator = storageInfo.getBlockIterator();
  DatanodeDescriptor dn = storageInfo.getDatanodeDescriptor();
  BlockInfo storageBlock = null;

  for (BlockReportReplica replica : newReport) {
    long replicaID = replica.getBlockId();
    if (BlockIdManager.isStripedBlockID(replicaID)
        && (!hasNonEcBlockUsingStripedID
            || !blocksMap.containsBlock(replica))) {
      replicaID = BlockIdManager.convertToStripedID(replicaID);
    }

    ReplicaState reportedState = replica.getState();

    if (LOG.isDebugEnabled()) {
      LOG.debug("Reported block " + replica + " on " + dn
          + " size " + replica.getNumBytes()
          + " replicaState = " + reportedState);
    }

    if (shouldPostponeBlocksFromFuture && isGenStampInFuture(replica)) {
      queueReportedBlock(storageInfo, replica, reportedState,
          QUEUE_REASON_FUTURE_GENSTAMP);
      continue;
    }

    if (storageBlock == null && storageBlocksIterator.hasNext()) {
      storageBlock = storageBlocksIterator.next();
    }

    do {
      int cmp;
      if (storageBlock == null
          || (cmp = Long.compare(replicaID, storageBlock.getBlockId())) < 0) {
        // Check if block is available in NN but not yet on this storage
        BlockInfo nnBlock = blocksMap.getStoredBlock(new Block(replicaID));
        if (nnBlock != null) {
          reportDiffSortedInner(storageInfo, replica, reportedState,
              nnBlock, toAdd, toCorrupt, toUC);
        } else {
          // Replica not found anywhere so it should be invalidated
          toInvalidate.add(new Block(replica));
        }
        break;
      } else if (cmp == 0) {
        // Replica matched current storageblock
        reportDiffSortedInner(storageInfo, replica, reportedState,
            storageBlock, toAdd, toCorrupt, toUC);
        storageBlock = null;
      } else {
        // Remove all stored blocks with IDs lower than replica
        do {
          toRemove.add(storageBlock);
          storageBlock = storageBlocksIterator.hasNext()
              ? storageBlocksIterator.next() : null;
        } while (storageBlock != null
            && Long.compare(replicaID, storageBlock.getBlockId()) > 0);
      }
    } while (storageBlock != null);
  }

  // Iterate any remaining blocks that have not been reported and remove them
  while (storageBlocksIterator.hasNext()) {
    toRemove.add(storageBlocksIterator.next());
  }
}
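The loop above is essentially a merge of two sequences sorted by block ID: reported replicas drive the outer loop, stored blocks are consumed from the iterator, and whichever side runs ahead determines what is added or removed. A self-contained, hedged illustration of the same idea in plain Java (block IDs only, no BlockManager APIs; the real method additionally consults the cluster-wide blocksMap to decide between toAdd and toInvalidate):

import java.util.Iterator;
import java.util.List;

// 'reported' is what the DataNode sent, 'stored' is what the NameNode has
// recorded for this storage; both are sorted ascending by block ID.
static void sortedDiff(List<Long> reported, List<Long> stored,
    List<Long> toAdd, List<Long> toRemove) {
  Iterator<Long> storedIt = stored.iterator();
  Long storedId = storedIt.hasNext() ? storedIt.next() : null;
  for (long reportedId : reported) {
    // Stored IDs smaller than the reported one were not reported: remove them.
    while (storedId != null && storedId < reportedId) {
      toRemove.add(storedId);
      storedId = storedIt.hasNext() ? storedIt.next() : null;
    }
    if (storedId != null && storedId == reportedId) {
      // Matched: consume the stored ID, nothing to change.
      storedId = storedIt.hasNext() ? storedIt.next() : null;
    } else {
      // Reported but not stored on this storage: add it.
      toAdd.add(reportedId);
    }
  }
  // Anything left on the stored side was not reported at all.
  while (storedId != null) {
    toRemove.add(storedId);
    storedId = storedIt.hasNext() ? storedIt.next() : null;
  }
}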
Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica in project hadoop by apache.
Class TestBlockManagerSafeMode, method injectBlocksWithFugureGS.
private void injectBlocksWithFugureGS(long numBytesInFuture) {
  BlockReportReplica brr = mock(BlockReportReplica.class);
  when(brr.getBytesOnDisk()).thenReturn(numBytesInFuture);
  bmSafeMode.checkBlocksWithFutureGS(brr);
}
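A hedged usage sketch: the helper mocks a replica whose on-disk size is counted as "bytes in future" by the safe-mode tracker, so a test can assert the counter afterwards. The getBytesInFuture accessor and the assertion below are assumptions based on how TestBlockManagerSafeMode exercises BlockManagerSafeMode.

// Inject 100 future-genstamp bytes and check that safe mode accounts for them.
injectBlocksWithFugureGS(100L);
assertEquals(100L, bmSafeMode.getBytesInFuture());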
Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica in project hadoop by apache.
Class TestBlockListAsLongs, method checkReplicas.
private void checkReplicas(Map<Long, Replica> expectedReplicas,
    BlockListAsLongs decodedBlocks) {
  assertEquals(expectedReplicas.size(), decodedBlocks.getNumberOfBlocks());

  Map<Long, Replica> reportReplicas = new HashMap<>(expectedReplicas);
  for (BlockReportReplica replica : decodedBlocks) {
    assertNotNull(replica);
    Replica expected = reportReplicas.remove(replica.getBlockId());
    assertNotNull(expected);
    assertEquals("wrong bytes",
        expected.getNumBytes(), replica.getNumBytes());
    assertEquals("wrong genstamp",
        expected.getGenerationStamp(), replica.getGenerationStamp());
    assertEquals("wrong replica state",
        expected.getState(), replica.getState());
  }
  assertTrue(reportReplicas.isEmpty());
}
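In TestBlockListAsLongs this check is typically driven by an encode/decode round trip. A hedged sketch of such a round trip inside that test class, assuming the decodeLongs/getBlockListAsLongs pair (the legacy long-array codec); the block IDs, lengths and generation stamps are made up:

// Build a small report; wrapping a plain Block is assumed to yield a
// FINALIZED BlockReportReplica.
BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
builder.add(new BlockReportReplica(new Block(1L, 1024L, 1001L)));
builder.add(new BlockReportReplica(new Block(2L, 2048L, 1002L)));
BlockListAsLongs original = builder.build();

// Round-trip through the legacy encoding: a flat long[] with a small header
// followed by per-block values, reversed by decodeLongs().
List<Long> longs = new ArrayList<>();
for (long l : original.getBlockListAsLongs()) {
  longs.add(l);
}
BlockListAsLongs decoded = BlockListAsLongs.decodeLongs(longs);
assertEquals(original.getNumberOfBlocks(), decoded.getNumberOfBlocks());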