Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by Apache.
The class TestIncrementalBlockReports, method injectBlockReceived.
/**
 * Inject a fake 'received' block into the BPServiceActor state.
 */
private void injectBlockReceived() {
  ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(
      getDummyBlock(), BlockStatus.RECEIVED_BLOCK, null);
  DatanodeStorage s = singletonDn.getFSDataset().getStorage(storageUuid);
  actor.getIbrManager().notifyNamenodeBlock(rdbi, s, false);
}
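The test wires together three objects: a block, a ReceivedDeletedBlockInfo wrapping it, and the DatanodeStorage it notionally lives on. Below is a minimal, self-contained sketch of those ingredients, independent of the test harness; the class name and the block id, length, and generation stamp are placeholder values:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;

public class IbrIngredientsSketch {
  public static void main(String[] args) {
    // A dummy block: id, length, and generation stamp are arbitrary.
    Block dummy = new Block(5678568745089L, 512, 1000);
    // RECEIVED_BLOCK marks a newly received block; the third argument
    // is the optional deletion hint, unused here.
    ReceivedDeletedBlockInfo rdbi =
        new ReceivedDeletedBlockInfo(dummy, BlockStatus.RECEIVED_BLOCK, null);
    // A storage is identified by its UUID; generateUuid() produces a
    // fresh "DS-..." identifier.
    DatanodeStorage storage = new DatanodeStorage(DatanodeStorage.generateUuid());
    System.out.println(rdbi + " on " + storage);
  }
}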
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by Apache.
The class TestIncrementalBrVariations, method verifyIncrementalBlockReports.
public void verifyIncrementalBlockReports(boolean splitReports) throws IOException {
  // Get the block list for the file with the block locations.
  LocatedBlocks blocks = createFileGetBlocks(GenericTestUtils.getMethodName());
  try (FsDatasetSpi.FsVolumeReferences volumes =
      dn0.getFSDataset().getFsVolumeReferences()) {
    // We will send 'fake' incremental block reports to the NN that look
    // like they originated from DN 0.
    StorageReceivedDeletedBlocks[] reports =
        new StorageReceivedDeletedBlocks[volumes.size()];
    // Lie to the NN that one block on each storage has been deleted.
    for (int i = 0; i < reports.length; ++i) {
      FsVolumeSpi volume = volumes.get(i);
      boolean foundBlockOnStorage = false;
      ReceivedDeletedBlockInfo[] rdbi = new ReceivedDeletedBlockInfo[1];
      // Find the first block on this storage and mark it as deleted for
      // the report.
      for (LocatedBlock block : blocks.getLocatedBlocks()) {
        if (block.getStorageIDs()[0].equals(volume.getStorageID())) {
          rdbi[0] = new ReceivedDeletedBlockInfo(
              block.getBlock().getLocalBlock(),
              ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null);
          foundBlockOnStorage = true;
          break;
        }
      }
      assertTrue(foundBlockOnStorage);
      reports[i] = new StorageReceivedDeletedBlocks(
          new DatanodeStorage(volume.getStorageID()), rdbi);
      if (splitReports) {
        // If we are splitting reports then send the report for this storage now.
        StorageReceivedDeletedBlocks[] singletonReport = { reports[i] };
        cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, singletonReport);
      }
    }
    if (!splitReports) {
      // Send a combined report.
      cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);
    }
    // Make sure that the deleted block from each storage was picked up
    // by the NameNode. IBRs are async; make sure the NN processes
    // all of them.
    cluster.getNamesystem().getBlockManager().flushBlockOps();
    assertThat(cluster.getNamesystem().getMissingBlocksCount(), is((long) reports.length));
  }
}
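Whether reports are split or combined changes only how many RPCs carry the same per-storage payloads. A rough sketch of the two array shapes, using made-up storage IDs and empty block lists purely to show the structure; the RPC calls are left as comments since they need a live NameNode:

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;

public class ReportShapesSketch {
  public static void main(String[] args) {
    ReceivedDeletedBlockInfo[] noBlocks = new ReceivedDeletedBlockInfo[0];
    // Combined shape: one array element per storage, sent in one RPC.
    StorageReceivedDeletedBlocks[] combined = {
        new StorageReceivedDeletedBlocks(new DatanodeStorage("DS-storage-1"), noBlocks),
        new StorageReceivedDeletedBlocks(new DatanodeStorage("DS-storage-2"), noBlocks)
    };
    // rpc.blockReceivedAndDeleted(reg, poolId, combined);

    // Split shape: a singleton array per storage, one RPC each.
    for (StorageReceivedDeletedBlocks report : combined) {
      StorageReceivedDeletedBlocks[] singleton = { report };
      // rpc.blockReceivedAndDeleted(reg, poolId, singleton);
      System.out.println(singleton.length + " report for " + report.getStorage());
    }
  }
}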
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by Apache.
The class DatanodeProtocolServerSideTranslatorPB, method blockReceivedAndDeleted.
@Override
public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(RpcController controller,
    BlockReceivedAndDeletedRequestProto request) throws ServiceException {
  List<StorageReceivedDeletedBlocksProto> sBlocks = request.getBlocksList();
  StorageReceivedDeletedBlocks[] info = new StorageReceivedDeletedBlocks[sBlocks.size()];
  for (int i = 0; i < sBlocks.size(); i++) {
    StorageReceivedDeletedBlocksProto sBlock = sBlocks.get(i);
    List<ReceivedDeletedBlockInfoProto> list = sBlock.getBlocksList();
    ReceivedDeletedBlockInfo[] rdBlocks = new ReceivedDeletedBlockInfo[list.size()];
    for (int j = 0; j < list.size(); j++) {
      rdBlocks[j] = PBHelper.convert(list.get(j));
    }
    if (sBlock.hasStorage()) {
      info[i] = new StorageReceivedDeletedBlocks(
          PBHelperClient.convert(sBlock.getStorage()), rdBlocks);
    } else {
      info[i] = new StorageReceivedDeletedBlocks(
          new DatanodeStorage(sBlock.getStorageUuid()), rdBlocks);
    }
  }
  try {
    impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(), info);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE;
}
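The else branch is a compatibility path: when the request carries only a bare storage UUID rather than a full storage proto, the single-argument DatanodeStorage constructor fills in defaults for the missing fields. A small sketch of that defaulting behavior; the class name and UUID are invented for illustration:

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

public class StorageDefaultsSketch {
  public static void main(String[] args) {
    // Only the UUID is known; state and storage type fall back to
    // NORMAL and the default storage type.
    DatanodeStorage fromUuid = new DatanodeStorage("DS-f061cbd7-example");
    System.out.println(fromUuid.getStorageID());
    System.out.println(fromUuid.getState());       // NORMAL
    System.out.println(fromUuid.getStorageType()); // DISK (the default)
  }
}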
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by Apache.
The class IncrementalBlockReportManager, method generateIBRs.
private synchronized StorageReceivedDeletedBlocks[] generateIBRs() {
  final List<StorageReceivedDeletedBlocks> reports = new ArrayList<>(pendingIBRs.size());
  for (Map.Entry<DatanodeStorage, PerStorageIBR> entry : pendingIBRs.entrySet()) {
    final PerStorageIBR perStorage = entry.getValue();
    // Send newly-received and deleted block IDs to the NameNode.
    final ReceivedDeletedBlockInfo[] rdbi = perStorage.removeAll();
    if (rdbi != null) {
      reports.add(new StorageReceivedDeletedBlocks(entry.getKey(), rdbi));
    }
  }
  readyToSend = false;
  return reports.toArray(new StorageReceivedDeletedBlocks[reports.size()]);
}
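The method is a keyed drain: each storage accumulates its own pending buffer, and report generation empties every buffer in a single pass. A self-contained sketch of the same pattern using plain collections, since PerStorageIBR itself is package-private; PendingBuffer is a stand-in that mirrors the null-when-empty contract of removeAll():

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class KeyedDrainSketch {
  // Stand-in for PerStorageIBR: a buffer that hands over its contents once.
  static class PendingBuffer {
    private final List<String> items = new ArrayList<>();
    void add(String item) { items.add(item); }
    // Returns null when empty, mirroring PerStorageIBR.removeAll().
    String[] removeAll() {
      if (items.isEmpty()) return null;
      String[] out = items.toArray(new String[0]);
      items.clear();
      return out;
    }
  }

  public static void main(String[] args) {
    Map<String, PendingBuffer> pending = new HashMap<>();
    pending.computeIfAbsent("DS-1", k -> new PendingBuffer()).add("blk_1001");
    pending.computeIfAbsent("DS-2", k -> new PendingBuffer()).add("blk_1002");

    // Drain every per-storage buffer into one report list.
    List<String> reports = new ArrayList<>();
    for (Map.Entry<String, PendingBuffer> e : pending.entrySet()) {
      String[] drained = e.getValue().removeAll();
      if (drained != null) {
        reports.add(e.getKey() + ": " + String.join(",", drained));
      }
    }
    System.out.println(reports);
  }
}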
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by Apache.
The class FsDatasetImpl, method activateVolume.
/**
 * Activate a volume to serve requests.
 * @throws IOException if the storage UUID already exists.
 */
private void activateVolume(ReplicaMap replicaMap, Storage.StorageDirectory sd,
    StorageType storageType, FsVolumeReference ref) throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    DatanodeStorage dnStorage = storageMap.get(sd.getStorageUuid());
    if (dnStorage != null) {
      final String errorMsg = String.format(
          "Found duplicated storage UUID: %s in %s.",
          sd.getStorageUuid(), sd.getVersionFile());
      LOG.error(errorMsg);
      throw new IOException(errorMsg);
    }
    volumeMap.addAll(replicaMap);
    storageMap.put(sd.getStorageUuid(),
        new DatanodeStorage(sd.getStorageUuid(), DatanodeStorage.State.NORMAL, storageType));
    asyncDiskService.addVolume((FsVolumeImpl) ref.getVolume());
    volumes.addVolume(ref);
  }
}
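At its core the method is a check-then-register guard on the storage map, performed under the dataset lock. A minimal sketch of that guard with a plain HashMap standing in for storageMap; the class name and UUIDs are invented for illustration:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

public class ActivateGuardSketch {
  private final Map<String, DatanodeStorage> storageMap = new HashMap<>();

  void register(String storageUuid, StorageType type) throws IOException {
    // Refuse to activate a volume whose storage UUID is already in use.
    if (storageMap.get(storageUuid) != null) {
      throw new IOException("Found duplicated storage UUID: " + storageUuid);
    }
    // New storages start in the NORMAL state with the volume's type.
    storageMap.put(storageUuid,
        new DatanodeStorage(storageUuid, DatanodeStorage.State.NORMAL, type));
  }

  public static void main(String[] args) throws IOException {
    ActivateGuardSketch sketch = new ActivateGuardSketch();
    sketch.register("DS-example-1", StorageType.DISK);
    try {
      sketch.register("DS-example-1", StorageType.SSD); // duplicate UUID
    } catch (IOException expected) {
      System.out.println(expected.getMessage());
    }
  }
}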