Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.
The class TestDFSShell, method getMaterializedReplicas.
private static List<MaterializedReplica> getMaterializedReplicas(
    MiniDFSCluster cluster) throws IOException {
  List<MaterializedReplica> replicas = new ArrayList<>();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blocks =
      cluster.getAllBlockReports(poolId);
  for (int i = 0; i < blocks.size(); i++) {
    Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
    for (Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
      for (Block b : e.getValue()) {
        replicas.add(cluster.getMaterializedReplica(i,
            new ExtendedBlock(poolId, b)));
      }
    }
  }
  return replicas;
}
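As a hedged usage sketch (not part of the original test): the helper above yields one MaterializedReplica per reported block, so a test could iterate them to damage every on-disk replica in the MiniDFSCluster. The helper name corruptAllReplicas is illustrative, and it assumes the MaterializedReplica test interface exposes corruptData(), as it does in recent Hadoop test code.

// Illustrative only: corrupt every materialized replica in the cluster.
private static void corruptAllReplicas(MiniDFSCluster cluster)
    throws IOException {
  for (MaterializedReplica replica : getMaterializedReplicas(cluster)) {
    // Overwrite the replica's block file so that readers hit a checksum error.
    replica.corruptData();
  }
}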
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.
The class TestDFSOutputStream, method testNoLocalWriteFlag.
@Test
public void testNoLocalWriteFlag() throws IOException {
  DistributedFileSystem fs = cluster.getFileSystem();
  EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.NO_LOCAL_WRITE,
      CreateFlag.CREATE);
  BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
  DatanodeManager dm = bm.getDatanodeManager();
  try (FSDataOutputStream os = fs.create(new Path("/test-no-local"),
      FsPermission.getDefault(), flags, 512, (short) 2, 512, null)) {
    // Inject a DatanodeManager that returns one DataNode as local node for
    // the client.
    DatanodeManager spyDm = spy(dm);
    DatanodeDescriptor dn1 = dm.getDatanodeListForReport(
        HdfsConstants.DatanodeReportType.LIVE).get(0);
    doReturn(dn1).when(spyDm).getDatanodeByHost("127.0.0.1");
    Whitebox.setInternalState(bm, "datanodeManager", spyDm);
    byte[] buf = new byte[512 * 16];
    new Random().nextBytes(buf);
    os.write(buf);
  } finally {
    Whitebox.setInternalState(bm, "datanodeManager", dm);
  }
  cluster.triggerBlockReports();
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  // Total number of DataNodes is 3.
  assertEquals(3, cluster.getAllBlockReports(bpid).size());
  int numDataNodesWithData = 0;
  for (Map<DatanodeStorage, BlockListAsLongs> dnBlocks :
      cluster.getAllBlockReports(bpid)) {
    for (BlockListAsLongs blocks : dnBlocks.values()) {
      if (blocks.getNumberOfBlocks() > 0) {
        numDataNodesWithData++;
        break;
      }
    }
  }
  // Verify that only one DN has no data.
  assertEquals(1, 3 - numDataNodesWithData);
}
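To see which storages actually received data, rather than just counting DataNodes, the same per-storage block-report map can be inspected directly. A minimal sketch using only the calls already present above (getAllBlockReports, DatanodeStorage#getStorageID, BlockListAsLongs#getNumberOfBlocks); the printing itself is illustrative.

// Illustrative sketch: report how many blocks each DatanodeStorage holds.
for (Map<DatanodeStorage, BlockListAsLongs> dnBlocks :
    cluster.getAllBlockReports(bpid)) {
  for (Map.Entry<DatanodeStorage, BlockListAsLongs> e : dnBlocks.entrySet()) {
    // Each entry maps one storage (volume) to the blocks it reported.
    System.out.println("storage " + e.getKey().getStorageID() + " reports "
        + e.getValue().getNumberOfBlocks() + " block(s)");
  }
}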
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.
The class TestAddStripedBlocks, method testAddUCReplica.
/**
 * Test BlockInfoStripedUnderConstruction#addReplicaIfNotPresent in different
 * scenarios.
 */
@Test
public void testAddUCReplica() throws Exception {
  final Path file = new Path("/file1");
  final List<String> storageIDs = new ArrayList<>();
  // create an empty file
  FSDataOutputStream out = null;
  try {
    out = dfs.create(file, (short) 1);
    // 1. create the UC striped block
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
    cluster.getNamesystem().getAdditionalBlock(file.toString(),
        fileNode.getId(), dfs.getClient().getClientName(), null, null, null,
        null);
    BlockInfo lastBlock = fileNode.getLastBlock();
    DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature()
        .getExpectedStorageLocations();
    byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
    Assert.assertEquals(groupSize, locs.length);
    Assert.assertEquals(groupSize, indices.length);
    // 2. mimic incremental block reports and make sure the uc-replica list in
    // the BlockInfoUCStriped is correct
    int i = 0;
    for (DataNode dn : cluster.getDataNodes()) {
      final Block block = new Block(lastBlock.getBlockId() + i++, 0,
          lastBlock.getGenerationStamp());
      DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
      storageIDs.add(storage.getStorageID());
      StorageReceivedDeletedBlocks[] reports = DFSTestUtil
          .makeReportForReceivedBlock(block, BlockStatus.RECEIVING_BLOCK,
              storage);
      for (StorageReceivedDeletedBlocks report : reports) {
        cluster.getNamesystem().processIncrementalBlockReport(
            dn.getDatanodeId(), report);
      }
    }
    // make sure lastBlock is correct and the storages have been updated
    locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
    indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
    Assert.assertEquals(groupSize, locs.length);
    Assert.assertEquals(groupSize, indices.length);
    for (DatanodeStorageInfo newstorage : locs) {
      Assert.assertTrue(storageIDs.contains(newstorage.getStorageID()));
    }
  } finally {
    IOUtils.cleanup(null, out);
  }
  // 3. restart the namenode. mimic the full block reports and check the
  // uc-replica list again
  cluster.restartNameNode(true);
  final String bpId = cluster.getNamesystem().getBlockPoolId();
  INodeFile fileNode = cluster.getNamesystem().getFSDirectory()
      .getINode4Write(file.toString()).asFile();
  BlockInfo lastBlock = fileNode.getLastBlock();
  int i = groupSize - 1;
  for (DataNode dn : cluster.getDataNodes()) {
    String storageID = storageIDs.get(i);
    final Block block = new Block(lastBlock.getBlockId() + i--,
        lastBlock.getGenerationStamp(), 0);
    DatanodeStorage storage = new DatanodeStorage(storageID);
    List<ReplicaBeingWritten> blocks = new ArrayList<>();
    ReplicaBeingWritten replica = new ReplicaBeingWritten(block, null, null,
        null);
    blocks.add(replica);
    BlockListAsLongs bll = BlockListAsLongs.encode(blocks);
    StorageBlockReport[] reports = { new StorageBlockReport(storage, bll) };
    cluster.getNameNodeRpc().blockReport(dn.getDNRegistrationForBP(bpId),
        bpId, reports, new BlockReportContext(1, 0, System.nanoTime(), 0, true));
  }
  DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature()
      .getExpectedStorageLocations();
  byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
  Assert.assertEquals(groupSize, locs.length);
  Assert.assertEquals(groupSize, indices.length);
  for (i = 0; i < groupSize; i++) {
    Assert.assertEquals(storageIDs.get(i),
        locs[groupSize - 1 - i].getStorageID());
    Assert.assertEquals(groupSize - i - 1, indices[i]);
  }
}
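The test builds each DatanodeStorage with the single-argument constructor, which leaves the storage state and type at their defaults. A minimal sketch of the explicit form, assuming the three-argument constructor DatanodeStorage(String, State, StorageType) available since heterogeneous storage support:

// Sketch: explicit state and storage type instead of the defaults used above.
DatanodeStorage ssdStorage = new DatanodeStorage(
    UUID.randomUUID().toString(),   // storage ID
    DatanodeStorage.State.NORMAL,   // the storage is healthy
    StorageType.SSD);               // report it as SSD-backed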
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.
The class DatanodeProtocolServerSideTranslatorPB, method blockReceivedAndDeleted.
@Override
public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
    RpcController controller, BlockReceivedAndDeletedRequestProto request)
    throws ServiceException {
  List<StorageReceivedDeletedBlocksProto> sBlocks = request.getBlocksList();
  StorageReceivedDeletedBlocks[] info =
      new StorageReceivedDeletedBlocks[sBlocks.size()];
  for (int i = 0; i < sBlocks.size(); i++) {
    StorageReceivedDeletedBlocksProto sBlock = sBlocks.get(i);
    List<ReceivedDeletedBlockInfoProto> list = sBlock.getBlocksList();
    ReceivedDeletedBlockInfo[] rdBlocks =
        new ReceivedDeletedBlockInfo[list.size()];
    for (int j = 0; j < list.size(); j++) {
      rdBlocks[j] = PBHelper.convert(list.get(j));
    }
    if (sBlock.hasStorage()) {
      info[i] = new StorageReceivedDeletedBlocks(
          PBHelperClient.convert(sBlock.getStorage()), rdBlocks);
    } else {
      info[i] = new StorageReceivedDeletedBlocks(
          new DatanodeStorage(sBlock.getStorageUuid()), rdBlocks);
    }
  }
  try {
    impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(), info);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE;
}
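The hasStorage() branch is a compatibility path: older DataNodes sent only a storage UUID, so the translator wraps it in a DatanodeStorage built from that UUID alone. A sketch of what the fallback amounts to; the spelled-out defaults (State.NORMAL, StorageType.DEFAULT) reflect my reading of the single-argument constructor, not something stated in this snippet.

// Fallback path: only a storage UUID is available in the request.
DatanodeStorage fromUuidOnly = new DatanodeStorage(sBlock.getStorageUuid());
// Assumed to be equivalent to spelling out the defaults:
DatanodeStorage spelledOut = new DatanodeStorage(sBlock.getStorageUuid(),
    DatanodeStorage.State.NORMAL, StorageType.DEFAULT);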
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorage in project hadoop by apache.
The class IncrementalBlockReportManager, method generateIBRs.
private synchronized StorageReceivedDeletedBlocks[] generateIBRs() {
  final List<StorageReceivedDeletedBlocks> reports =
      new ArrayList<>(pendingIBRs.size());
  for (Map.Entry<DatanodeStorage, PerStorageIBR> entry : pendingIBRs.entrySet()) {
    final PerStorageIBR perStorage = entry.getValue();
    // Send newly-received and deleted blockids to namenode
    final ReceivedDeletedBlockInfo[] rdbi = perStorage.removeAll();
    if (rdbi != null) {
      reports.add(new StorageReceivedDeletedBlocks(entry.getKey(), rdbi));
    }
  }
  readyToSend = false;
  return reports.toArray(new StorageReceivedDeletedBlocks[reports.size()]);
}
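pendingIBRs is keyed by DatanodeStorage, which works because DatanodeStorage derives equals() and hashCode() from the storage ID. A hypothetical sketch of a per-storage queue built the same way; the map and method names are invented for illustration and are not the manager's actual PerStorageIBR internals.

// Hypothetical per-storage IBR queue keyed by DatanodeStorage.
Map<DatanodeStorage, List<ReceivedDeletedBlockInfo>> pending = new HashMap<>();

void queueReceivedBlock(Block block, DatanodeStorage storage) {
  // Two DatanodeStorage instances with the same storage ID hash to the same
  // bucket, so notifications for one volume accumulate together.
  pending.computeIfAbsent(storage, s -> new ArrayList<>())
      .add(new ReceivedDeletedBlockInfo(block,
          ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
}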