Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in project hadoop by apache.
The class DFSStripedOutputStream, method updatePipeline:
private void updatePipeline(ExtendedBlock newBG) throws IOException {
  final DatanodeInfo[] newNodes = new DatanodeInfo[numAllBlocks];
  final String[] newStorageIDs = new String[numAllBlocks];
  for (int i = 0; i < numAllBlocks; i++) {
    final StripedDataStreamer streamer = getStripedDataStreamer(i);
    final DatanodeInfo[] nodes = streamer.getNodes();
    final String[] storageIDs = streamer.getStorageIDs();
    if (streamer.isHealthy() && nodes != null && storageIDs != null) {
      newNodes[i] = nodes[0];
      newStorageIDs[i] = storageIDs[0];
    } else {
      newNodes[i] = new DatanodeInfoBuilder()
          .setNodeID(DatanodeID.EMPTY_DATANODE_ID).build();
      newStorageIDs[i] = "";
    }
  }
  dfsClient.namenode.updatePipeline(dfsClient.clientName, currentBlockGroup,
      newBG, newNodes, newStorageIDs);
  currentBlockGroup = newBG;
}
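Two builder idioms recur across these examples: wrapping an existing DatanodeID with setNodeID (as in the placeholder entry above) and copying an existing DatanodeInfo with setFrom (as in the FSNamesystem example below). The following is a minimal, self-contained sketch of both, not taken from the Hadoop sources; it assumes only the setNodeID, setFrom and build methods visible in the snippets, and the class name is illustrative.

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;

public class DatanodeInfoBuilderSketch {
  public static void main(String[] args) {
    // Wrap an existing DatanodeID; the placeholder entry in updatePipeline above
    // is built this way from DatanodeID.EMPTY_DATANODE_ID.
    DatanodeInfo placeholder = new DatanodeInfoBuilder()
        .setNodeID(DatanodeID.EMPTY_DATANODE_ID)
        .build();

    // Copy the fields of an existing DatanodeInfo (or a subclass such as
    // DatanodeDescriptor, as FSNamesystem does in the next example).
    DatanodeInfo copy = new DatanodeInfoBuilder()
        .setFrom(placeholder)
        .build();

    System.out.println("placeholder=" + placeholder + ", copy=" + copy);
  }
}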
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in project hadoop by apache.
The class FSNamesystem, method getDatanodeStorageReport:
DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type)
    throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();
    final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);
    DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
    for (int i = 0; i < reports.length; i++) {
      final DatanodeDescriptor d = datanodes.get(i);
      reports[i] = new DatanodeStorageReport(
          new DatanodeInfoBuilder().setFrom(d).build(), d.getStorageReports());
    }
    return reports;
  } finally {
    readUnlock("getDatanodeStorageReport");
  }
}
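A hedged sketch of how a caller might consume the DatanodeStorageReport[] this method returns, for example after fetching it through the client API. The accessors getDatanodeInfo(), getStorageReports(), getRemaining() and getHostName() are assumptions about the Hadoop protocol classes rather than anything shown above.

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

public class StorageReportPrinter {
  /** Print one line per datanode with its storage count and aggregate remaining space. */
  static void print(DatanodeStorageReport[] reports) {
    for (DatanodeStorageReport report : reports) {
      DatanodeInfo dn = report.getDatanodeInfo();
      long remainingBytes = 0;
      for (StorageReport storage : report.getStorageReports()) {
        remainingBytes += storage.getRemaining();
      }
      System.out.println(dn.getHostName() + ": " + report.getStorageReports().length
          + " storages, " + remainingBytes + " bytes remaining");
    }
  }
}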
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in project hadoop by apache.
The class TestDFSClientSocketSize, method getSendBufferSize:
private int getSendBufferSize(Configuration conf) throws IOException {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  try {
    cluster.waitActive();
    LOG.info("MiniDFSCluster started.");
    try (Socket socket = DataStreamer.createSocketForPipeline(
        new DatanodeInfoBuilder()
            .setNodeID(cluster.dataNodes.get(0).datanode.getDatanodeId()).build(),
        1, cluster.getFileSystem().getClient())) {
      return socket.getSendBufferSize();
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
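A hedged usage sketch for the helper above, meant to sit in the same test class (which already has JUnit assertions, Configuration and HdfsConfiguration on hand). The key name dfs.client.socket.send.buffer.size is an assumption about what this test exercises, and the assertion is deliberately loose because the OS may round the requested buffer size.

@Test
public void testConfiguredSendBufferSize() throws IOException {
  Configuration conf = new HdfsConfiguration();
  // Assumed key name; a real test would reference the corresponding config-key constant.
  conf.setInt("dfs.client.socket.send.buffer.size", 256 * 1024);
  final int sendBufferSize = getSendBufferSize(conf);
  LOG.info("Send buffer size with an explicit setting: " + sendBufferSize);
  assertTrue("Send buffer size should be positive", sendBufferSize > 0);
}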
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in project hadoop by apache.
The class TestFileCorruption, method testArrayOutOfBoundsException:
/**
 * Test the case that a replica is reported corrupt while it is not
 * in blocksMap. Make sure that an ArrayIndexOutOfBoundsException is not thrown.
 * See Hadoop-4351.
 */
@Test
public void testArrayOutOfBoundsException() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    final Path FILE_PATH = new Path("/tmp.txt");
    final long FILE_LEN = 1L;
    DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short) 2, 1L);
    // get the block
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    ExtendedBlock blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
    assertFalse("Data directory does not contain any blocks or there was an "
        + "IO error", blk == null);
    // start a third datanode
    cluster.startDataNodes(conf, 1, true, null, null);
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(datanodes.size(), 3);
    DataNode dataNode = datanodes.get(2);
    // report corrupted block by the third datanode
    DatanodeRegistration dnR =
        InternalDataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
    FSNamesystem ns = cluster.getNamesystem();
    ns.writeLock();
    try {
      cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk,
          new DatanodeInfoBuilder().setNodeID(dnR).build(), "TEST", "STORAGE_ID");
    } finally {
      ns.writeUnlock();
    }
    // open the file
    fs.open(FILE_PATH);
    // clean up
    fs.delete(FILE_PATH, false);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
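As a hedged follow-up, the bare fs.open(FILE_PATH) check could be strengthened to read the file back, which would fail if the bogus corruption report had broken block handling on the NameNode. DFSTestUtil.readFile(FileSystem, Path) and JUnit's assertNotNull are assumed to be available to this test.

// Read the contents back from the two healthy replicas; this should succeed
// even though a corrupt replica was reported for a block not in blocksMap.
String contents = DFSTestUtil.readFile(fs, FILE_PATH);
assertNotNull(contents);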
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in project hadoop by apache.
The class TestInterDatanodeProtocol, method testInterDNProtocolTimeout:
/**
 * Test to verify that InterDatanode RPC times out as expected when
 * the server DN does not respond.
 */
@Test(expected = SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  final Server server = new TestServer(1, true);
  server.start();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId).build();
  InterDatanodeProtocol proxy = null;
  try {
    proxy = DataNode.createInterDataNodeProtocolProxy(dInfo, conf, 500, false);
    proxy.initReplicaRecovery(
        new RecoveringBlock(new ExtendedBlock("bpid", 1), null, 100));
    fail("Expected SocketTimeoutException exception, but did not get.");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
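The same builder idiom works for any DatanodeID, not only one bound to a live port. A short hedged sketch, assuming DFSTestUtil also provides a no-argument getLocalDatanodeID() overload alongside the port-taking one used above:

// Build a DatanodeInfo for a fake local datanode without pinning a port.
DatanodeID localId = DFSTestUtil.getLocalDatanodeID();  // assumed overload
DatanodeInfo localInfo = new DatanodeInfoBuilder()
    .setNodeID(localId)
    .build();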