Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in project hadoop by apache.
In class TestPBHelper, the method testDataNodeInfoPBHelper:
@Test
public void testDataNodeInfoPBHelper() {
  DatanodeID id = DFSTestUtil.getLocalDatanodeID();
  DatanodeInfo dnInfos0 = new DatanodeInfoBuilder().setNodeID(id).build();
  dnInfos0.setCapacity(3500L);
  dnInfos0.setDfsUsed(1000L);
  dnInfos0.setNonDfsUsed(2000L);
  dnInfos0.setRemaining(500L);
  // Round-trip through the protobuf representation and compare.
  HdfsProtos.DatanodeInfoProto dnproto = PBHelperClient.convert(dnInfos0);
  DatanodeInfo dnInfos1 = PBHelperClient.convert(dnproto);
  compare(dnInfos0, dnInfos1);
  assertEquals(dnInfos0.getNonDfsUsed(), dnInfos1.getNonDfsUsed());
  // Testing without the nonDfsUsed field: older datanodes do not set it,
  // so the converter falls back to capacity - dfsUsed - remaining.
  HdfsProtos.DatanodeInfoProto.Builder b =
      HdfsProtos.DatanodeInfoProto.newBuilder();
  b.setId(PBHelperClient.convert(id)).setCapacity(3500L).setDfsUsed(1000L)
      .setRemaining(500L);
  DatanodeInfo dnInfos3 = PBHelperClient.convert(b.build());
  assertEquals(dnInfos0.getNonDfsUsed(), dnInfos3.getNonDfsUsed());
}
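The final assertion relies on the converter's legacy fallback shown further down this page: when the proto carries no nonDfsUsed field, the value is derived from the other counters. A minimal arithmetic sketch using the same figures as the test (plain Java, variable names are illustrative):

// Sketch: legacy derivation of nonDfsUsed when the proto omits the field.
long capacity = 3500L, dfsUsed = 1000L, remaining = 500L;
long legacyNonDfsUsed = capacity - dfsUsed - remaining;     // 2000
long clamped = legacyNonDfsUsed < 0 ? 0 : legacyNonDfsUsed; // never negative
// 'clamped' equals dnInfos0.getNonDfsUsed() (2000L), which is why
// assertEquals(dnInfos0.getNonDfsUsed(), dnInfos3.getNonDfsUsed()) holds.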
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in project hadoop by apache.
In class StripedBlockWriter, the method init:
/**
* Initialize output/input streams for transferring data to target
* and send create block request.
*/
private void init() throws IOException {
  Socket socket = null;
  DataOutputStream out = null;
  DataInputStream in = null;
  boolean success = false;
  try {
    InetSocketAddress targetAddr =
        stripedWriter.getSocketAddress4Transfer(target);
    socket = datanode.newSocket();
    NetUtils.connect(socket, targetAddr,
        datanode.getDnConf().getSocketTimeout());
    socket.setTcpNoDelay(
        datanode.getDnConf().getDataTransferServerTcpNoDelay());
    socket.setSoTimeout(datanode.getDnConf().getSocketTimeout());

    Token<BlockTokenIdentifier> blockToken = datanode.getBlockAccessToken(
        block, EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE));

    long writeTimeout = datanode.getDnConf().getSocketWriteTimeout();
    OutputStream unbufOut = NetUtils.getOutputStream(socket, writeTimeout);
    InputStream unbufIn = NetUtils.getInputStream(socket);
    // Wrap the raw socket streams with SASL negotiation, then buffer them.
    DataEncryptionKeyFactory keyFactory =
        datanode.getDataEncryptionKeyFactoryForBlock(block);
    IOStreamPair saslStreams = datanode.getSaslClient().socketSend(
        socket, unbufOut, unbufIn, keyFactory, blockToken, target);
    unbufOut = saslStreams.out;
    unbufIn = saslStreams.in;
    out = new DataOutputStream(new BufferedOutputStream(unbufOut,
        DFSUtilClient.getSmallBufferSize(conf)));
    in = new DataInputStream(unbufIn);

    DatanodeInfo source = new DatanodeInfoBuilder()
        .setNodeID(datanode.getDatanodeId()).build();
    // Send the create-block request to the target datanode.
    new Sender(out).writeBlock(block, storageType, blockToken, "",
        new DatanodeInfo[] { target }, new StorageType[] { storageType },
        source, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0, 0, 0,
        stripedWriter.getChecksum(), stripedWriter.getCachingStrategy(),
        false, false, null);

    targetSocket = socket;
    targetOutputStream = out;
    targetInputStream = in;
    success = true;
  } finally {
    if (!success) {
      // Close whatever was opened if any step above failed.
      IOUtils.closeStream(out);
      IOUtils.closeStream(in);
      IOUtils.closeStream(socket);
    }
  }
}
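The success flag plus finally block is a common idiom for handing resources off to instance fields only when every setup step completes. A self-contained sketch of that idiom outside of HDFS (class, method, and resource names are hypothetical):

import java.io.Closeable;
import java.io.IOException;
import java.net.Socket;

// Hypothetical illustration of the cleanup-on-failure idiom used in init().
class TransferSetup {
  private Socket targetSocket;

  void open(String host, int port) throws IOException {
    Socket socket = null;
    boolean success = false;
    try {
      socket = new Socket(host, port);   // any step here may throw
      targetSocket = socket;             // hand off only on full success
      success = true;
    } finally {
      if (!success) {
        closeQuietly(socket);            // undo partial setup
      }
    }
  }

  private static void closeQuietly(Closeable c) {
    if (c != null) {
      try {
        c.close();
      } catch (IOException ignored) {
      }
    }
  }
}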
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in project hadoop by apache.
In class ReportBadBlockAction, the method reportTo:
@Override
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode,
    DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
  if (bpRegistration == null) {
    return;
  }
  DatanodeInfo[] dnArr = {
      new DatanodeInfoBuilder().setNodeID(bpRegistration).build() };
  String[] uuids = { storageUuid };
  StorageType[] types = { storageType };
  LocatedBlock[] locatedBlock = {
      new LocatedBlock(block, dnArr, uuids, types) };
  try {
    bpNamenode.reportBadBlocks(locatedBlock);
  } catch (RemoteException re) {
    DataNode.LOG.info("reportBadBlock encountered RemoteException for "
        + "block: " + block, re);
  } catch (IOException e) {
    throw new BPServiceActorActionException(
        "Failed to report bad block " + block + " to namenode.", e);
  }
}
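The two catch blocks distinguish a NameNode-side RemoteException (the report arrived, so it is only logged) from a local IOException (delivery may have failed, so it is wrapped in BPServiceActorActionException for the caller to handle). For reference, a hedged, self-contained sketch of assembling the same one-element report with placeholder block and storage values; the arrays are parallel and must stay the same length:

// Sketch only: placeholder values, not taken from a running datanode.
DatanodeID localId = DFSTestUtil.getLocalDatanodeID();
ExtendedBlock badBlock = new ExtendedBlock("bp-example", 1001L, 64L, 100L);
DatanodeInfo[] dnArr = {
    new DatanodeInfo.DatanodeInfoBuilder().setNodeID(localId).build() };
String[] uuids = { "storage-uuid-0" };        // one entry per datanode
StorageType[] types = { StorageType.DISK };   // one entry per datanode
LocatedBlock[] report = { new LocatedBlock(badBlock, dnArr, uuids, types) };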
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in project hadoop by apache.
In class TestBlockReaderFactory, the method testShortCircuitReadFromServerWithoutShm:
/**
* Test that a client which supports short-circuit reads using
* shared memory can fall back to not using shared memory when
* the server doesn't support it.
*/
@Test
public void testShortCircuitReadFromServerWithoutShm() throws Exception {
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration clientConf = createShortCircuitConf(
      "testShortCircuitReadFromServerWithoutShm", sockDir);
  Configuration serverConf = new Configuration(clientConf);
  serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  cluster.waitActive();
  clientConf.set(DFS_CLIENT_CONTEXT,
      "testShortCircuitReadFromServerWithoutShm_clientContext");
  final DistributedFileSystem fs =
      (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
  final String TEST_FILE = "/test_file";
  final int TEST_FILE_LEN = 4000;
  final int SEED = 0xFADEC;
  DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
  byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
  byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
  Assert.assertTrue(Arrays.equals(contents, expected));
  final ShortCircuitCache cache =
      fs.getClient().getClientContext().getShortCircuitCache();
  final DatanodeInfo datanode = new DatanodeInfoBuilder()
      .setNodeID(cluster.getDataNodes().get(0).getDatanodeId()).build();
  cache.getDfsClientShmManager().visit(new Visitor() {

    @Override
    public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
        throws IOException {
      Assert.assertEquals(1, info.size());
      PerDatanodeVisitorInfo vinfo = info.get(datanode);
      Assert.assertTrue(vinfo.disabled);
      Assert.assertEquals(0, vinfo.full.size());
      Assert.assertEquals(0, vinfo.notFull.size());
    }
  });
  cluster.shutdown();
  sockDir.close();
}
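The HashMap lookup inside the visitor works because DatanodeInfo inherits its equals/hashCode from DatanodeID (transfer address plus datanode UUID), so a DatanodeInfo freshly built from the cluster's DatanodeID matches the key stored by the shared-memory manager. A small sketch of that assumption (not part of the test above):

// Sketch: two DatanodeInfo instances built from the same DatanodeID are
// expected to be equal and to hash identically, assuming DatanodeID-based
// identity. 'id' is any DatanodeID at hand.
DatanodeID id = DFSTestUtil.getLocalDatanodeID();
DatanodeInfo a = new DatanodeInfo.DatanodeInfoBuilder().setNodeID(id).build();
DatanodeInfo b = new DatanodeInfo.DatanodeInfoBuilder().setNodeID(id).build();
assert a.equals(b) && a.hashCode() == b.hashCode();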
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in project hadoop by apache.
In class PBHelperClient, the method convert:
public static DatanodeInfo convert(DatanodeInfoProto di) {
  if (di == null) {
    return null;
  }
  DatanodeInfoBuilder dinfo = new DatanodeInfoBuilder()
      .setNodeID(convert(di.getId()))
      .setNetworkLocation(di.hasLocation() ? di.getLocation() : null)
      .setCapacity(di.getCapacity()).setDfsUsed(di.getDfsUsed())
      .setRemaining(di.getRemaining()).setBlockPoolUsed(di.getBlockPoolUsed())
      .setCacheCapacity(di.getCacheCapacity()).setCacheUsed(di.getCacheUsed())
      .setLastUpdate(di.getLastUpdate())
      .setLastUpdateMonotonic(di.getLastUpdateMonotonic())
      .setXceiverCount(di.getXceiverCount())
      .setAdminState(convert(di.getAdminState()))
      .setUpgradeDomain(di.hasUpgradeDomain() ? di.getUpgradeDomain() : null)
      .setLastBlockReportTime(
          di.hasLastBlockReportTime() ? di.getLastBlockReportTime() : 0)
      .setLastBlockReportMonotonic(
          di.hasLastBlockReportMonotonic() ? di.getLastBlockReportMonotonic() : 0);
  if (di.hasNonDfsUsed()) {
    dinfo.setNonDfsUsed(di.getNonDfsUsed());
  } else {
    // use the legacy way for older datanodes
    long nonDFSUsed = di.getCapacity() - di.getDfsUsed() - di.getRemaining();
    dinfo.setNonDfsUsed(nonDFSUsed < 0 ? 0 : nonDFSUsed);
  }
  return dinfo.build();
}
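As the test at the top of this page shows, the nonDfsUsed branch is what keeps old and new datanode reports consistent. A condensed, hedged sketch of the two code paths using a hand-built proto; the values are illustrative and match the test above, and the builder setters are assumed to mirror the getters used in convert:

// Path 1: the proto carries nonDfsUsed, so it is taken verbatim.
HdfsProtos.DatanodeInfoProto withField = HdfsProtos.DatanodeInfoProto.newBuilder()
    .setId(PBHelperClient.convert(DFSTestUtil.getLocalDatanodeID()))
    .setCapacity(3500L).setDfsUsed(1000L).setRemaining(500L)
    .setNonDfsUsed(2000L)
    .build();
assert PBHelperClient.convert(withField).getNonDfsUsed() == 2000L;

// Path 2: no nonDfsUsed on the wire (an older datanode); the converter
// falls back to capacity - dfsUsed - remaining = 2000.
HdfsProtos.DatanodeInfoProto withoutField = HdfsProtos.DatanodeInfoProto.newBuilder()
    .setId(PBHelperClient.convert(DFSTestUtil.getLocalDatanodeID()))
    .setCapacity(3500L).setDfsUsed(1000L).setRemaining(500L)
    .build();
assert PBHelperClient.convert(withoutField).getNonDfsUsed() == 2000L;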