Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
From the class TestPBHelper, method testConvertDatanodeRegistration.
@Test
public void testConvertDatanodeRegistration() {
  DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
  BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
  ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
      getBlockKey(1), keys);
  DatanodeRegistration reg = new DatanodeRegistration(dnId,
      new StorageInfo(NodeType.DATA_NODE), expKeys, "3.0.0");
  DatanodeRegistrationProto proto = PBHelper.convert(reg);
  DatanodeRegistration reg2 = PBHelper.convert(proto);
  compare(reg.getStorageInfo(), reg2.getStorageInfo());
  compare(reg.getExportedKeys(), reg2.getExportedKeys());
  compare(reg, reg2);
  assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion());
}
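Two helpers used above, getBlockKey(int) and compare(StorageInfo, StorageInfo), are defined elsewhere in TestPBHelper and are not shown in this snippet. A minimal sketch of what they could look like, assuming a BlockKey(keyId, expiryDate, encodedKey) constructor and a field-by-field StorageInfo comparison (both are assumptions, not the test's actual code):

private static BlockKey getBlockKey(int keyId) {
  // Hypothetical: build a key with an arbitrary expiry and fake key bytes.
  return new BlockKey(keyId, System.currentTimeMillis() + 60_000,
      "fake-key".getBytes());
}

private static void compare(StorageInfo expected, StorageInfo actual) {
  // Hypothetical field-by-field check of the round-tripped storage info.
  assertEquals(expected.getLayoutVersion(), actual.getLayoutVersion());
  assertEquals(expected.getNamespaceID(), actual.getNamespaceID());
  assertEquals(expected.getClusterID(), actual.getClusterID());
  assertEquals(expected.getCTime(), actual.getCTime());
}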
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
From the class TestBlockToken, method testBlockTokenRpcLeak.
/**
 * Test that fast repeated invocations of createClientDatanodeProtocolProxy
 * will not end up using up thousands of sockets. This is a regression test
 * for HDFS-1965.
 */
private void testBlockTokenRpcLeak(boolean enableProtobuf) throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  Assume.assumeTrue(FD_DIR.exists());
  BlockTokenSecretManager sm = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
      enableProtobuf);
  Token<BlockTokenIdentifier> token = sm.generateToken(block3,
      EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
  final Server server = createMockDatanode(sm, token, conf);
  server.start();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
  LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
  fakeBlock.setBlockToken(token);
  // Create another RPC proxy with the same configuration - this will never
  // attempt to connect anywhere -- but it causes the refcount on the
  // RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
  // actually close the TCP connections to the real target DN.
  ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(
      ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
      new InetSocketAddress("1.1.1.1", 1),
      UserGroupInformation.createRemoteUser("junk"), conf,
      NetUtils.getDefaultSocketFactory(conf));
  ClientDatanodeProtocol proxy = null;
  int fdsAtStart = countOpenFileDescriptors();
  try {
    long endTime = Time.now() + 3000;
    while (Time.now() < endTime) {
      proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf,
          1000, false, fakeBlock);
      assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
      if (proxy != null) {
        RPC.stopProxy(proxy);
      }
      LOG.info("Num open fds:" + countOpenFileDescriptors());
    }
    int fdsAtEnd = countOpenFileDescriptors();
    if (fdsAtEnd - fdsAtStart > 50) {
      fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
    }
  } finally {
    server.stop();
  }
  RPC.stopProxy(proxyToNoWhere);
}
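The leak check above depends on FD_DIR and countOpenFileDescriptors(), which this snippet does not show. On Linux, every open descriptor of the current process appears as an entry under /proc/self/fd (hence the Assume.assumeTrue(FD_DIR.exists()) guard), so a minimal sketch of the helper, under that assumption, could be:

private static final File FD_DIR = new File("/proc/self/fd");

private static int countOpenFileDescriptors() {
  // Each entry in /proc/self/fd is one open file descriptor of this JVM,
  // so the directory listing size approximates the open-fd count.
  return FD_DIR.list().length;
}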
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
From the class TestPBHelper, method testDataNodeInfoPBHelper.
@Test
public void testDataNodeInfoPBHelper() {
  DatanodeID id = DFSTestUtil.getLocalDatanodeID();
  DatanodeInfo dnInfos0 = new DatanodeInfoBuilder().setNodeID(id).build();
  dnInfos0.setCapacity(3500L);
  dnInfos0.setDfsUsed(1000L);
  dnInfos0.setNonDfsUsed(2000L);
  dnInfos0.setRemaining(500L);
  HdfsProtos.DatanodeInfoProto dnproto = PBHelperClient.convert(dnInfos0);
  DatanodeInfo dnInfos1 = PBHelperClient.convert(dnproto);
  compare(dnInfos0, dnInfos1);
  assertEquals(dnInfos0.getNonDfsUsed(), dnInfos1.getNonDfsUsed());
  // Testing without the nonDfs field
  HdfsProtos.DatanodeInfoProto.Builder b =
      HdfsProtos.DatanodeInfoProto.newBuilder();
  b.setId(PBHelperClient.convert(id)).setCapacity(3500L).setDfsUsed(1000L)
      .setRemaining(500L);
  DatanodeInfo dnInfos3 = PBHelperClient.convert(b.build());
  assertEquals(dnInfos0.getNonDfsUsed(), dnInfos3.getNonDfsUsed());
}
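compare(DatanodeInfo, DatanodeInfo) is another helper defined elsewhere in the test class. The last assertEquals above only holds if the conversion derives nonDfsUsed as capacity - dfsUsed - remaining (3500 - 1000 - 500 = 2000) when the proto field is absent, which is what the test is exercising. A plausible sketch of the helper, assuming it asserts the identity and usage fields set above (the real one may cover more), is:

private void compare(DatanodeInfo expected, DatanodeInfo actual) {
  // Hypothetical comparison of the fields exercised by this test.
  assertEquals(expected.getDatanodeUuid(), actual.getDatanodeUuid());
  assertEquals(expected.getXferAddr(), actual.getXferAddr());
  assertEquals(expected.getCapacity(), actual.getCapacity());
  assertEquals(expected.getDfsUsed(), actual.getDfsUsed());
  assertEquals(expected.getRemaining(), actual.getRemaining());
}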
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
From the class TestPBHelper, method testConvertDatanodeID.
@Test
public void testConvertDatanodeID() {
  DatanodeID dn = DFSTestUtil.getLocalDatanodeID();
  DatanodeIDProto dnProto = PBHelperClient.convert(dn);
  DatanodeID dn2 = PBHelperClient.convert(dnProto);
  compare(dn, dn2);
}
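The compare(dn, dn2) call hides the individual field checks. Spelled out with DatanodeID's public accessors, an equivalent set of assertions (written here only for illustration, not the test's actual helper) would be:

// Field-by-field equivalent of compare(dn, dn2), for illustration.
assertEquals(dn.getIpAddr(), dn2.getIpAddr());
assertEquals(dn.getHostName(), dn2.getHostName());
assertEquals(dn.getDatanodeUuid(), dn2.getDatanodeUuid());
assertEquals(dn.getXferPort(), dn2.getXferPort());
assertEquals(dn.getInfoPort(), dn2.getInfoPort());
assertEquals(dn.getIpcPort(), dn2.getIpcPort());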
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
From the class TestFsck, method testFsckMissingECFile.
@Test(timeout = 300000)
public void testFsckMissingECFile() throws Exception {
  DistributedFileSystem fs = null;
  int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
  int parityBlocks =
      StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
  int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
  int totalSize = dataBlocks + parityBlocks;
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      StripedFileTestUtil.getDefaultECPolicy().getName());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
  fs = cluster.getFileSystem();
  // create file
  Path ecDirPath = new Path("/striped");
  fs.mkdir(ecDirPath, FsPermission.getDirDefault());
  fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
      StripedFileTestUtil.getDefaultECPolicy().getName());
  Path file = new Path(ecDirPath, "missing");
  final int length = cellSize * dataBlocks;
  final byte[] bytes = StripedFileTestUtil.generateBytes(length);
  DFSTestUtil.writeFile(fs, file, bytes);
  // make an unrecoverable ec file with missing blocks
  ArrayList<DataNode> dns = cluster.getDataNodes();
  DatanodeID dnId;
  for (int i = 0; i < parityBlocks + 1; i++) {
    dnId = dns.get(i).getDatanodeId();
    cluster.stopDataNode(dnId.getXferAddr());
    cluster.setDataNodeDead(dnId);
  }
  waitForUnrecoverableBlockGroup(conf);
  String outStr = runFsck(conf, 1, true, "/", "-files", "-blocks",
      "-locations");
  assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
  assertTrue(outStr.contains("Live_repl=" + (dataBlocks - 1)));
  assertTrue(outStr.contains("Under-erasure-coded block groups:\t0"));
  outStr = runFsck(conf, -1, true, "/", "-list-corruptfileblocks");
  assertTrue(outStr.contains("has 1 CORRUPT files"));
}
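runFsck and waitForUnrecoverableBlockGroup are TestFsck helpers not shown in this snippet. A minimal sketch of runFsck, assuming it simply drives the DFSck tool, optionally checks the exit code, and returns the captured output (the real helper may add logging and other bookkeeping), is:

static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path) throws Exception {
  // Capture fsck output so the caller can assert on its contents.
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  if (checkErrorCode) {
    assertEquals(expectedErrCode, errCode);
  }
  return bStream.toString();
}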