use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
the class TestBlockToken method testBlockTokenRpcLeak.
/**
 * Test that fast repeated invocations of createClientDatanodeProtocolProxy
 * will not end up using up thousands of sockets. This is a regression test
 * for HDFS-1965.
 */
private void testBlockTokenRpcLeak(boolean enableProtobuf) throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  Assume.assumeTrue(FD_DIR.exists());
  BlockTokenSecretManager sm = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
      enableProtobuf);
  Token<BlockTokenIdentifier> token = sm.generateToken(block3,
      EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
  final Server server = createMockDatanode(sm, token, conf);
  server.start();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
  LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
  fakeBlock.setBlockToken(token);
  // Create another RPC proxy with the same configuration - this will never
  // attempt to connect anywhere -- but it causes the refcount on the
  // RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
  // actually close the TCP connections to the real target DN.
  ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(
      ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
      new InetSocketAddress("1.1.1.1", 1),
      UserGroupInformation.createRemoteUser("junk"), conf,
      NetUtils.getDefaultSocketFactory(conf));
  ClientDatanodeProtocol proxy = null;
  int fdsAtStart = countOpenFileDescriptors();
  try {
    long endTime = Time.now() + 3000;
    while (Time.now() < endTime) {
      proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf,
          1000, false, fakeBlock);
      assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
      if (proxy != null) {
        RPC.stopProxy(proxy);
      }
      LOG.info("Num open fds:" + countOpenFileDescriptors());
    }
    int fdsAtEnd = countOpenFileDescriptors();
    if (fdsAtEnd - fdsAtStart > 50) {
      fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
    }
  } finally {
    server.stop();
  }
  RPC.stopProxy(proxyToNoWhere);
}
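The test relies on two file-descriptor helpers that are not shown above, FD_DIR and countOpenFileDescriptors(). A minimal sketch of how such helpers could be written, assuming a Linux-style /proc/self/fd directory (the Assume.assumeTrue(FD_DIR.exists()) guard skips the test where that directory is unavailable):

// Sketch only: counts this process's open file descriptors on Linux.
private static final File FD_DIR = new File("/proc/self/fd");

private static int countOpenFileDescriptors() {
  String[] fds = FD_DIR.list();
  return fds == null ? 0 : fds.length;
}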
use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
the class TestPBHelper method testBlockECRecoveryCommand.
@Test
public void testBlockECRecoveryCommand() {
  DatanodeInfo[] dnInfos0 = new DatanodeInfo[] {
      DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
  DatanodeStorageInfo targetDnInfos_0 = BlockManagerTestUtil
      .newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(),
          new DatanodeStorage("s00"));
  DatanodeStorageInfo targetDnInfos_1 = BlockManagerTestUtil
      .newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(),
          new DatanodeStorage("s01"));
  DatanodeStorageInfo[] targetDnInfos0 = new DatanodeStorageInfo[] {
      targetDnInfos_0, targetDnInfos_1 };
  byte[] liveBlkIndices0 = new byte[2];
  BlockECReconstructionInfo blkECRecoveryInfo0 = new BlockECReconstructionInfo(
      new ExtendedBlock("bp1", 1234), dnInfos0, targetDnInfos0,
      liveBlkIndices0, StripedFileTestUtil.getDefaultECPolicy());
  DatanodeInfo[] dnInfos1 = new DatanodeInfo[] {
      DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
  DatanodeStorageInfo targetDnInfos_2 = BlockManagerTestUtil
      .newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(),
          new DatanodeStorage("s02"));
  DatanodeStorageInfo targetDnInfos_3 = BlockManagerTestUtil
      .newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(),
          new DatanodeStorage("s03"));
  DatanodeStorageInfo[] targetDnInfos1 = new DatanodeStorageInfo[] {
      targetDnInfos_2, targetDnInfos_3 };
  byte[] liveBlkIndices1 = new byte[2];
  BlockECReconstructionInfo blkECRecoveryInfo1 = new BlockECReconstructionInfo(
      new ExtendedBlock("bp2", 3256), dnInfos1, targetDnInfos1,
      liveBlkIndices1, StripedFileTestUtil.getDefaultECPolicy());
  List<BlockECReconstructionInfo> blkRecoveryInfosList =
      new ArrayList<BlockECReconstructionInfo>();
  blkRecoveryInfosList.add(blkECRecoveryInfo0);
  blkRecoveryInfosList.add(blkECRecoveryInfo1);
  BlockECReconstructionCommand blkECReconstructionCmd =
      new BlockECReconstructionCommand(
          DatanodeProtocol.DNA_ERASURE_CODING_RECONSTRUCTION,
          blkRecoveryInfosList);
  BlockECReconstructionCommandProto blkECRecoveryCmdProto =
      PBHelper.convert(blkECReconstructionCmd);
  blkECReconstructionCmd = PBHelper.convert(blkECRecoveryCmdProto);
  Iterator<BlockECReconstructionInfo> iterator =
      blkECReconstructionCmd.getECTasks().iterator();
  assertBlockECRecoveryInfoEquals(blkECRecoveryInfo0, iterator.next());
  assertBlockECRecoveryInfoEquals(blkECRecoveryInfo1, iterator.next());
}
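The helper assertBlockECRecoveryInfoEquals is defined elsewhere in TestPBHelper. As a sketch only, a field-by-field comparison of the pieces that must survive the protobuf round trip could look like the following; the getter names are assumed from BlockECReconstructionInfo and the real helper may differ:

// Hypothetical sketch, not the actual TestPBHelper implementation.
private static void assertBlockECRecoveryInfoEquals(
    BlockECReconstructionInfo expected, BlockECReconstructionInfo actual) {
  // Compare the block identity, source/target node counts, live block
  // indices and the erasure coding policy carried by the task.
  assertEquals(expected.getExtendedBlock(), actual.getExtendedBlock());
  assertEquals(expected.getSourceDnInfos().length,
      actual.getSourceDnInfos().length);
  assertEquals(expected.getTargetDnInfos().length,
      actual.getTargetDnInfos().length);
  assertArrayEquals(expected.getLiveBlockIndices(),
      actual.getLiveBlockIndices());
  assertEquals(expected.getErasureCodingPolicy(),
      actual.getErasureCodingPolicy());
}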
use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
the class TestPBHelper method createLocatedBlockNoStorageMedia.
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL) };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos);
  lb.setBlockToken(new Token<BlockTokenIdentifier>("identifier".getBytes(),
      "password".getBytes(), new Text("kind"), new Text("service")));
  lb.setStartOffset(5);
  return lb;
}
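The four-argument ExtendedBlock constructor used here takes the block pool id, block id, length, and generation stamp in that order. A short sketch reading the same values back through ExtendedBlock's public getters:

ExtendedBlock b = new ExtendedBlock("bp12", 12345, 10, 53);
assertEquals("bp12", b.getBlockPoolId());
assertEquals(12345, b.getBlockId());        // block id
assertEquals(10, b.getNumBytes());          // length in bytes
assertEquals(53, b.getGenerationStamp());   // generation stamp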
use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
the class TestPBHelper method createLocatedBlock.
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL) };
  String[] storageIDs = { "s1", "s2", "s3", "s4" };
  StorageType[] media = { StorageType.DISK, StorageType.SSD,
      StorageType.DISK, StorageType.RAM_DISK };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, storageIDs, media, 5,
      false, new DatanodeInfo[] {});
  lb.setBlockToken(new Token<BlockTokenIdentifier>("identifier".getBytes(),
      "password".getBytes(), new Text("kind"), new Text("service")));
  return lb;
}
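Unlike createLocatedBlockNoStorageMedia above, this variant also attaches storage ids and storage types to the block. A small usage sketch showing that the metadata comes back through LocatedBlock's getters:

LocatedBlock lb = createLocatedBlock();
StorageType[] types = lb.getStorageTypes(); // { DISK, SSD, DISK, RAM_DISK }
String[] ids = lb.getStorageIDs();          // { "s1", "s2", "s3", "s4" }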
use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
the class TestWriteToReplica method createReplicas.
private void createReplicas(List<String> bpList, List<FsVolumeSpi> volumes,
    FsDatasetTestUtils testUtils) throws IOException {
  // Create one replica of each type and add it to the volume map:
  // every block pool gets one ReplicaInfo of each type on each volume.
  // This variable is used as both the blockId and the genStamp.
  long id = 1;
  for (String bpId : bpList) {
    for (FsVolumeSpi volume : volumes) {
      ExtendedBlock eb = new ExtendedBlock(bpId, id, 1, id);
      testUtils.createFinalizedReplica(volume, eb);
      id++;
      eb = new ExtendedBlock(bpId, id, 1, id);
      testUtils.createRBW(volume, eb);
      id++;
      eb = new ExtendedBlock(bpId, id, 1, id);
      testUtils.createReplicaWaitingToBeRecovered(volume, eb);
      id++;
      eb = new ExtendedBlock(bpId, id, 1, id);
      testUtils.createReplicaInPipeline(volume, eb);
      id++;
    }
  }
}
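A usage sketch for the helper above; the block pool names, the volumes list and the FsDatasetTestUtils instance are illustrative and would come from the surrounding test fixture:

// Hypothetical invocation; volumes and testUtils come from the test setup.
List<String> bpList = Arrays.asList("bp-1", "bp-2");
createReplicas(bpList, volumes, testUtils);
// Afterwards each (block pool, volume) pair holds four consecutive block ids:
// a finalized replica, an RBW replica, a replica waiting to be recovered,
// and a replica in pipeline, each with length 1 and genStamp equal to its id.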