Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.
In the class TestBlockToken, the method testBlockTokenRpcLeak:
/**
 * Test that fast repeated invocations of createClientDatanodeProtocolProxy
 * will not end up using up thousands of sockets. This is a regression test
 * for HDFS-1965.
 */
private void testBlockTokenRpcLeak(boolean enableProtobuf) throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  Assume.assumeTrue(FD_DIR.exists());
  BlockTokenSecretManager sm = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
      enableProtobuf);
  Token<BlockTokenIdentifier> token = sm.generateToken(block3,
      EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
  final Server server = createMockDatanode(sm, token, conf);
  server.start();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
  LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
  fakeBlock.setBlockToken(token);
  // Create another RPC proxy with the same configuration - this will never
  // attempt to connect anywhere -- but it causes the refcount on the
  // RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
  // actually close the TCP connections to the real target DN.
  ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(
      ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
      new InetSocketAddress("1.1.1.1", 1),
      UserGroupInformation.createRemoteUser("junk"), conf,
      NetUtils.getDefaultSocketFactory(conf));
  ClientDatanodeProtocol proxy = null;
  int fdsAtStart = countOpenFileDescriptors();
  try {
    long endTime = Time.now() + 3000;
    while (Time.now() < endTime) {
      proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf,
          1000, false, fakeBlock);
      assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
      if (proxy != null) {
        RPC.stopProxy(proxy);
      }
      LOG.info("Num open fds:" + countOpenFileDescriptors());
    }
    int fdsAtEnd = countOpenFileDescriptors();
    if (fdsAtEnd - fdsAtStart > 50) {
      fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
    }
  } finally {
    server.stop();
  }
  RPC.stopProxy(proxyToNoWhere);
}
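The helpers FD_DIR and countOpenFileDescriptors() are defined elsewhere in TestBlockToken and are not shown on this page. A minimal self-contained sketch of how such a counter can work, assuming FD_DIR points at Linux's /proc/self/fd (which would explain why the test is skipped via Assume when the directory is absent):

import java.io.File;

/** Minimal sketch: count this process's open file descriptors on Linux. */
public class FdCounter {
  // On Linux, each open descriptor appears as an entry under /proc/self/fd.
  private static final File FD_DIR = new File("/proc/self/fd");

  static int countOpenFileDescriptors() {
    String[] entries = FD_DIR.list();
    // list() returns null if the directory cannot be read (e.g. non-Linux).
    return entries == null ? -1 : entries.length;
  }

  public static void main(String[] args) {
    System.out.println("Open fds: " + countOpenFileDescriptors());
  }
}

Sampling this count before and after a burst of proxy create/stop cycles, as the test does, turns a socket leak into a simple integer comparison.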
Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.
In the class TestPBHelper, the method testConvertBlockCommand:
@Test
public void testConvertBlockCommand() {
  Block[] blocks = new Block[] { new Block(21), new Block(22) };
  DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
      new DatanodeInfo[2] };
  dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
  String[][] storageIDs = { { "s00" }, { "s10", "s11" } };
  StorageType[][] storageTypes = { { StorageType.DEFAULT },
      { StorageType.DEFAULT, StorageType.DEFAULT } };
  BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
      blocks, dnInfos, storageTypes, storageIDs);
  BlockCommandProto bcProto = PBHelper.convert(bc);
  BlockCommand bc2 = PBHelper.convert(bcProto);
  assertEquals(bc.getAction(), bc2.getAction());
  assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
  Block[] blocks2 = bc2.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    assertEquals(blocks[i], blocks2[i]);
  }
  DatanodeInfo[][] dnInfos2 = bc2.getTargets();
  assertEquals(dnInfos.length, dnInfos2.length);
  for (int i = 0; i < dnInfos.length; i++) {
    DatanodeInfo[] d1 = dnInfos[i];
    DatanodeInfo[] d2 = dnInfos2[i];
    assertEquals(d1.length, d2.length);
    for (int j = 0; j < d1.length; j++) {
      compare(d1[j], d2[j]);
    }
  }
}
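This test follows a plain round-trip pattern: build the object, convert it to its protobuf form with PBHelper.convert, convert back, and compare field by field instead of trusting a single equals call. A self-contained sketch of the same pattern, where the Msg class and its toWire/fromWire methods are hypothetical stand-ins for BlockCommand and the two PBHelper.convert overloads:

// Hypothetical value type; "wire form" is just a string here, protobuf
// plays that role in the real test.
final class Msg {
  final long id;
  final String pool;
  Msg(long id, String pool) { this.id = id; this.pool = pool; }

  String toWire() { return id + "|" + pool; }

  static Msg fromWire(String wire) {
    String[] parts = wire.split("\\|", 2);
    return new Msg(Long.parseLong(parts[0]), parts[1]);
  }
}

public class RoundTripSketch {
  public static void main(String[] args) {
    Msg original = new Msg(21, "bp1");
    Msg decoded = Msg.fromWire(original.toWire());
    // Compare field by field, as the test does for action, blocks, targets.
    if (original.id != decoded.id || !original.pool.equals(decoded.pool)) {
      throw new AssertionError("round trip lost information");
    }
    System.out.println("round trip preserved all fields");
  }
}

Field-by-field comparison is the point of the pattern: it pinpoints exactly which converted field was dropped or mangled, which a single object-equality check cannot do.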
Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.
In the class TestReplicaMap, the method testRemove:
@Test
public void testRemove() {
  // Test 1: null argument throws invalid argument exception
  try {
    map.remove(bpid, null);
    fail("Expected exception not thrown");
  } catch (IllegalArgumentException expected) {
  }
  // Test 2: remove failure - generation stamp mismatch
  Block b = new Block(block);
  b.setGenerationStamp(0);
  assertNull(map.remove(bpid, b));
  // Test 3: remove failure - block ID mismatch
  b.setGenerationStamp(block.getGenerationStamp());
  b.setBlockId(0);
  assertNull(map.remove(bpid, b));
  // Test 4: remove success
  assertNotNull(map.remove(bpid, block));
  // Test 5: remove failure - invalid block ID
  assertNull(map.remove(bpid, 0));
  // Test 6: remove success
  map.add(bpid, new FinalizedReplica(block, null, null));
  assertNotNull(map.remove(bpid, block.getBlockId()));
}
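The remove cases above hinge on the map matching a replica on both block ID and generation stamp. A minimal sketch of those semantics, where TinyReplicaMap and its internals are illustrative assumptions rather than the real ReplicaMap implementation:

import java.util.HashMap;
import java.util.Map;

/**
 * Minimal sketch of the two-key match these tests exercise; not the real
 * ReplicaMap. A replica is reduced to its generation stamp for brevity.
 */
public class TinyReplicaMap {
  // Per block pool: block ID -> generation stamp (standing in for a replica).
  private final Map<String, Map<Long, Long>> pools = new HashMap<>();

  void add(String bpid, long blockId, long genStamp) {
    pools.computeIfAbsent(bpid, k -> new HashMap<>()).put(blockId, genStamp);
  }

  /** Lookup hits only when both block ID and generation stamp match. */
  Long get(String bpid, long blockId, long genStamp) {
    Map<Long, Long> pool = pools.get(bpid);
    Long stored = (pool == null) ? null : pool.get(blockId);
    return (stored != null && stored == genStamp) ? stored : null;
  }

  /** Remove applies the same two-key check before deleting the entry. */
  Long remove(String bpid, long blockId, long genStamp) {
    return get(bpid, blockId, genStamp) == null
        ? null : pools.get(bpid).remove(blockId);
  }
}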
Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.
In the class TestReplicaMap, the method testGet:
/**
 * Tests for ReplicasMap.get(Block) and ReplicasMap.get(long).
 */
@Test
public void testGet() {
  // Test 1: null argument throws invalid argument exception
  try {
    map.get(bpid, null);
    fail("Expected exception not thrown");
  } catch (IllegalArgumentException expected) {
  }
  // Test 2: successful lookup based on block
  assertNotNull(map.get(bpid, block));
  // Test 3: lookup failure - generation stamp mismatch
  Block b = new Block(block);
  b.setGenerationStamp(0);
  assertNull(map.get(bpid, b));
  // Test 4: lookup failure - block ID mismatch
  b.setGenerationStamp(block.getGenerationStamp());
  b.setBlockId(0);
  assertNull(map.get(bpid, b));
  // Test 5: successful lookup based on block ID
  assertNotNull(map.get(bpid, block.getBlockId()));
  // Test 6: failed lookup for invalid block ID
  assertNull(map.get(bpid, 0));
}
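A quick run of the hypothetical TinyReplicaMap sketched in the previous section, reproducing the hit/miss pattern that testGet steps through:

public class TinyReplicaMapDemo {
  public static void main(String[] args) {
    TinyReplicaMap map = new TinyReplicaMap();
    map.add("bp1", 21L, 1001L);
    // Matching ID and stamp hits (cf. Test 2).
    System.out.println(map.get("bp1", 21L, 1001L)); // 1001
    // Stale generation stamp misses (cf. Test 3).
    System.out.println(map.get("bp1", 21L, 0L));    // null
    // Wrong block ID misses even with the right stamp (cf. Test 4).
    System.out.println(map.get("bp1", 0L, 1001L));  // null
  }
}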
Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.
In the class TestSpaceReservation, the method testTmpSpaceReserve:
@Test(timeout = 300000)
public void testTmpSpaceReserve() throws Exception {
  final short replication = 2;
  startCluster(BLOCK_SIZE, replication, -1);
  final int byteCount1 = 100;
  final int byteCount2 = 200;
  final String methodName = GenericTestUtils.getMethodName();
  // Test positive scenario
  {
    final Path file = new Path("/" + methodName + ".01.dat");
    try (FSDataOutputStream os = fs.create(file, (short) 1)) {
      // Write test data to the file
      os.write(new byte[byteCount1]);
      os.hsync();
    }
    BlockLocation[] blockLocations = fs.getFileBlockLocations(file, 0, 10);
    String firstReplicaNode = blockLocations[0].getNames()[0];
    int newReplicaDNIndex = 0;
    if (firstReplicaNode.equals(
        cluster.getDataNodes().get(0).getDisplayName())) {
      newReplicaDNIndex = 1;
    }
    FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) cluster.getDataNodes()
        .get(newReplicaDNIndex).getFSDataset().getFsVolumeReferences().get(0);
    performReReplication(file, true);
    assertEquals("Wrong reserved space for tmp", byteCount1,
        fsVolumeImpl.getRecentReserved());
    assertEquals("Reserved tmp space was not released", 0,
        fsVolumeImpl.getReservedForReplicas());
  }
  // Test when file creation fails
  {
    final Path file = new Path("/" + methodName + ".02.dat");
    try (FSDataOutputStream os = fs.create(file, (short) 1)) {
      // Write test data to the file
      os.write(new byte[byteCount2]);
      os.hsync();
    }
    BlockLocation[] blockLocations = fs.getFileBlockLocations(file, 0, 10);
    String firstReplicaNode = blockLocations[0].getNames()[0];
    int newReplicaDNIndex = 0;
    if (firstReplicaNode.equals(
        cluster.getDataNodes().get(0).getDisplayName())) {
      newReplicaDNIndex = 1;
    }
    BlockPoolSlice blockPoolSlice = Mockito.mock(BlockPoolSlice.class);
    Mockito.when(blockPoolSlice.createTmpFile((Block) Mockito.any()))
        .thenThrow(new IOException("Synthetic IO Exception Through MOCK"));
    final FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) cluster.getDataNodes()
        .get(newReplicaDNIndex).getFSDataset().getFsVolumeReferences().get(0);
    // Reserve some bytes to verify that double-clearing of space doesn't
    // happen
    fsVolumeImpl.reserveSpaceForReplica(1000);
    Field field = FsVolumeImpl.class.getDeclaredField("bpSlices");
    field.setAccessible(true);
    @SuppressWarnings("unchecked")
    Map<String, BlockPoolSlice> bpSlices =
        (Map<String, BlockPoolSlice>) field.get(fsVolumeImpl);
    bpSlices.put(fsVolumeImpl.getBlockPoolList()[0], blockPoolSlice);
    performReReplication(file, false);
    assertEquals("Wrong reserved space for tmp", byteCount2,
        fsVolumeImpl.getRecentReserved());
    assertEquals("Tmp space was not released or was released twice", 1000,
        fsVolumeImpl.getReservedForReplicas());
  }
}
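The failure scenario works by reaching into FsVolumeImpl's private bpSlices field and swapping in a Mockito mock whose createTmpFile throws. A stripped-down, self-contained sketch of that reflection-injection technique, with hypothetical Service and Backend classes standing in for the Hadoop types (a plain anonymous subclass plays the mock's role here):

import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-ins for FsVolumeImpl and BlockPoolSlice.
class Service {
  private final Map<String, Backend> backends = new HashMap<>();
  Service() { backends.put("bp1", new Backend()); }
  String call(String key) { return backends.get(key).handle(); }
}

class Backend {
  String handle() { return "real result"; }
}

public class ReflectionInjectionSketch {
  public static void main(String[] args) throws Exception {
    Service service = new Service();
    // Reach into the private field, exactly as the test does with bpSlices.
    Field field = Service.class.getDeclaredField("backends");
    field.setAccessible(true);
    @SuppressWarnings("unchecked")
    Map<String, Backend> backends = (Map<String, Backend>) field.get(service);
    // Replace the real entry with a failing fake (Mockito would mock here).
    backends.put("bp1", new Backend() {
      @Override
      String handle() { throw new RuntimeException("synthetic failure"); }
    });
    try {
      service.call("bp1");
    } catch (RuntimeException expected) {
      System.out.println("injected failure observed: "
          + expected.getMessage());
    }
  }
}

Injecting the fake through reflection leaves the rest of the volume untouched, so the test exercises the real reservation accounting while only the tmp-file creation step fails.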