Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
The class TestDataNodeVolumeFailure, method accessBlock.
/**
 * Try to access a block on a datanode. Throws an IOException on failure.
 * @param datanode the datanode to read the block from
 * @param lblock the located block to access
 * @throws IOException if the block cannot be read
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
    throws IOException {
  ExtendedBlock block = lblock.getBlock();
  InetSocketAddress targetAddr =
      NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DfsClientConf(conf))
      .setInetSocketAddress(targetAddr)
      .setBlock(block)
      .setFileName(BlockReaderFactory.getFileName(targetAddr,
          "test-blockpoolid", block.getBlockId()))
      .setBlockToken(lblock.getBlockToken())
      .setStartOffset(0)
      .setLength(0)
      .setVerifyChecksum(true)
      .setClientName("TestDataNodeVolumeFailure")
      .setDatanodeInfo(datanode)
      .setCachingStrategy(CachingStrategy.newDefaultStrategy())
      .setClientCacheContext(ClientContext.getFromConf(conf))
      .setConfiguration(conf)
      .setTracer(FsTracer.get(conf))
      .setRemotePeerFactory(new RemotePeerFactory() {
        @Override
        public Peer newConnectedPeer(InetSocketAddress addr,
            Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
            throws IOException {
          Peer peer = null;
          Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
          try {
            sock.connect(addr, HdfsConstants.READ_TIMEOUT);
            sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
            peer = DFSUtilClient.peerFromSocket(sock);
          } finally {
            if (peer == null) {
              IOUtils.closeSocket(sock);
            }
          }
          return peer;
        }
      })
      .build();
  blockReader.close();
}
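For context, a sketch of how a test might drive accessBlock: fetch the located blocks of a file from the NameNode RPC and try to read every replica. This is an assumption-laden illustration, not code from the test class; the path "/test.dat", the length FILE_LEN, and the cluster field are hypothetical placeholders.

// Sketch only: exercise accessBlock against every replica of a file.
// "/test.dat" and FILE_LEN are hypothetical; cluster is assumed to be
// the test's MiniDFSCluster field.
List<LocatedBlock> locatedBlocks = cluster.getNameNodeRpc()
    .getBlockLocations("/test.dat", 0, FILE_LEN).getLocatedBlocks();
for (LocatedBlock lb : locatedBlocks) {
  for (DatanodeInfo dn : lb.getLocations()) {
    accessBlock(dn, lb); // throws IOException if the replica is unreadable
  }
}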
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
The class TestInterDatanodeProtocol, method testInterDNProtocolTimeout.
/**
 * Test to verify that an InterDatanode RPC times out as expected when
 * the server DN does not respond.
 */
@Test(expected = SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId).build();
  InterDatanodeProtocol proxy = null;

  try {
    proxy = DataNode.createInterDataNodeProtocolProxy(dInfo, conf, 500, false);
    proxy.initReplicaRecovery(
        new RecoveringBlock(new ExtendedBlock("bpid", 1), null, 100));
    fail("Expected a SocketTimeoutException, but none was thrown.");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
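The DatanodeID built by DFSTestUtil.getLocalDatanodeID(port) is roughly the following: a loopback ID whose ports all point at the unresponsive test server, so the proxy's IPC connection is the one that times out. Treat this as an approximation of the helper, not its exact body; the constructor argument order is (ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort, ipcPort).

// Approximation of DFSTestUtil.getLocalDatanodeID(addr.getPort()):
// every port, including the IPC port dialed by the inter-datanode proxy,
// points at the dummy server that never answers.
DatanodeID fakeDnId = new DatanodeID(
    "127.0.0.1", "localhost", UUID.randomUUID().toString(),
    addr.getPort(),   // xferPort
    addr.getPort(),   // infoPort
    addr.getPort(),   // infoSecurePort
    addr.getPort());  // ipcPort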
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
The class TestDeleteRace, method testDeleteAndCommitBlockSynchronizationRace.
/**
 * Test the race between a delete operation and the
 * commitBlockSynchronization method. See HDFS-6825.
 * @param hasSnapshot whether to create snapshots during the test
 * @throws Exception
 */
private void testDeleteAndCommitBlockSynchronizationRace(boolean hasSnapshot)
    throws Exception {
  LOG.info("Start testing, hasSnapshot: " + hasSnapshot);
  ArrayList<AbstractMap.SimpleImmutableEntry<String, Boolean>> testList =
      new ArrayList<AbstractMap.SimpleImmutableEntry<String, Boolean>>();
  testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>(
      "/test-file", false));
  testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>(
      "/test-file1", true));
  testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>(
      "/testdir/testdir1/test-file", false));
  testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>(
      "/testdir/testdir1/test-file1", true));

  final Path rootPath = new Path("/");
  final Configuration conf = new Configuration();
  // Disable permissions so that another user can recover the lease.
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

  FSDataOutputStream stm = null;
  Map<DataNode, DatanodeProtocolClientSideTranslatorPB> dnMap =
      new HashMap<DataNode, DatanodeProtocolClientSideTranslatorPB>();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    int stId = 0;
    for (AbstractMap.SimpleImmutableEntry<String, Boolean> stest : testList) {
      String testPath = stest.getKey();
      Boolean mkSameDir = stest.getValue();
      LOG.info("test on " + testPath + " mkSameDir: " + mkSameDir
          + " snapshot: " + hasSnapshot);
      Path fPath = new Path(testPath);
      // Find the highest non-root ancestor of fPath.
      Path grandestNonRootParent = fPath;
      while (!grandestNonRootParent.getParent().equals(rootPath)) {
        grandestNonRootParent = grandestNonRootParent.getParent();
      }
      stm = fs.create(fPath);
      LOG.info("test on " + testPath + " created " + fPath);

      // Write a half block.
      AppendTestUtil.write(stm, 0, BLOCK_SIZE / 2);
      stm.hflush();

      if (hasSnapshot) {
        SnapshotTestHelper.createSnapshot(fs, rootPath,
            "st" + String.valueOf(stId));
        ++stId;
      }

      // Look into the block manager on the active node for the block
      // under construction.
      NameNode nn = cluster.getNameNode();
      ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, fPath);
      DatanodeDescriptor expectedPrimary =
          DFSTestUtil.getExpectedPrimaryNode(nn, blk);
      LOG.info("Expecting block recovery to be triggered on DN "
          + expectedPrimary);

      // Find the corresponding DN daemon, and spy on its connection to the
      // active.
      DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
      DatanodeProtocolClientSideTranslatorPB nnSpy = dnMap.get(primaryDN);
      if (nnSpy == null) {
        nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(primaryDN, nn);
        dnMap.put(primaryDN, nnSpy);
      }

      // Delay the commitBlockSynchronization call.
      DelayAnswer delayer = new DelayAnswer(LOG);
      Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
          Mockito.eq(blk),
          Mockito.anyInt(),                    // new genstamp
          Mockito.anyLong(),                   // new length
          Mockito.eq(true),                    // close file
          Mockito.eq(false),                   // delete block
          (DatanodeID[]) Mockito.anyObject(),  // new targets
          (String[]) Mockito.anyObject());     // new target storages

      fs.recoverLease(fPath);
      LOG.info("Waiting for commitBlockSynchronization call from primary");
      delayer.waitForCall();

      LOG.info("Deleting recursively " + grandestNonRootParent);
      fs.delete(grandestNonRootParent, true);
      if (mkSameDir && !grandestNonRootParent.toString().equals(testPath)) {
        LOG.info("Recreate dir " + grandestNonRootParent
            + " testpath: " + testPath);
        fs.mkdirs(grandestNonRootParent);
      }
      delayer.proceed();
      LOG.info("Now wait for result");
      delayer.waitForResult();
      Throwable t = delayer.getThrown();
      if (t != null) {
        LOG.info("Result exception (snapshot: " + hasSnapshot + "): " + t);
      }
    } // end of loop over each test path

    LOG.info("Now check we can restart");
    cluster.restartNameNodes();
    LOG.info("Restart finished");
  } finally {
    if (stm != null) {
      IOUtils.closeStream(stm);
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
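The Mockito stub above intercepts the DatanodeProtocol RPC whose declaration, paraphrased from hadoop-hdfs, is shown below. The matchers line up with these parameters in order, and the DatanodeID[] argument is where this page's class appears: it carries the datanodes holding the replicas produced by block recovery.

// DatanodeProtocol method intercepted by the stub above (paraphrased
// declaration; parameter names follow the HDFS sources).
void commitBlockSynchronization(ExtendedBlock block,
    long newgenerationstamp, long newlength,
    boolean closeFile, boolean deleteblock,
    DatanodeID[] newtargets,       // DNs holding the recovered replicas
    String[] newtargetstorages)    // storage IDs on those DNs
    throws IOException;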
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
The class TestCommitBlockSynchronization, method testCommitBlockSynchronization2.
@Test
public void testCommitBlockSynchronization2() throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[0];
  ExtendedBlock lastBlock = new ExtendedBlock();

  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, false, false, newTargets, null);

  // Make sure the call fails if the generation stamp does not match
  // the block recovery ID.
  try {
    namesystemSpy.commitBlockSynchronization(
        lastBlock, genStamp - 1, length, false, false, newTargets, null);
    fail("Failed to get expected IOException on generation stamp/"
        + "recovery ID mismatch");
  } catch (IOException ioe) {
    // Expected exception.
  }
}
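The IOException the test expects comes from the recovery-ID check inside FSNamesystem.commitBlockSynchronization. A paraphrased sketch of that guard follows; the variable names (recoveryId, uc, oldBlock) are approximations of the HDFS sources, not verbatim code.

// Paraphrased guard: the new generation stamp must equal the recovery ID
// issued when recovery started, so the second call above (genStamp - 1)
// is rejected with an IOException.
if (recoveryId != uc.getBlockRecoveryId()) {
  throw new IOException("The recovery id " + recoveryId
      + " does not match current recovery id "
      + uc.getBlockRecoveryId() + " for block " + oldBlock);
}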
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
The class TestCommitBlockSynchronization, method testCommitBlockSynchronizationWithClose.
@Test
public void testCommitBlockSynchronizationWithClose() throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[0];
  ExtendedBlock lastBlock = new ExtendedBlock();

  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);

  // Repeat the call to make sure it returns true.
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);

  BlockInfo completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
  completedBlockInfo.setBlockCollectionId(file.getId());
  completedBlockInfo.setGenerationStamp(genStamp);
  doReturn(completedBlockInfo).when(namesystemSpy)
      .getStoredBlock(any(Block.class));
  doReturn(completedBlockInfo).when(file).getLastBlock();

  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);
}
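The repeated calls succeed because commitBlockSynchronization is idempotent once the block has been committed. A heavily paraphrased sketch of the early-return path, with approximated names (storedBlock, isComplete) rather than verbatim HDFS code:

// Paraphrased early return: if a previous call already completed the
// stored block, there is nothing left to commit, so the method returns
// instead of throwing.
if (storedBlock.isComplete()) {
  return; // recovery for this block was already committed
}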