use of org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol in project hadoop by apache.
the class TestBlockRecovery method testFinalizedRbwReplicas.
/**
 * BlockRecovery_02.9.
 * One replica is Finalized and another is RBW.
 * @throws IOException in case of an error
 */
@Test(timeout = 60000)
public void testFinalizedRbwReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }

  // rbw and finalized replicas have the same length
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(
      BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(
      BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.RBW);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);

  // rbw replica has a different length from the finalized one
  replica1 = new ReplicaRecoveryInfo(
      BLOCK_ID, REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED);
  replica2 = new ReplicaRecoveryInfo(
      BLOCK_ID, REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RBW);
  dn1 = mock(InterDatanodeProtocol.class);
  dn2 = mock(InterDatanodeProtocol.class);
  testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
  verify(dn2, never()).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
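For context, the verification pattern above can be reduced to a small standalone sketch. The class below is illustrative only (it is not part of TestBlockRecovery, and the block identity values are placeholders): it shows how a mocked InterDatanodeProtocol lets a test assert, via verify and never, which datanodes were asked to update their replica under recovery.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;

class ReplicaSyncVerificationSketch {
  static void demo() throws IOException {
    // Placeholder block identity; the real test uses BLOCK_ID, RECOVERY_ID, etc.
    ExtendedBlock block = new ExtendedBlock("bpid", 1L, 1024L, 10L);
    long recoveryId = 11L;

    InterDatanodeProtocol syncedDn = mock(InterDatanodeProtocol.class);
    InterDatanodeProtocol skippedDn = mock(InterDatanodeProtocol.class);

    // Stand-in for the recovery path under test: only the first datanode is
    // asked to bring its replica to the agreed-upon length.
    syncedDn.updateReplicaUnderRecovery(block, recoveryId, block.getBlockId(), 1024L);

    // Assert which datanodes received the call, exactly as the test above does.
    verify(syncedDn).updateReplicaUnderRecovery(block, recoveryId, block.getBlockId(), 1024L);
    verify(skippedDn, never())
        .updateReplicaUnderRecovery(block, recoveryId, block.getBlockId(), 1024L);
  }
}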
use of org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol in project hadoop by apache.
the class TestInterDatanodeProtocol method checkBlockMetaDataInfo.
/**
 * The following test first creates a file.
 * It verifies the block information from a datanode.
 * Then, it updates the block with new information and verifies again.
 * @param useDnHostname whether DNs should connect to other DNs by hostname
 */
private void checkBlockMetaDataInfo(boolean useDnHostname) throws Exception {
  MiniDFSCluster cluster = null;

  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, useDnHostname);
  if (useDnHostname) {
    // Since the mini cluster only listens on the loopback we have to
    // ensure the hostname used to access DNs maps to the loopback. We
    // do this by telling the DN to advertise localhost as its hostname
    // instead of the default hostname.
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
  }

  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .checkDataNodeHostConfig(true)
        .build();
    cluster.waitActive();

    // create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);
    assertTrue(dfs.exists(filepath));

    // get block info
    LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    assertTrue(datanodeinfo.length > 0);

    // connect to a data node
    DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    InterDatanodeProtocol idp = DataNodeTestUtils.createInterDatanodeProtocolProxy(
        datanode, datanodeinfo[0], conf, useDnHostname);

    // Stop the block scanners.
    datanode.getBlockScanner().removeAllVolumeScanners();

    // verify BlockMetaDataInfo
    ExtendedBlock b = locatedblock.getBlock();
    InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
    checkMetaInfo(b, datanode);
    long recoveryId = b.getGenerationStamp() + 1;
    idp.initReplicaRecovery(new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));

    // verify updateBlock
    ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(),
        b.getNumBytes() / 2, b.getGenerationStamp() + 1);
    idp.updateReplicaUnderRecovery(b, recoveryId, b.getBlockId(), newblock.getNumBytes());
    checkMetaInfo(newblock, datanode);

    // Verify correct null response trying to init recovery for a missing block
    ExtendedBlock badBlock = new ExtendedBlock("fake-pool", b.getBlockId(), 0, 0);
    assertNull(idp.initReplicaRecovery(
        new RecoveringBlock(badBlock, locatedblock.getLocations(), recoveryId)));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
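As a usage note, a private helper like checkBlockMetaDataInfo is normally driven from two public JUnit entry points, one per connection mode. The sketch below is an assumption about how those drivers might look inside the same test class; the method names are illustrative and not taken from the listing above.

@Test
public void testBlockMetaDataInfo() throws Exception {
  // Datanodes reach each other by IP address (the default).
  checkBlockMetaDataInfo(false);
}

@Test
public void testBlockMetaDataInfoWithHostname() throws Exception {
  // Datanodes reach each other by hostname; requires that "localhost"
  // resolves on the test host (see the conf override in the helper).
  checkBlockMetaDataInfo(true);
}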
use of org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol in project hadoop by apache.
the class TestInterDatanodeProtocol method testInterDNProtocolTimeout.
/** Test to verify that the InterDatanode RPC times out as expected when
 * the server DN does not respond.
 */
@Test(expected = SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId).build();
  InterDatanodeProtocol proxy = null;

  try {
    proxy = DataNode.createInterDataNodeProtocolProxy(dInfo, conf, 500, false);
    proxy.initReplicaRecovery(new RecoveringBlock(new ExtendedBlock("bpid", 1), null, 100));
    fail("Expected SocketTimeoutException exception, but did not get.");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
use of org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol in project hadoop by apache.
the class DataNode method createInterDataNodeProtocolProxy.
public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
    DatanodeID datanodeid, final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  final InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }
  final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
  try {
    return loginUgi.doAs(new PrivilegedExceptionAction<InterDatanodeProtocol>() {
      @Override
      public InterDatanodeProtocol run() throws IOException {
        return new InterDatanodeProtocolTranslatorPB(addr, loginUgi, conf,
            NetUtils.getDefaultSocketFactory(conf), socketTimeout);
      }
    });
  } catch (InterruptedException ie) {
    throw new IOException(ie.getMessage());
  }
}
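A hedged usage sketch of this factory, modeled on the tests earlier on this page: each proxy is created for one target datanode, used for a single initReplicaRecovery call, and always released with RPC.stopProxy; a SocketTimeoutException from one datanode is treated as a per-node failure rather than aborting the loop. The class and method names are placeholders, and this is not the actual block-recovery code in DataNode.

import java.io.IOException;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.ipc.RPC;

class InterDatanodeProxyUsageSketch {
  static List<ReplicaRecoveryInfo> initRecoveryOn(DatanodeInfo[] targets,
      RecoveringBlock rBlock, Configuration conf, int socketTimeout)
      throws IOException {
    List<ReplicaRecoveryInfo> replicas = new ArrayList<>();
    for (DatanodeInfo target : targets) {
      InterDatanodeProtocol proxy = null;
      try {
        proxy = DataNode.createInterDataNodeProtocolProxy(
            target, conf, socketTimeout, false);
        // initReplicaRecovery returns null when the target has no replica of
        // this block (see the badBlock check in the test above).
        ReplicaRecoveryInfo info = proxy.initReplicaRecovery(rBlock);
        if (info != null) {
          replicas.add(info);
        }
      } catch (SocketTimeoutException ste) {
        // The target did not respond within socketTimeout; skip it and let
        // the remaining replicas drive the recovery.
      } finally {
        if (proxy != null) {
          RPC.stopProxy(proxy);
        }
      }
    }
    return replicas;
  }
}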