Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
From the class TestDatanodeRegistration, method testRegistrationWithDifferentSoftwareVersions.
@Test
public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "3.0.0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, "3.0.0");
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
    long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
    StorageInfo mockStorageInfo = mock(StorageInfo.class);
    doReturn(nnCTime).when(mockStorageInfo).getCTime();
    DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
    doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
    doReturn("127.0.0.1").when(mockDnReg).getIpAddr();
    doReturn(123).when(mockDnReg).getXferPort();
    doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
    doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
    // Should succeed when software versions are the same.
    doReturn("3.0.0").when(mockDnReg).getSoftwareVersion();
    rpcServer.registerDatanode(mockDnReg);
    // Should succeed when the software version of the DN is above the minimum required by the NN.
    doReturn("4.0.0").when(mockDnReg).getSoftwareVersion();
    rpcServer.registerDatanode(mockDnReg);
    // Should fail when the software version of the DN is below the minimum required by the NN.
    doReturn("2.0.0").when(mockDnReg).getSoftwareVersion();
    try {
      rpcServer.registerDatanode(mockDnReg);
      fail("Should not have been able to register DN with too-low version.");
    } catch (IncorrectVersionException ive) {
      GenericTestUtils.assertExceptionContains("The reported DataNode version is too low", ive);
      LOG.info("Got expected exception", ive);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
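The minimum-version gate this test exercises reduces to a string-version comparison. Below is a minimal sketch of an equivalent check, assuming the NameNode's policy matches org.apache.hadoop.util.VersionUtil semantics; the class and helper method names are illustrative, not NameNode code.

import org.apache.hadoop.util.VersionUtil;

public class VersionGateSketch {

  // Returns true when the reported DataNode version is at least the
  // configured minimum; compareVersions is negative when its first
  // argument is the older version.
  static boolean isDnVersionAcceptable(String dnVersion, String minimumVersion) {
    return VersionUtil.compareVersions(dnVersion, minimumVersion) >= 0;
  }

  public static void main(String[] args) {
    System.out.println(isDnVersionAcceptable("4.0.0", "3.0.0")); // true: registration accepted
    System.out.println(isDnVersionAcceptable("2.0.0", "3.0.0")); // false: registration rejected
  }
}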
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
From the class TestEditLogRace, method testEditLogRolling.
/**
* Tests rolling edit logs while transactions are ongoing.
*/
@Test
public void testEditLogRolling() throws Exception {
  // start a cluster
  Configuration conf = getConf();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
  FileSystem fileSys = null;
  AtomicReference<Throwable> caughtErr = new AtomicReference<Throwable>();
  try {
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final NamenodeProtocols nn = cluster.getNameNode().getRpcServer();
    FSImage fsimage = cluster.getNamesystem().getFSImage();
    StorageDirectory sd = fsimage.getStorage().getStorageDir(0);
    startTransactionWorkers(cluster, caughtErr);
    long previousLogTxId = 1;
    for (int i = 0; i < NUM_ROLLS && caughtErr.get() == null; i++) {
      try {
        Thread.sleep(20);
      } catch (InterruptedException e) {
      }
      LOG.info("Starting roll " + i + ".");
      CheckpointSignature sig = nn.rollEditLog();
      long nextLog = sig.curSegmentTxId;
      String logFileName = NNStorage.getFinalizedEditsFileName(previousLogTxId, nextLog - 1);
      previousLogTxId += verifyEditLogs(cluster.getNamesystem(), fsimage, logFileName, previousLogTxId);
      assertEquals(previousLogTxId, nextLog);
      File expectedLog = NNStorage.getInProgressEditsFile(sd, previousLogTxId);
      assertTrue("Expect " + expectedLog + " to exist", expectedLog.exists());
    }
  } finally {
    stopTransactionWorkers();
    if (caughtErr.get() != null) {
      throw new RuntimeException(caughtErr.get());
    }
    if (fileSys != null)
      fileSys.close();
    if (cluster != null)
      cluster.shutdown();
  }
}
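The file names the rolling loop asserts on follow NNStorage's naming convention: a finalized segment covers an inclusive txid range, and the freshly rolled segment starts life as an in-progress file. A minimal sketch, with illustrative transaction ids:

import org.apache.hadoop.hdfs.server.namenode.NNStorage;

public class EditsNamingSketch {

  public static void main(String[] args) {
    // A finalized segment covers the inclusive txid range [first, last],
    // printing something like edits_0000000000000000001-0000000000000000041
    // (zero-padded txids).
    System.out.println(NNStorage.getFinalizedEditsFileName(1, 41));
    // The segment opened by the roll starts as an in-progress file,
    // printing something like edits_inprogress_0000000000000000042.
    System.out.println(NNStorage.getInProgressEditsFileName(42));
  }
}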
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
From the class TestDFSClientRetries, method testIdempotentAllocateBlockAndClose.
/**
* Test that getAdditionalBlock() and close() are idempotent. This allows
* a client to safely retry a call and still produce a correct
* file. See HDFS-3031.
*/
@Test
public void testIdempotentAllocateBlockAndClose() throws Exception {
  final String src = "/testIdempotentAllocateBlock";
  Path file = new Path(src);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
    NamenodeProtocols spyNN = spy(preSpyNN);
    DFSClient client = new DFSClient(null, spyNN, conf, null);
    // Make the call to addBlock() get called twice, as if it were retried
    // due to an IPC issue.
    doAnswer(new Answer<LocatedBlock>() {

      private int getBlockCount(LocatedBlock ret) throws IOException {
        LocatedBlocks lb = cluster.getNameNodeRpc().getBlockLocations(src, 0, Long.MAX_VALUE);
        assertEquals(lb.getLastLocatedBlock().getBlock(), ret.getBlock());
        return lb.getLocatedBlocks().size();
      }

      @Override
      public LocatedBlock answer(InvocationOnMock invocation) throws Throwable {
        LOG.info("Called addBlock: " + Arrays.toString(invocation.getArguments()));
        // First call: wrap NotReplicatedYetException in a RemoteException,
        // as the RPC layer does.
        final LocatedBlock ret;
        try {
          ret = (LocatedBlock) invocation.callRealMethod();
        } catch (NotReplicatedYetException e) {
          throw new RemoteException(e.getClass().getName(), e.getMessage());
        }
        final int blockCount = getBlockCount(ret);
        // Retrying should abandon the old block and allocate a new one at
        // the end of the file; it should not throw NotReplicatedYetException.
        final LocatedBlock ret2;
        try {
          ret2 = (LocatedBlock) invocation.callRealMethod();
        } catch (NotReplicatedYetException e) {
          throw new AssertionError("Unexpected exception", e);
        }
        final int blockCount2 = getBlockCount(ret2);
        // We shouldn't have gained an extra block by the retried RPC.
        assertEquals(blockCount, blockCount2);
        return ret2;
      }
    }).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(), Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any(), Mockito.anyLong(), Mockito.<String[]>any(), Mockito.<EnumSet<AddBlockFlag>>any());
    doAnswer(new Answer<Boolean>() {

      @Override
      public Boolean answer(InvocationOnMock invocation) throws Throwable {
        // complete() may return false a few times before it returns true.
        // Wait until it returns true, then make it retry one more time.
        LOG.info("Called complete:");
        if (!(Boolean) invocation.callRealMethod()) {
          LOG.info("Complete call returned false, not faking a retry RPC");
          return false;
        }
        // We got a successful close. Call it again to check idempotence.
        try {
          boolean ret = (Boolean) invocation.callRealMethod();
          LOG.info("Complete call returned true, faked second RPC. Returned: " + ret);
          return ret;
        } catch (Throwable t) {
          LOG.error("Idempotent retry threw exception", t);
          throw t;
        }
      }
    }).when(spyNN).complete(Mockito.anyString(), Mockito.anyString(), Mockito.<ExtendedBlock>any(), anyLong());
    OutputStream stm = client.create(file.toString(), true);
    try {
      AppendTestUtil.write(stm, 0, 10000);
      stm.close();
      stm = null;
    } finally {
      IOUtils.cleanup(LOG, stm);
    }
    // Make sure the mocks were actually properly injected.
    Mockito.verify(spyNN, Mockito.atLeastOnce()).addBlock(Mockito.anyString(), Mockito.anyString(), Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any(), Mockito.anyLong(), Mockito.<String[]>any(), Mockito.<EnumSet<AddBlockFlag>>any());
    Mockito.verify(spyNN, Mockito.atLeastOnce()).complete(Mockito.anyString(), Mockito.anyString(), Mockito.<ExtendedBlock>any(), anyLong());
    AppendTestUtil.check(fs, file, 10000);
  } finally {
    cluster.shutdown();
  }
}
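The retry injection above boils down to one Mockito pattern: spy a real object and, inside doAnswer, invoke the real method twice to simulate an RPC the client retried. A minimal sketch of that pattern on a plain List; the target object is a stand-in for illustration, not the HDFS test's NameNode.

import static org.mockito.Mockito.*;

import java.util.ArrayList;
import java.util.List;

public class DoubleCallSketch {

  public static void main(String[] args) {
    // The doAnswer(...).when(spy) form stubs without invoking the real
    // method during stubbing, which is required when stubbing spies.
    List<String> spied = spy(new ArrayList<String>());
    doAnswer(invocation -> {
      invocation.callRealMethod();        // the "original" RPC
      return invocation.callRealMethod(); // the faked retry
    }).when(spied).add(anyString());
    spied.add("x");
    // The element was added twice, as if the client retried the call.
    System.out.println(spied.size()); // 2
  }
}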
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
From the class TestDFSClientRetries, method testDFSClientConfigurationLocateFollowingBlockInitialDelay.
@Test
public void testDFSClientConfigurationLocateFollowingBlockInitialDelay() throws Exception {
  // If HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY
  // is not configured, verify that DFSClient uses the default value of 400.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    DFSClient client = new DFSClient(null, nn, conf, null);
    assertEquals(client.getConf().getBlockWriteLocateFollowingInitialDelayMs(), 400);
    // Change HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY
    // and verify that DFSClient uses the configured value of 1000.
    conf.setInt(HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY, 1000);
    client = new DFSClient(null, nn, conf, null);
    assertEquals(client.getConf().getBlockWriteLocateFollowingInitialDelayMs(), 1000);
  } finally {
    cluster.shutdown();
  }
}
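For context, a sketch of why the initial delay matters: the client waits between locate-following-block retries, with the wait growing from the configured initial value. The doubling schedule below is an assumption for illustration, not a quote of DFSOutputStream internals.

public class BackoffSketch {

  public static void main(String[] args) {
    long delayMs = 400; // the default the test asserts
    for (int retry = 0; retry < 5; retry++) {
      System.out.println("retry " + retry + ": wait " + delayMs + " ms");
      delayMs *= 2; // assumed exponential backoff between retries
    }
  }
}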
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
From the class TestDFSMkdirs, method testMkdirRpcNonCanonicalPath.
/**
* Regression test for HDFS-3626. Creates a file using a non-canonical path
* (i.e. with extra slashes between components) and makes sure that the NN
* rejects it.
*/
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  try {
    NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
    for (String pathStr : NON_CANONICAL_PATHS) {
      try {
        nnrpc.mkdirs(pathStr, new FsPermission((short) 0755), true);
        fail("Did not fail when called with a non-canonicalized path: " + pathStr);
      } catch (InvalidPathException ipe) {
        // expected
      }
    }
  } finally {
    cluster.shutdown();
  }
}
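Why the test issues the RPC directly rather than going through FileSystem: the client-side Path class canonicalizes paths (collapsing duplicate slashes, for example), so only a raw RPC can deliver a non-canonical path to the NameNode. A minimal sketch with an illustrative input, not necessarily one of the test's NON_CANONICAL_PATHS constants:

import org.apache.hadoop.fs.Path;

public class PathNormalizationSketch {

  public static void main(String[] args) {
    // Path collapses the duplicate slash client-side, printing /a/b,
    // which is why the NN-side rejection can only be hit via the raw RPC.
    System.out.println(new Path("/a//b"));
  }
}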