Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestFsck, method testFsckListCorruptSnapshotFiles.
/**
 * Test that corrupted snapshot files are listed with full dir.
 */
@Test
public void testFsckListCorruptSnapshotFiles() throws Exception {
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  DistributedFileSystem hdfs = null;
  final short replFactor = 1;
  int numFiles = 3;
  int numSnapshots = 0;
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  DFSTestUtil util = new DFSTestUtil.Builder()
      .setName("testGetCorruptFiles")
      .setNumFiles(numFiles)
      .setMaxLevels(1)
      .setMaxSize(1024)
      .build();
  util.createFiles(hdfs, "/corruptData", (short) 1);
  final Path fp = new Path("/corruptData/file");
  util.createFile(hdfs, fp, 1024, replFactor, 1000L);
  numFiles++;
  util.waitReplication(hdfs, "/corruptData", (short) 1);
  hdfs.allowSnapshot(new Path("/corruptData"));
  hdfs.createSnapshot(new Path("/corruptData"), "mySnapShot");
  numSnapshots = numFiles;
  String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
  System.out.println("1. good fsck out: " + outStr);
  assertTrue(outStr.contains("has 0 CORRUPT files"));
  // delete the blocks
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  for (int i = 0; i < numFiles; i++) {
    for (int j = 0; j <= 1; j++) {
      File storageDir = cluster.getInstanceStorageDir(i, j);
      File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
      if (metadataFiles == null) {
        continue;
      }
      for (File metadataFile : metadataFiles) {
        File blockFile = Block.metaToBlockFile(metadataFile);
        assertTrue("Cannot remove file.", blockFile.delete());
        assertTrue("Cannot remove file.", metadataFile.delete());
      }
    }
  }
  // Delete file when it has a snapshot
  hdfs.delete(fp, false);
  numFiles--;
  // wait for the namenode to see the corruption
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  CorruptFileBlocks corruptFileBlocks =
      namenode.listCorruptFileBlocks("/corruptData", null);
  int numCorrupt = corruptFileBlocks.getFiles().length;
  while (numCorrupt == 0) {
    Thread.sleep(1000);
    corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData", null);
    numCorrupt = corruptFileBlocks.getFiles().length;
  }
  // with -includeSnapshots all files are reported
  outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks", "-includeSnapshots");
  System.out.println("2. bad fsck include snapshot out: " + outStr);
  assertTrue(outStr.contains("has " + (numFiles + numSnapshots) + " CORRUPT files"));
  assertTrue(outStr.contains("/.snapshot/"));
  // without -includeSnapshots only non-snapshots are reported
  outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
  System.out.println("3. bad fsck exclude snapshot out: " + outStr);
  assertTrue(outStr.contains("has " + numFiles + " CORRUPT files"));
  assertFalse(outStr.contains("/.snapshot/"));
}
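The runFsck(...) calls above use a helper defined elsewhere in TestFsck. A minimal sketch of what such a helper does, assuming the usual DFSck/ToolRunner pattern (names and details below are illustrative, not the verbatim Hadoop source): it runs the fsck tool in-process against the given path and arguments, captures the console report, optionally asserts on the exit code, and returns the report text so the test can assert on its contents.

  // Illustrative sketch only; the real helper lives in TestFsck.
  // Assumed imports: java.io.ByteArrayOutputStream, java.io.PrintStream,
  // org.apache.hadoop.hdfs.tools.DFSck, org.apache.hadoop.util.ToolRunner.
  static String runFsck(Configuration conf, int expectedErrCode,
      boolean checkErrorCode, String... path) throws Exception {
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bStream, true);
    // Run the fsck tool in-process and capture everything it prints.
    int errCode = ToolRunner.run(new DFSck(conf, out), path);
    if (checkErrorCode) {
      assertEquals(expectedErrCode, errCode);
    }
    return bStream.toString();
  }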
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestStartup, method testCompression.
@Test
public void testCompression() throws IOException {
  LOG.info("Test compressing image.");
  Configuration conf = new Configuration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
  File base_dir = new File(PathUtils.getTestDir(getClass()), "dfs/");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(base_dir, "name").getPath());
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
  DFSTestUtil.formatNameNode(conf);
  // create an uncompressed image
  LOG.info("Create an uncompressed fsimage");
  NameNode namenode = new NameNode(conf);
  namenode.getNamesystem().mkdirs("/test",
      new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  assertTrue(nnRpc.getFileInfo("/test").isDir());
  nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  nnRpc.saveNamespace(0, 0);
  namenode.stop();
  namenode.join();
  namenode.joinHttpServer();
  // compress the image using the default codec
  LOG.info("Read an uncompressed image and store it compressed using the default codec.");
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  checkNameSpace(conf);
  // read the image compressed with the default codec and compress it using the Gzip codec
  LOG.info("Read a compressed image and store it using a different codec.");
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, "org.apache.hadoop.io.compress.GzipCodec");
  checkNameSpace(conf);
  // read an image compressed with Gzip and store it uncompressed
  LOG.info("Read a compressed image and store it as uncompressed.");
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
  checkNameSpace(conf);
  // read an uncompressed image and store it uncompressed
  LOG.info("Read an uncompressed image and store it as uncompressed.");
  checkNameSpace(conf);
}
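checkNameSpace(conf) is a private helper of TestStartup that is not shown above. A minimal sketch of what it presumably does, mirroring the restart-verify-save pattern in the body of the test (illustrative, not the verbatim helper):

  // Illustrative sketch only; the real helper lives in TestStartup.
  private void checkNameSpace(Configuration conf) throws IOException {
    // Restart the NameNode with the current compression settings, verify the
    // directory created earlier is still there, then save a fresh fsimage.
    NameNode namenode = new NameNode(conf);
    NamenodeProtocols nnRpc = namenode.getRpcServer();
    assertTrue(nnRpc.getFileInfo("/test").isDir());
    nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    nnRpc.saveNamespace(0, 0);
    namenode.stop();
    namenode.join();
    namenode.joinHttpServer();
  }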
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestRequestHedgingProxyProvider, method testHedgingWhenConnectException.
@Test
public void testHedgingWhenConnectException() throws Exception {
  NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(active.getStats()).thenThrow(new ConnectException());
  NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(standby.getStats()).thenThrow(
      new RemoteException("org.apache.hadoop.ipc.StandbyException", "Standby NameNode"));
  RequestHedgingProxyProvider<NamenodeProtocols> provider =
      new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
          createFactory(active, standby));
  try {
    provider.getProxy().proxy.getStats();
    Assert.fail("Should fail since the active namenode throws ConnectException!");
  } catch (MultiException me) {
    for (Exception ex : me.getExceptions().values()) {
      if (ex instanceof RemoteException) {
        Exception rEx = ((RemoteException) ex).unwrapRemoteException();
        Assert.assertTrue("Unexpected RemoteException: " + rEx.getMessage(),
            rEx instanceof StandbyException);
      } else {
        Assert.assertTrue(ex instanceof ConnectException);
      }
    }
  }
  Mockito.verify(active).getStats();
  Mockito.verify(standby).getStats();
}
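Both hedging tests pass their mocks through a createFactory(...) helper that is defined elsewhere in TestRequestHedgingProxyProvider. A minimal sketch of such a factory, assuming an HAProxyFactory implementation that simply hands out the given mocks in order, one per proxy the provider creates (illustrative, not the verbatim helper):

  // Illustrative sketch only; the real helper lives in TestRequestHedgingProxyProvider.
  private HAProxyFactory<NamenodeProtocols> createFactory(
      final NamenodeProtocols... protos) {
    final Iterator<NamenodeProtocols> iterator = Arrays.asList(protos).iterator();
    return new HAProxyFactory<NamenodeProtocols>() {

      @Override
      public NamenodeProtocols createProxy(Configuration conf, InetSocketAddress nnAddr,
          Class<NamenodeProtocols> xface, UserGroupInformation ugi, boolean withRetries,
          AtomicBoolean fallbackToSimpleAuth) throws IOException {
        // Hand out the mocks in the order they were passed in.
        return iterator.next();
      }

      @Override
      public NamenodeProtocols createProxy(Configuration conf, InetSocketAddress nnAddr,
          Class<NamenodeProtocols> xface, UserGroupInformation ugi, boolean withRetries)
          throws IOException {
        return iterator.next();
      }
    };
  }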
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestRequestHedgingProxyProvider, method testPerformFailoverWith3Proxies.
@Test
public void testPerformFailoverWith3Proxies() throws Exception {
  conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2,nn3");
  conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn3", "machine3.foo.bar:9820");
  final AtomicInteger counter = new AtomicInteger(0);
  final int[] isGood = { 1 };
  final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {

    @Override
    public long[] answer(InvocationOnMock invocation) throws Throwable {
      counter.incrementAndGet();
      if (isGood[0] == 1) {
        Thread.sleep(1000);
        return new long[] { 1 };
      }
      throw new IOException("Was Good mock !!");
    }
  });
  final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(badMock.getStats()).thenAnswer(new Answer<long[]>() {

    @Override
    public long[] answer(InvocationOnMock invocation) throws Throwable {
      counter.incrementAndGet();
      if (isGood[0] == 2) {
        Thread.sleep(1000);
        return new long[] { 2 };
      }
      throw new IOException("Bad mock !!");
    }
  });
  final NamenodeProtocols worseMock = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(worseMock.getStats()).thenAnswer(new Answer<long[]>() {

    @Override
    public long[] answer(InvocationOnMock invocation) throws Throwable {
      counter.incrementAndGet();
      if (isGood[0] == 3) {
        Thread.sleep(1000);
        return new long[] { 3 };
      }
      throw new IOException("Worse mock !!");
    }
  });
  RequestHedgingProxyProvider<NamenodeProtocols> provider =
      new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
          createFactory(goodMock, badMock, worseMock));
  long[] stats = provider.getProxy().proxy.getStats();
  Assert.assertTrue(stats.length == 1);
  Assert.assertEquals(1, stats[0]);
  Assert.assertEquals(3, counter.get());
  Mockito.verify(badMock).getStats();
  Mockito.verify(goodMock).getStats();
  Mockito.verify(worseMock).getStats();
  stats = provider.getProxy().proxy.getStats();
  Assert.assertTrue(stats.length == 1);
  Assert.assertEquals(1, stats[0]);
  // Ensure only the previously successful proxy is invoked
  Mockito.verifyNoMoreInteractions(badMock);
  Mockito.verifyNoMoreInteractions(worseMock);
  Assert.assertEquals(4, counter.get());
  // Flip to standby, so now this should fail
  isGood[0] = 2;
  try {
    provider.getProxy().proxy.getStats();
    Assert.fail("Should fail since previously successful proxy now fails ");
  } catch (Exception ex) {
    Assert.assertTrue(ex instanceof IOException);
  }
  Assert.assertEquals(5, counter.get());
  provider.performFailover(provider.getProxy().proxy);
  stats = provider.getProxy().proxy.getStats();
  Assert.assertTrue(stats.length == 1);
  Assert.assertEquals(2, stats[0]);
  // Counter updates twice since both remaining proxies are tried on failure
  Assert.assertEquals(7, counter.get());
  stats = provider.getProxy().proxy.getStats();
  Assert.assertTrue(stats.length == 1);
  Assert.assertEquals(2, stats[0]);
  // Counter updates only once now
  Assert.assertEquals(8, counter.get());
  // Flip to the other standby, so now this should fail
  isGood[0] = 3;
  try {
    provider.getProxy().proxy.getStats();
    Assert.fail("Should fail since previously successful proxy now fails ");
  } catch (Exception ex) {
    Assert.assertTrue(ex instanceof IOException);
  }
  // Counter should update only once
  Assert.assertEquals(9, counter.get());
  provider.performFailover(provider.getProxy().proxy);
  stats = provider.getProxy().proxy.getStats();
  Assert.assertTrue(stats.length == 1);
  // Ensure the correct proxy was called
  Assert.assertEquals(3, stats[0]);
  // Counter updates twice since both remaining proxies are tried on failure
  Assert.assertEquals(11, counter.get());
  stats = provider.getProxy().proxy.getStats();
  Assert.assertTrue(stats.length == 1);
  Assert.assertEquals(3, stats[0]);
  // Counter updates only once now
  Assert.assertEquals(12, counter.get());
}
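For context, the provider exercised above is enabled on a real HDFS client purely through configuration. A minimal sketch, where the nameservice "mycluster" and the host names are made up for illustration:

  // All names and addresses below are illustrative.
  Configuration conf = new HdfsConfiguration();
  conf.set("dfs.nameservices", "mycluster");
  conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
  conf.set("dfs.namenode.rpc-address.mycluster.nn1", "nn1.example.com:9820");
  conf.set("dfs.namenode.rpc-address.mycluster.nn2", "nn2.example.com:9820");
  // The hedging provider invokes the configured NameNodes concurrently and then
  // sticks with the first one that answers successfully, as the tests above show.
  conf.set("dfs.client.failover.proxy.provider.mycluster",
      "org.apache.hadoop.hdfs.server.namenode.ha.RequestHedgingProxyProvider");
  FileSystem fs = FileSystem.get(URI.create("hdfs://mycluster"), conf);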
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestOpenFilesWithSnapshot, method testOpenFilesWithRename.
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  // check for zero sized blocks
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // create one empty block
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      HdfsConstants.GRANDFATHER_INODE_ID, null, null);
  fs.createSnapshot(path, "s2");
  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  restartNameNode();
}
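doWriteAndAbort(...) and restartNameNode() are helpers defined elsewhere in TestOpenFilesWithSnapshot; the former leaves files open under a snapshotted directory, and the latter forces that open-file state through an fsimage round trip. A minimal sketch of the restart helper, assuming the usual save-namespace-then-restart pattern (illustrative, not the verbatim helper):

  // Illustrative sketch only; the real helper lives in TestOpenFilesWithSnapshot.
  private void restartNameNode() throws Exception {
    cluster.triggerBlockReports();
    NameNode nameNode = cluster.getNameNode();
    // Save the namespace so the open-file and snapshot state ends up in the
    // fsimage, then restart the NameNode and make sure it loads that image.
    NameNodeAdapter.enterSafeMode(nameNode, false);
    NameNodeAdapter.saveNamespace(nameNode);
    NameNodeAdapter.leaveSafeMode(nameNode);
    cluster.restartNameNode(true);
  }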