
Example 36 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

The class TestFsck, method testFsckListCorruptSnapshotFiles.

/**
   * Test that corrupted snapshot files are listed with the full directory path.
   */
@Test
public void testFsckListCorruptSnapshotFiles() throws Exception {
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    DistributedFileSystem hdfs = null;
    final short replFactor = 1;
    int numFiles = 3;
    int numSnapshots = 0;
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    hdfs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder()
        .setName("testGetCorruptFiles")
        .setNumFiles(numFiles)
        .setMaxLevels(1)
        .setMaxSize(1024)
        .build();
    util.createFiles(hdfs, "/corruptData", (short) 1);
    final Path fp = new Path("/corruptData/file");
    util.createFile(hdfs, fp, 1024, replFactor, 1000L);
    numFiles++;
    util.waitReplication(hdfs, "/corruptData", (short) 1);
    hdfs.allowSnapshot(new Path("/corruptData"));
    hdfs.createSnapshot(new Path("/corruptData"), "mySnapShot");
    numSnapshots = numFiles;
    String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < numFiles; i++) {
        for (int j = 0; j <= 1; j++) {
            File storageDir = cluster.getInstanceStorageDir(i, j);
            File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
            List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
            if (metadataFiles == null) {
                continue;
            }
            for (File metadataFile : metadataFiles) {
                File blockFile = Block.metaToBlockFile(metadataFile);
                assertTrue("Cannot remove file.", blockFile.delete());
                assertTrue("Cannot remove file.", metadataFile.delete());
            }
        }
    }
    // Delete file when it has a snapshot
    hdfs.delete(fp, false);
    numFiles--;
    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
        Thread.sleep(1000);
        corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData", null);
        numCorrupt = corruptFileBlocks.getFiles().length;
    }
    // with -includeSnapshots all files are reported
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks", "-includeSnapshots");
    System.out.println("2. bad fsck include snapshot out: " + outStr);
    assertTrue(outStr.contains("has " + (numFiles + numSnapshots) + " CORRUPT files"));
    assertTrue(outStr.contains("/.snapshot/"));
    // without -includeSnapshots only non-snapshots are reported
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("3. bad fsck exclude snapshot out: " + outStr);
    assertTrue(outStr.contains("has " + numFiles + " CORRUPT files"));
    assertFalse(outStr.contains("/.snapshot/"));
}
Also used : Path(org.apache.hadoop.fs.Path) DFSTestUtil(org.apache.hadoop.hdfs.DFSTestUtil) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) CorruptFileBlocks(org.apache.hadoop.hdfs.protocol.CorruptFileBlocks) Matchers.anyString(org.mockito.Matchers.anyString) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)
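The polling loop in the middle of this test (repeatedly calling listCorruptFileBlocks and sleeping until numCorrupt is non-zero) can also be expressed with Hadoop's GenericTestUtils.waitFor helper. The sketch below is an optional alternative, not part of the original test; it assumes org.apache.hadoop.test.GenericTestUtils is on the test classpath (its Supplier parameter type differs across Hadoop versions, but a lambda works with either).

import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.test.GenericTestUtils;

// Hedged sketch: block until the NameNode reports at least one corrupt file
// under /corruptData, polling once per second for up to 60 seconds.
static void waitForCorruptFiles(final NamenodeProtocols namenode)
        throws TimeoutException, InterruptedException {
    GenericTestUtils.waitFor(() -> {
        try {
            return namenode.listCorruptFileBlocks("/corruptData", null).getFiles().length > 0;
        } catch (Exception e) {
            // Treat transient RPC errors as "not yet corrupt" and keep polling.
            return false;
        }
    }, 1000, 60000);
}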

Example 37 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

The class TestStartup, method testCompression.

@Test
public void testCompression() throws IOException {
    LOG.info("Test compressing image.");
    Configuration conf = new Configuration();
    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
    File base_dir = new File(PathUtils.getTestDir(getClass()), "dfs/");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(base_dir, "name").getPath());
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
    DFSTestUtil.formatNameNode(conf);
    // create an uncompressed image
    LOG.info("Create an uncompressed fsimage");
    NameNode namenode = new NameNode(conf);
    namenode.getNamesystem().mkdirs("/test", new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
    NamenodeProtocols nnRpc = namenode.getRpcServer();
    assertTrue(nnRpc.getFileInfo("/test").isDir());
    nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    nnRpc.saveNamespace(0, 0);
    namenode.stop();
    namenode.join();
    namenode.joinHttpServer();
    // compress image using default codec
    LOG.info("Read an uncomressed image and store it compressed using default codec.");
    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
    checkNameSpace(conf);
    // read image compressed using the default and compress it using Gzip codec
    LOG.info("Read a compressed image and store it using a different codec.");
    conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, "org.apache.hadoop.io.compress.GzipCodec");
    checkNameSpace(conf);
    // read an image compressed in Gzip and store it uncompressed
    LOG.info("Read a compressed image and store it as uncompressed.");
    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
    checkNameSpace(conf);
    // read an uncompressed image and store it uncompressed
    LOG.info("Read an uncompressed image and store it as uncompressed.");
    checkNameSpace(conf);
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) NameNodeFile(org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) File(java.io.File) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus) Test(org.junit.Test)

Example 38 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

The class TestRequestHedgingProxyProvider, method testHedgingWhenConnectException.

@Test
public void testHedgingWhenConnectException() throws Exception {
    NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(active.getStats()).thenThrow(new ConnectException());
    NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(standby.getStats()).thenThrow(new RemoteException("org.apache.hadoop.ipc.StandbyException", "Standby NameNode"));
    RequestHedgingProxyProvider<NamenodeProtocols> provider = new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class, createFactory(active, standby));
    try {
        provider.getProxy().proxy.getStats();
        Assert.fail("Should fail since the active namenode throws" + " ConnectException!");
    } catch (MultiException me) {
        for (Exception ex : me.getExceptions().values()) {
            if (ex instanceof RemoteException) {
                Exception rEx = ((RemoteException) ex).unwrapRemoteException();
                Assert.assertTrue("Unexpected RemoteException: " + rEx.getMessage(), rEx instanceof StandbyException);
            } else {
                Assert.assertTrue(ex instanceof ConnectException);
            }
        }
    }
    Mockito.verify(active).getStats();
    Mockito.verify(standby).getStats();
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) StandbyException(org.apache.hadoop.ipc.StandbyException) RemoteException(org.apache.hadoop.ipc.RemoteException) MultiException(org.apache.hadoop.io.retry.MultiException) URISyntaxException(java.net.URISyntaxException) ConnectException(java.net.ConnectException) IOException(java.io.IOException) EOFException(java.io.EOFException) FileNotFoundException(java.io.FileNotFoundException) Test(org.junit.Test)
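For context, outside of unit tests RequestHedgingProxyProvider is enabled through the HA client failover settings rather than with mocks. A minimal client-side sketch follows; the nameservice id "mycluster" and the NameNode hostnames are placeholder assumptions, not values taken from the test above.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Hedged sketch: client configuration that routes HA failover through
// RequestHedgingProxyProvider. Nameservice id and hostnames are placeholders.
public static FileSystem newHedgingClient() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.mycluster.nn1", "nn1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn2", "nn2.example.com:8020");
    conf.set("dfs.client.failover.proxy.provider.mycluster",
        "org.apache.hadoop.hdfs.server.namenode.ha.RequestHedgingProxyProvider");
    return FileSystem.get(URI.create("hdfs://mycluster"), conf);
}

With this provider, the first request is fanned out to all configured NameNodes and the fastest successful response wins; subsequent requests stick to that NameNode until it fails, which is the behaviour the mocks above exercise.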

Example 39 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

The class TestRequestHedgingProxyProvider, method testPerformFailoverWith3Proxies.

@Test
public void testPerformFailoverWith3Proxies() throws Exception {
    conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2,nn3");
    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn3", "machine3.foo.bar:9820");
    final AtomicInteger counter = new AtomicInteger(0);
    final int[] isGood = { 1 };
    final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {

        @Override
        public long[] answer(InvocationOnMock invocation) throws Throwable {
            counter.incrementAndGet();
            if (isGood[0] == 1) {
                Thread.sleep(1000);
                return new long[] { 1 };
            }
            throw new IOException("Was Good mock !!");
        }
    });
    final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(badMock.getStats()).thenAnswer(new Answer<long[]>() {

        @Override
        public long[] answer(InvocationOnMock invocation) throws Throwable {
            counter.incrementAndGet();
            if (isGood[0] == 2) {
                Thread.sleep(1000);
                return new long[] { 2 };
            }
            throw new IOException("Bad mock !!");
        }
    });
    final NamenodeProtocols worseMock = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(worseMock.getStats()).thenAnswer(new Answer<long[]>() {

        @Override
        public long[] answer(InvocationOnMock invocation) throws Throwable {
            counter.incrementAndGet();
            if (isGood[0] == 3) {
                Thread.sleep(1000);
                return new long[] { 3 };
            }
            throw new IOException("Worse mock !!");
        }
    });
    RequestHedgingProxyProvider<NamenodeProtocols> provider = new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class, createFactory(goodMock, badMock, worseMock));
    long[] stats = provider.getProxy().proxy.getStats();
    Assert.assertTrue(stats.length == 1);
    Assert.assertEquals(1, stats[0]);
    Assert.assertEquals(3, counter.get());
    Mockito.verify(badMock).getStats();
    Mockito.verify(goodMock).getStats();
    Mockito.verify(worseMock).getStats();
    stats = provider.getProxy().proxy.getStats();
    Assert.assertTrue(stats.length == 1);
    Assert.assertEquals(1, stats[0]);
    // Ensure only the previous successful one is invoked
    Mockito.verifyNoMoreInteractions(badMock);
    Mockito.verifyNoMoreInteractions(worseMock);
    Assert.assertEquals(4, counter.get());
    // Flip to standby, so now this should fail
    isGood[0] = 2;
    try {
        provider.getProxy().proxy.getStats();
        Assert.fail("Should fail since previously successful proxy now fails ");
    } catch (Exception ex) {
        Assert.assertTrue(ex instanceof IOException);
    }
    Assert.assertEquals(5, counter.get());
    provider.performFailover(provider.getProxy().proxy);
    stats = provider.getProxy().proxy.getStats();
    Assert.assertTrue(stats.length == 1);
    Assert.assertEquals(2, stats[0]);
    // Counter updates twice since both proxies are tried on failure
    Assert.assertEquals(7, counter.get());
    stats = provider.getProxy().proxy.getStats();
    Assert.assertTrue(stats.length == 1);
    Assert.assertEquals(2, stats[0]);
    // Counter updates only once now
    Assert.assertEquals(8, counter.get());
    // Flip to the other standby, so now this should fail
    isGood[0] = 3;
    try {
        provider.getProxy().proxy.getStats();
        Assert.fail("Should fail since previously successful proxy now fails ");
    } catch (Exception ex) {
        Assert.assertTrue(ex instanceof IOException);
    }
    // Counter should update only once
    Assert.assertEquals(9, counter.get());
    provider.performFailover(provider.getProxy().proxy);
    stats = provider.getProxy().proxy.getStats();
    Assert.assertTrue(stats.length == 1);
    // Ensure correct proxy was called
    Assert.assertEquals(3, stats[0]);
    // Counter updates twice since both proxies are tried on failure
    Assert.assertEquals(11, counter.get());
    stats = provider.getProxy().proxy.getStats();
    Assert.assertTrue(stats.length == 1);
    Assert.assertEquals(3, stats[0]);
    // Counter updates only once now
    Assert.assertEquals(12, counter.get());
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InvocationOnMock(org.mockito.invocation.InvocationOnMock) IOException(java.io.IOException) URISyntaxException(java.net.URISyntaxException) ConnectException(java.net.ConnectException) MultiException(org.apache.hadoop.io.retry.MultiException) StandbyException(org.apache.hadoop.ipc.StandbyException) EOFException(java.io.EOFException) RemoteException(org.apache.hadoop.ipc.RemoteException) FileNotFoundException(java.io.FileNotFoundException) Test(org.junit.Test)

Example 40 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

The class TestOpenFilesWithSnapshot, method testOpenFilesWithRename.

@Test
public void testOpenFilesWithRename() throws Exception {
    Path path = new Path("/test");
    doWriteAndAbort(fs, path);
    // check for zero sized blocks
    Path fileWithEmptyBlock = new Path("/test/test/test4");
    fs.create(fileWithEmptyBlock);
    NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
    String clientName = fs.getClient().getClientName();
    // create one empty block
    nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null, HdfsConstants.GRANDFATHER_INODE_ID, null, null);
    fs.createSnapshot(path, "s2");
    fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
    fs.delete(new Path("/test/test-renamed"), true);
    restartNameNode();
}
Also used : Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) Test(org.junit.Test)

Aggregations

NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 54
Test (org.junit.Test): 45
IOException (java.io.IOException): 24
Configuration (org.apache.hadoop.conf.Configuration): 21
Path (org.apache.hadoop.fs.Path): 19
FileSystem (org.apache.hadoop.fs.FileSystem): 16
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 15
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 12
RemoteException (org.apache.hadoop.ipc.RemoteException): 10
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 9
File (java.io.File): 8
FileNotFoundException (java.io.FileNotFoundException): 8
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 7
StandbyException (org.apache.hadoop.ipc.StandbyException): 7
EOFException (java.io.EOFException): 6
ConnectException (java.net.ConnectException): 6
URISyntaxException (java.net.URISyntaxException): 6
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
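Taken together, the examples obtain a NamenodeProtocols handle in two ways: from a live in-process NameNode via MiniDFSCluster.getNameNodeRpc(), or as a Mockito mock when only the protocol contract is needed. A condensed sketch of both patterns follows; the stubbed statistics value is an illustrative assumption, not taken from any of the tests above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.mockito.Mockito;

public class NamenodeProtocolsPatterns {

    // Pattern 1: talk to a real in-process NameNode via MiniDFSCluster.
    static void withMiniCluster() throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
            cluster.waitActive();
            NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
            // Query filesystem-wide statistics over the NameNode RPC interface.
            long[] stats = nnRpc.getStats();
            System.out.println("stats[0] = " + stats[0]);
        } finally {
            cluster.shutdown();
        }
    }

    // Pattern 2: mock the protocol when only the client-side behaviour is under test.
    static NamenodeProtocols mockedNamenode() throws Exception {
        NamenodeProtocols mock = Mockito.mock(NamenodeProtocols.class);
        // Illustrative canned reply; real stats arrays carry several counters.
        Mockito.when(mock.getStats()).thenReturn(new long[] { 1024L });
        return mock;
    }
}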