
Example 86 with FileSystem

use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

the class TestSafeMode method testSafeModeExceptionText.

@Test
public void testSafeModeExceptionText() throws Exception {
    final Path file1 = new Path("/file1");
    DFSTestUtil.createFile(fs, file1, 1024, (short) 1, 0);
    assertTrue("Could not enter SM", dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
    try {
        FSRun fsRun = new FSRun() {

            @Override
            public void run(FileSystem fileSystem) throws IOException {
                ((DistributedFileSystem) fileSystem).setQuota(file1, 1, 1);
            }
        };
        fsRun.run(fs);
        fail("Should not succeed with no exceptions!");
    } catch (RemoteException re) {
        assertEquals(SafeModeException.class.getName(), re.getClassName());
        GenericTestUtils.assertExceptionContains(NameNode.getServiceAddress(conf, true).getHostName(), re);
    } catch (IOException ioe) {
        fail("Encountered exception" + " " + StringUtils.stringifyException(ioe));
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test)
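The point of the test above: while the NameNode is in safe mode, mutating RPCs such as setQuota() fail with a SafeModeException that arrives at the client wrapped in a RemoteException. A minimal sketch of toggling and querying safe mode from client code, assuming an open DistributedFileSystem handle like the test's dfs field (import paths as used in this code base):

import java.io.IOException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

// Sketch: enter safe mode, confirm the state, then leave.
static void toggleSafeMode(DistributedFileSystem dfs) throws IOException {
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    // SAFEMODE_GET only queries; it does not change the state.
    boolean inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);
    assert inSafeMode;
    // While here, calls like setQuota() fail with a wrapped SafeModeException.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}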

Example 87 with FileSystem

use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

the class TestSafeModeWithStripedFile method doTest.

/**
   * This util writes a small block group whose size is given by the caller,
   * then writes another two full-stripe block groups. It then shuts down all
   * DNs and restarts them one by one, verifying the safemode status at each
   * step.
   *
   * @param smallSize file size of the small block group
   * @param minStorages minimum number of replicas the block needs to be safe
   */
private void doTest(int smallSize, int minStorages) throws IOException {
    FileSystem fs = cluster.getFileSystem();
    // add 1 block
    byte[] data = StripedFileTestUtil.generateBytes(smallSize);
    Path smallFilePath = new Path("/testStripedFile_" + smallSize);
    DFSTestUtil.writeFile(fs, smallFilePath, data);
    // If we only had 1 block, the NN wouldn't enter safemode in the first
    // place, because the computed threshold would be 0 blocks.
    // So we need to add another 2 blocks.
    int bigSize = blockSize * dataBlocks * 2;
    Path bigFilePath = new Path("/testStripedFile_" + bigSize);
    data = StripedFileTestUtil.generateBytes(bigSize);
    DFSTestUtil.writeFile(fs, bigFilePath, data);
    // Now we have 3 blocks; with the 0.9 threshold, the NN needs 2 safe
    // blocks out of the 3 total before it can leave safemode.
    // stopping all DNs
    List<MiniDFSCluster.DataNodeProperties> dnprops = Lists.newArrayList();
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(smallFilePath.toString(), 0, smallSize);
    DatanodeInfo[] locations = lbs.get(0).getLocations();
    for (DatanodeInfo loc : locations) {
        // keep the DNs that have smallFile in the head of dnprops
        dnprops.add(cluster.stopDataNode(loc.getName()));
    }
    for (int i = 0; i < numDNs - locations.length; i++) {
        dnprops.add(cluster.stopDataNode(0));
    }
    cluster.restartNameNode(0);
    NameNode nn = cluster.getNameNode();
    assertTrue(cluster.getNameNode().isInSafeMode());
    assertEquals(0, NameNodeAdapter.getSafeModeSafeBlocks(nn));
    // the block of smallFile doesn't reach minStorages,
    // so the safe block count doesn't increment.
    for (int i = 0; i < minStorages - 1; i++) {
        cluster.restartDataNode(dnprops.remove(0));
        cluster.waitActive();
        cluster.triggerBlockReports();
        assertEquals(0, NameNodeAdapter.getSafeModeSafeBlocks(nn));
    }
    // the block of smallFile reaches minStorages,
    // so the safe block count increments.
    cluster.restartDataNode(dnprops.remove(0));
    cluster.waitActive();
    cluster.triggerBlockReports();
    assertEquals(1, NameNodeAdapter.getSafeModeSafeBlocks(nn));
    // the 2 blocks of bigFile need DATA_BLK_NUM storages to be safe
    for (int i = minStorages; i < dataBlocks - 1; i++) {
        cluster.restartDataNode(dnprops.remove(0));
        cluster.waitActive();
        cluster.triggerBlockReports();
        assertTrue(nn.isInSafeMode());
    }
    cluster.restartDataNode(dnprops.remove(0));
    cluster.waitActive();
    cluster.triggerBlockReports();
    assertFalse(nn.isInSafeMode());
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), FileSystem (org.apache.hadoop.fs.FileSystem), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks)
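Two details worth making explicit. First, the arithmetic in the comments works because the safe-block threshold is truncated, not rounded up (assuming the usual integer cast in the safe-mode accounting): with the 0.9 threshold over 3 block groups, 3 * 0.9 = 2.7 truncates to 2 needed safe blocks. Second, asserting isInSafeMode() right after triggerBlockReports() can be racy on a slow machine; a hypothetical helper (not in the test) could poll instead, using the same Supplier-based GenericTestUtils.waitFor that Example 90 below relies on:

import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.test.GenericTestUtils;
import com.google.common.base.Supplier;

// Hypothetical helper: poll until the NameNode leaves safe mode,
// checking every 100 ms and giving up after 10 s.
static void waitForSafeModeExit(final NameNode nn) throws Exception {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            return !nn.isInSafeMode();
        }
    }, 100, 10000);
}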

Example 88 with FileSystem

use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

the class TestRenameWhileOpen method testWhileOpenRenameToExistentDirectory.

/**
   * open /user/dir1/file1 
   * mkdir /user/dir2
   * move /user/dir1/file1 /user/dir2/
   */
@Test
public void testWhileOpenRenameToExistentDirectory() throws IOException {
    Configuration conf = new HdfsConfiguration();
    // 2s
    final int MAX_IDLE_TIME = 2000;
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
    System.out.println("Test 3************************************");
    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = null;
    try {
        cluster.waitActive();
        fs = cluster.getFileSystem();
        // create file1.
        Path dir1 = new Path("/user/dir1");
        Path file1 = new Path(dir1, "file1");
        FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
        System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
        TestFileCreation.writeFile(stm1);
        stm1.hflush();
        Path dir2 = new Path("/user/dir2");
        fs.mkdirs(dir2);
        fs.rename(file1, dir2);
        // restart cluster.
        // This ensures that leases are persisted in fsimage.
        cluster.shutdown();
        try {
            Thread.sleep(2 * MAX_IDLE_TIME);
        } catch (InterruptedException e) {
        }
        cluster = new MiniDFSCluster.Builder(conf).format(false).build();
        cluster.waitActive();
        // restart cluster yet again. This triggers the code to read in
        // persistent leases from fsimage.
        cluster.shutdown();
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
        }
        cluster = new MiniDFSCluster.Builder(conf).format(false).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        Path newfile = new Path("/user/dir2", "file1");
        assertTrue(!fs.exists(file1));
        assertTrue(fs.exists(newfile));
        checkFullFile(fs, newfile);
    } finally {
        if (fs != null) {
            fs.close();
        }
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
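The shutdown/sleep/rebuild sequence appears twice in this test (and again in the next example). A hypothetical helper, not part of the test class, factors out the pattern and makes the intent clearer: restart without reformatting, so the fsimage, and the leases persisted in it, are reloaded on startup.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical helper: bounce the cluster while preserving on-disk state.
static MiniDFSCluster restartWithoutFormat(Configuration conf,
        MiniDFSCluster cluster, long sleepMillis) throws IOException {
    cluster.shutdown();
    try {
        // Give client IPC connections time to idle out, as the test does.
        Thread.sleep(sleepMillis);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    // format(false) keeps the existing fsimage and edit log.
    MiniDFSCluster restarted =
            new MiniDFSCluster.Builder(conf).format(false).build();
    restarted.waitActive();
    return restarted;
}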

Example 89 with FileSystem

use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

the class TestRenameWhileOpen method testWhileOpenRenameParent.

/**
   * open /user/dir1/file1 /user/dir2/file2
   * mkdir /user/dir3
   * move /user/dir1 /user/dir3
   */
@Test
public void testWhileOpenRenameParent() throws IOException {
    Configuration conf = new HdfsConfiguration();
    // 2s
    final int MAX_IDLE_TIME = 2000;
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, TestFileCreation.blockSize);
    // create cluster
    System.out.println("Test 1*****************************");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = null;
    try {
        cluster.waitActive();
        fs = cluster.getFileSystem();
        // Normally, the in-progress edit log would be finalized by
        // FSEditLog#endCurrentLogSegment.  For testing purposes, we
        // disable that here.
        FSEditLog spyLog = spy(cluster.getNameNode().getFSImage().getEditLog());
        doNothing().when(spyLog).endCurrentLogSegment(Mockito.anyBoolean());
        DFSTestUtil.setEditLogForTesting(cluster.getNamesystem(), spyLog);
        // create file1.
        Path dir1 = new Path("/user/a+b/dir1");
        Path file1 = new Path(dir1, "file1");
        FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
        System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
        TestFileCreation.writeFile(stm1);
        stm1.hflush();
        // create file2.
        Path dir2 = new Path("/user/dir2");
        Path file2 = new Path(dir2, "file2");
        FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
        System.out.println("testFileCreationDeleteParent: " + "Created file " + file2);
        TestFileCreation.writeFile(stm2);
        stm2.hflush();
        // move dir1 while file1 is open
        Path dir3 = new Path("/user/dir3");
        fs.mkdirs(dir3);
        fs.rename(dir1, dir3);
        // create file3
        Path file3 = new Path(dir3, "file3");
        FSDataOutputStream stm3 = fs.create(file3);
        fs.rename(file3, new Path(dir3, "bozo"));
        // Get a new block for the file.
        TestFileCreation.writeFile(stm3, TestFileCreation.blockSize + 1);
        stm3.hflush();
        // Stop the NameNode before closing the files.
        // This will ensure that the write leases are still active and present
        // in the edit log.  Similarly, there should be a pending ADD_BLOCK_OP
        // for file3, since we just added a block to that file.
        cluster.getNameNode().stop();
        // Restart cluster.
        cluster.shutdown();
        try {
            Thread.sleep(2 * MAX_IDLE_TIME);
        } catch (InterruptedException e) {
        }
        cluster = new MiniDFSCluster.Builder(conf).format(false).build();
        cluster.waitActive();
        // restart cluster yet again. This triggers the code to read in
        // persistent leases from the edit log.
        cluster.shutdown();
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
        }
        cluster = new MiniDFSCluster.Builder(conf).format(false).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        Path newfile = new Path("/user/dir3/dir1", "file1");
        assertTrue(!fs.exists(file1));
        assertTrue(fs.exists(file2));
        assertTrue(fs.exists(newfile));
        checkFullFile(fs, newfile);
    } finally {
        if (fs != null) {
            fs.close();
        }
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), FSEditLog (org.apache.hadoop.hdfs.server.namenode.FSEditLog), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
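The spy setup in this test is the standard Mockito partial-mock pattern: wrap a real FSEditLog, then stub exactly one method (endCurrentLogSegment) into a no-op while every other call still hits the real object. A stripped-down sketch of the same shape, using stand-in types so that only spy/doNothing/when are the real Mockito calls:

import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.spy;

import java.util.ArrayList;
import java.util.List;

// Sketch: one method becomes a no-op, the rest of the object stays live.
static void spyPatternDemo() {
    List<String> spyList = spy(new ArrayList<String>());
    doNothing().when(spyList).clear(); // clear() is now intercepted
    spyList.add("entry");              // real behavior, actually stored
    spyList.clear();                   // no-op: "entry" is still present
    assert spyList.size() == 1;
}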

Example 90 with FileSystem

use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

the class TestReplication method testNoExtraReplicationWhenBlockReceivedIsLate.

/**
   * This test makes sure that, when a file is closed before all
   * of the datanodes in the pipeline have reported their replicas,
   * the NameNode doesn't consider the block under-replicated too
   * aggressively. It is a regression test for HDFS-1172.
   */
@Test(timeout = 60000)
public void testNoExtraReplicationWhenBlockReceivedIsLate() throws Exception {
    LOG.info("Test block replication when blockReceived is late");
    final short numDataNodes = 3;
    final short replication = 3;
    final Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    final String testFile = "/replication-test-file";
    final Path testPath = new Path(testFile);
    final BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
    try {
        cluster.waitActive();
        // Artificially delay IBR from 1 DataNode.
        // This ensures that the client's completeFile() RPC will get to the
        // NN before some of the replicas are reported.
        NameNode nn = cluster.getNameNode();
        DataNode dn = cluster.getDataNodes().get(0);
        DatanodeProtocolClientSideTranslatorPB spy = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
        DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
        Mockito.doAnswer(delayer).when(spy).blockReceivedAndDeleted(Mockito.<DatanodeRegistration>anyObject(), Mockito.anyString(), Mockito.<StorageReceivedDeletedBlocks[]>anyObject());
        FileSystem fs = cluster.getFileSystem();
        // Create and close a small file with two blocks
        DFSTestUtil.createFile(fs, testPath, 1500, replication, 0);
        // schedule replication via BlockManager#computeReplicationWork
        BlockManagerTestUtil.computeAllPendingWork(bm);
        // Initially, should have some pending replication since the close()
        // is earlier than at least one of the reportReceivedDeletedBlocks calls.
        assertTrue(pendingReplicationCount(bm) > 0);
        // release pending IBR.
        delayer.waitForCall();
        delayer.proceed();
        delayer.waitForResult();
        // make sure DataNodes do replication work if any exists
        for (DataNode d : cluster.getDataNodes()) {
            DataNodeTestUtils.triggerHeartbeat(d);
        }
        // Wait until there is nothing pending
        try {
            GenericTestUtils.waitFor(new Supplier<Boolean>() {

                @Override
                public Boolean get() {
                    return pendingReplicationCount(bm) == 0;
                }
            }, 100, 3000);
        } catch (TimeoutException e) {
            fail("timed out while waiting for no pending replication.");
        }
        // Check that none of the datanodes have serviced a replication request.
        // i.e. that the NameNode didn't schedule any spurious replication.
        assertNoReplicationWasPerformed(cluster);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), Configuration (org.apache.hadoop.conf.Configuration), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), DelayAnswer (org.apache.hadoop.test.GenericTestUtils.DelayAnswer), DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileSystem (org.apache.hadoop.fs.FileSystem), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), TimeoutException (java.util.concurrent.TimeoutException), Test (org.junit.Test)
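The DelayAnswer used above is a latch-style utility from GenericTestUtils: installed on a spied RPC proxy, it blocks the intercepted call until the test decides to let it through. The choreography is waitForCall() (the call has arrived and is parked), then proceed() (unblock it), then waitForResult() (the real method has finished). A compact sketch with a hypothetical Reporter interface standing in for the spied DatanodeProtocol proxy; note that depending on the Hadoop version, the DelayAnswer constructor takes a commons-logging Log or an slf4j Logger:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.mockito.Mockito;

// Hypothetical interface standing in for the spied RPC proxy.
interface Reporter {
    void report(String msg);
}

static final Log LOG = LogFactory.getLog("DelayAnswerSketch");

static void delayAnswerDemo(Reporter real) throws Exception {
    Reporter spy = Mockito.spy(real);
    DelayAnswer delayer = new DelayAnswer(LOG);
    Mockito.doAnswer(delayer).when(spy).report(Mockito.anyString());
    // ... hand 'spy' to code that calls report() on another thread ...
    delayer.waitForCall();   // intercepted call has arrived and is blocked
    delayer.proceed();       // let the real report() run
    delayer.waitForResult(); // wait until it has returned
}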

Aggregations

FileSystem (org.apache.hadoop.fs.FileSystem): 2611
Path (org.apache.hadoop.fs.Path): 2199
Test (org.junit.Test): 1034
Configuration (org.apache.hadoop.conf.Configuration): 890
IOException (java.io.IOException): 757
FileStatus (org.apache.hadoop.fs.FileStatus): 419
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 264
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 227
ArrayList (java.util.ArrayList): 208
File (java.io.File): 181
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 165
JobConf (org.apache.hadoop.mapred.JobConf): 163
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 151
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 145
URI (java.net.URI): 135
SequenceFile (org.apache.hadoop.io.SequenceFile): 118
Text (org.apache.hadoop.io.Text): 112
FileNotFoundException (java.io.FileNotFoundException): 102
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 94
Job (org.apache.hadoop.mapreduce.Job): 81