Example 1 with FSEditLog

Use of org.apache.hadoop.hdfs.server.namenode.FSEditLog in project hadoop by apache, in the class TestRenameWhileOpen, method testWhileOpenRenameParent.

/**
   * open /user/dir1/file1 /user/dir2/file2
   * mkdir /user/dir3
   * move /user/dir1 /user/dir3
   */
@Test
public void testWhileOpenRenameParent() throws IOException {
    Configuration conf = new HdfsConfiguration();
    // 2s
    final int MAX_IDLE_TIME = 2000;
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, TestFileCreation.blockSize);
    // create cluster
    System.out.println("Test 1*****************************");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = null;
    try {
        cluster.waitActive();
        fs = cluster.getFileSystem();
        // Normally, the in-progress edit log would be finalized by
        // FSEditLog#endCurrentLogSegment.  For testing purposes, we
        // disable that here.
        FSEditLog spyLog = spy(cluster.getNameNode().getFSImage().getEditLog());
        doNothing().when(spyLog).endCurrentLogSegment(Mockito.anyBoolean());
        DFSTestUtil.setEditLogForTesting(cluster.getNamesystem(), spyLog);
        // create file1.
        Path dir1 = new Path("/user/a+b/dir1");
        Path file1 = new Path(dir1, "file1");
        FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
        System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
        TestFileCreation.writeFile(stm1);
        stm1.hflush();
        // create file2.
        Path dir2 = new Path("/user/dir2");
        Path file2 = new Path(dir2, "file2");
        FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
        System.out.println("testFileCreationDeleteParent: " + "Created file " + file2);
        TestFileCreation.writeFile(stm2);
        stm2.hflush();
        // move dir1 while file1 is open
        Path dir3 = new Path("/user/dir3");
        fs.mkdirs(dir3);
        fs.rename(dir1, dir3);
        // create file3
        Path file3 = new Path(dir3, "file3");
        FSDataOutputStream stm3 = fs.create(file3);
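        // Rename file3 within dir3 while its output stream is still open.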
        fs.rename(file3, new Path(dir3, "bozo"));
        // Get a new block for the file.
        TestFileCreation.writeFile(stm3, TestFileCreation.blockSize + 1);
        stm3.hflush();
        // Stop the NameNode before closing the files.
        // This will ensure that the write leases are still active and present
        // in the edit log.  Similarly, there should be a pending ADD_BLOCK_OP
        // for file3, since we just added a block to that file.
        cluster.getNameNode().stop();
        // Restart cluster.
        cluster.shutdown();
        try {
            Thread.sleep(2 * MAX_IDLE_TIME);
        } catch (InterruptedException e) {
        }
        cluster = new MiniDFSCluster.Builder(conf).format(false).build();
        cluster.waitActive();
        // restart cluster yet again. This triggers the code to read in
        // persistent leases from the edit log.
        cluster.shutdown();
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
        }
        cluster = new MiniDFSCluster.Builder(conf).format(false).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        Path newfile = new Path("/user/dir3/dir1", "file1");
        assertFalse(fs.exists(file1));
        assertTrue(fs.exists(file2));
        assertTrue(fs.exists(newfile));
        checkFullFile(fs, newfile);
    } finally {
        if (fs != null) {
            fs.close();
        }
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FSEditLog(org.apache.hadoop.hdfs.server.namenode.FSEditLog) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
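
The crux of the example above is Mockito's partial-mock (spy) idiom: spy() wraps a live object so that every method runs for real unless explicitly stubbed, while doNothing().when(...) turns a single method into a no-op. A self-contained sketch of the same pattern, using a hypothetical SegmentedLog stand-in rather than the real FSEditLog:

import static org.mockito.Mockito.*;

import java.util.ArrayList;
import java.util.List;

public class SpyPatternDemo {

    // Hypothetical stand-in; the Hadoop tests apply the same idea to FSEditLog.
    static class SegmentedLog {
        List<String> entries = new ArrayList<>();
        void append(String e) { entries.add(e); }
        void endSegment() { entries.clear(); }  // the call we want to suppress
    }

    public static void main(String[] args) {
        SegmentedLog spyLog = spy(new SegmentedLog());
        // Stub endSegment into a no-op; append still executes for real.
        doNothing().when(spyLog).endSegment();
        spyLog.append("txn-1");
        // Swallowed by the stub, so the entry survives "finalization".
        spyLog.endSegment();
        System.out.println(spyLog.entries);  // prints [txn-1]
    }
}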

Example 2 with FSEditLog

Use of org.apache.hadoop.hdfs.server.namenode.FSEditLog in project hadoop by apache, in the class TestLeaseRecovery2, method hardLeaseRecoveryRestartHelper.

public void hardLeaseRecoveryRestartHelper(boolean doRename, int size) throws Exception {
    if (size < 0) {
        size = AppendTestUtil.nextInt(FILE_SIZE + 1);
    }
    // create a file
    String fileStr = "/hardLeaseRecovery";
    AppendTestUtil.LOG.info("filestr=" + fileStr);
    Path filePath = new Path(fileStr);
    FSDataOutputStream stm = dfs.create(filePath, true, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
    assertTrue(dfs.dfs.exists(fileStr));
    // write bytes into the file.
    AppendTestUtil.LOG.info("size=" + size);
    stm.write(buffer, 0, size);
    String originalLeaseHolder = NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr);
    assertFalse("original lease holder should not be the NN", originalLeaseHolder.equals(HdfsServerConstants.NAMENODE_LEASE_HOLDER));
    // hflush file
    AppendTestUtil.LOG.info("hflush");
    stm.hflush();
    // check visible length
    final HdfsDataInputStream in = (HdfsDataInputStream) dfs.open(filePath);
    Assert.assertEquals(size, in.getVisibleLength());
    in.close();
    if (doRename) {
        fileStr += ".renamed";
        Path renamedPath = new Path(fileStr);
        assertTrue(dfs.rename(filePath, renamedPath));
        filePath = renamedPath;
    }
    // kill the lease renewal thread
    AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
    dfs.dfs.getLeaseRenewer().interruptAndJoin();
    // Disable the DataNodes' heartbeats so that the blocks being recovered
    // won't actually get completed during lease recovery.
    for (DataNode dn : cluster.getDataNodes()) {
        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    }
    // set the hard limit to be 1 second 
    cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD);
    // Make sure lease recovery begins.
    final String path = fileStr;
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            return HdfsServerConstants.NAMENODE_LEASE_HOLDER.equals(NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), path));
        }
    }, (int) SHORT_LEASE_PERIOD, (int) SHORT_LEASE_PERIOD * 10);
    // Normally, the in-progress edit log would be finalized by
    // FSEditLog#endCurrentLogSegment.  For testing purposes, we
    // disable that here.
    FSEditLog spyLog = spy(cluster.getNameNode().getFSImage().getEditLog());
    doNothing().when(spyLog).endCurrentLogSegment(Mockito.anyBoolean());
    DFSTestUtil.setEditLogForTesting(cluster.getNamesystem(), spyLog);
    cluster.restartNameNode(false);
    checkLease(fileStr, size);
    // Let the DNs send heartbeats again.
    for (DataNode dn : cluster.getDataNodes()) {
        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
    }
    cluster.waitActive();
    // set the hard limit to be 1 second, to initiate lease recovery. 
    cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD);
    // wait for lease recovery to complete
    LocatedBlocks locatedBlocks;
    do {
        Thread.sleep(SHORT_LEASE_PERIOD);
        locatedBlocks = dfs.dfs.getLocatedBlocks(fileStr, 0L, size);
    } while (locatedBlocks.isUnderConstruction());
    assertEquals(size, locatedBlocks.getFileLength());
    // make sure that the client can't write data anymore.
    try {
        stm.write('b');
        stm.hflush();
        fail("Should not be able to flush after we've lost the lease");
    } catch (IOException e) {
        LOG.info("Expceted exception on write/hflush", e);
    }
    try {
        stm.close();
        fail("Should not be able to close after we've lost the lease");
    } catch (IOException e) {
        LOG.info("Expected exception on close", e);
    }
    // verify data
    AppendTestUtil.LOG.info("File size is good. Now validating sizes from datanodes...");
    AppendTestUtil.checkFullFile(dfs, filePath, size, buffer, fileStr);
}
Also used : Path(org.apache.hadoop.fs.Path) FSEditLog(org.apache.hadoop.hdfs.server.namenode.FSEditLog) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream)
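
A side note on the waitFor block in the middle of this example: GenericTestUtils.waitFor takes a Supplier<Boolean> plus a polling interval and an overall timeout, both in milliseconds. On Java 8 and later the anonymous class can be collapsed into a lambda; a minimal equivalent sketch, assuming the same cluster and path variables as above:

// Poll every SHORT_LEASE_PERIOD ms, for at most ten times that, until the
// NameNode itself holds the lease, i.e. until hard lease recovery has begun.
GenericTestUtils.waitFor(
    () -> HdfsServerConstants.NAMENODE_LEASE_HOLDER.equals(
        NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), path)),
    (int) SHORT_LEASE_PERIOD, (int) SHORT_LEASE_PERIOD * 10);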

Example 3 with FSEditLog

Use of org.apache.hadoop.hdfs.server.namenode.FSEditLog in project hadoop by apache, in the class TestFailureToReadEdits, method causeFailureOnEditLogRead.

private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
    FSEditLog spyEditLog = NameNodeAdapter.spyOnEditLog(nn1);
    LimitedEditLogAnswer answer = new LimitedEditLogAnswer();
    doAnswer(answer).when(spyEditLog).selectInputStreams(anyLong(), anyLong(), (MetaRecoveryContext) anyObject(), anyBoolean(), anyBoolean());
    return answer;
}
Also used : FSEditLog(org.apache.hadoop.hdfs.server.namenode.FSEditLog)
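
LimitedEditLogAnswer itself is defined inside TestFailureToReadEdits and is not reproduced on this page. As a rough, hypothetical sketch of the shape such an Answer takes (simplified; the real class also tampers with the returned streams), a failure-injecting Answer for selectInputStreams might look like:

import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

// Hypothetical sketch, loosely modeled on LimitedEditLogAnswer; not the real class.
class FailingEditLogAnswer implements Answer<Collection<EditLogInputStream>> {

    volatile boolean throwExceptions = false;

    @Override
    @SuppressWarnings("unchecked")
    public Collection<EditLogInputStream> answer(InvocationOnMock invocation) throws Throwable {
        // Delegate to the real FSEditLog to collect the genuine input streams.
        Collection<EditLogInputStream> streams =
                (Collection<EditLogInputStream>) invocation.callRealMethod();
        // Optionally inject a failure so the caller's edit log read fails.
        if (throwExceptions) {
            throw new IOException("Injected failure selecting edit log input streams");
        }
        return streams;
    }
}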

Aggregations

FSEditLog (org.apache.hadoop.hdfs.server.namenode.FSEditLog): 3 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2 uses
Path (org.apache.hadoop.fs.Path): 2 uses
IOException (java.io.IOException): 1 use
Configuration (org.apache.hadoop.conf.Configuration): 1 use
FileSystem (org.apache.hadoop.fs.FileSystem): 1 use
HdfsDataInputStream (org.apache.hadoop.hdfs.client.HdfsDataInputStream): 1 use
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 1 use
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 1 use
Test (org.junit.Test): 1 use