Example 6 with AlreadyBeingCreatedException

Use of org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException in project hadoop by apache.

From the class TestLeaseRecovery2, the method testSoftLeaseRecovery:

/**
   * This test makes the client stop renewing its lease and also sets the
   * soft lease expiration period to a short 1s. Soft lease expiration is
   * thus triggered immediately by having another client try to create the
   * same file.
   *
   * The test makes sure that the lease recovery completes.
   *
   * @throws Exception
   */
@Test
public void testSoftLeaseRecovery() throws Exception {
    Map<String, String[]> u2g_map = new HashMap<String, String[]>(1);
    u2g_map.put(fakeUsername, new String[] { fakeGroup });
    DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
    // Reset default lease periods
    cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD, HdfsConstants.LEASE_HARDLIMIT_PERIOD);
    //create a file
    // create a random file name
    String filestr = "/foo" + AppendTestUtil.nextInt();
    AppendTestUtil.LOG.info("filestr=" + filestr);
    Path filepath = new Path(filestr);
    FSDataOutputStream stm = dfs.create(filepath, true, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
    assertTrue(dfs.dfs.exists(filestr));
    // write random number of bytes into it.
    int size = AppendTestUtil.nextInt(FILE_SIZE);
    AppendTestUtil.LOG.info("size=" + size);
    stm.write(buffer, 0, size);
    // hflush file
    AppendTestUtil.LOG.info("hflush");
    stm.hflush();
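    // Stop the client's lease renewer so the lease is no longer refreshed;
    // otherwise the shortened soft limit set below would never expire.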
    AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
    dfs.dfs.getLeaseRenewer().interruptAndJoin();
    // set the soft limit to be 1 second so that the namenode triggers
    // lease recovery on the next attempt to open the file for writing.
    cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);
    // try to re-open the file before closing the previous handle. This
    // should fail but will trigger lease recovery.
    {
        UserGroupInformation ugi = UserGroupInformation.createUserForTesting(fakeUsername, new String[] { fakeGroup });
        FileSystem dfs2 = DFSTestUtil.getFileSystemAs(ugi, conf);
        boolean done = false;
        for (int i = 0; i < 10 && !done; i++) {
            AppendTestUtil.LOG.info("i=" + i);
            try {
                dfs2.create(filepath, false, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
                fail("Creation of an existing file should never succeed.");
            } catch (FileAlreadyExistsException ex) {
                done = true;
            } catch (AlreadyBeingCreatedException ex) {
                AppendTestUtil.LOG.info("GOOD! got " + ex.getMessage());
            } catch (IOException ioe) {
                AppendTestUtil.LOG.warn("UNEXPECTED IOException", ioe);
            }
            if (!done) {
                AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
                try {
                    Thread.sleep(5000);
                } catch (InterruptedException e) {
                    // ignore the interrupt and keep retrying
                }
            }
        }
        assertTrue(done);
    }
    AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. " + "Validating its contents now...");
    // verify that file-size matches
    long fileSize = dfs.getFileStatus(filepath).getLen();
    assertTrue("File should be " + size + " bytes, but is actually " + " found to be " + fileSize + " bytes", fileSize == size);
    // verify data
    AppendTestUtil.LOG.info("File size is good. " + "Now validating data and sizes from datanodes...");
    AppendTestUtil.checkFullFile(dfs, filepath, size, buffer, filestr);
}
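As the aggregation list below suggests, AlreadyBeingCreatedException often reaches callers wrapped in a RemoteException from the NameNode RPC rather than thrown directly, as in the test above. The following is a minimal, hypothetical sketch (the class and method names are illustrative, not part of the test) of how a client might unwrap the RPC error to detect a lease conflict:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.ipc.RemoteException;

public class LeaseConflictProbe {

    /**
     * Attempts a non-overwriting create and reports whether the failure
     * was a lease conflict (AlreadyBeingCreatedException). Note that if
     * the create succeeds, the probe has the side effect of creating the
     * (empty) file.
     */
    static boolean isUnderConstructionByAnotherClient(FileSystem fs, Path path)
            throws IOException {
        try {
            // overwrite=false: fails if the file already exists or is being created
            fs.create(path, false).close();
            return false;
        } catch (AlreadyBeingCreatedException abce) {
            // thrown directly by some client paths, as in the test above
            return true;
        } catch (RemoteException re) {
            // server-side exceptions arrive wrapped; unwrap and check the class
            IOException unwrapped =
                re.unwrapRemoteException(AlreadyBeingCreatedException.class);
            if (unwrapped instanceof AlreadyBeingCreatedException) {
                return true;
            }
            throw unwrapped;
        }
    }
}

Catching the unwrapped type first keeps the direct-throw path (as exercised in the test) and the RPC-wrapped path behind a single check.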
Also used :
- Path (org.apache.hadoop.fs.Path)
- FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException)
- AlreadyBeingCreatedException (org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException)
- HashMap (java.util.HashMap)
- IOException (java.io.IOException)
- FileSystem (org.apache.hadoop.fs.FileSystem)
- FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)
- UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
- Test (org.junit.Test)

Aggregations

- AlreadyBeingCreatedException (org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException): 6 uses
- IOException (java.io.IOException): 4 uses
- FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2 uses
- FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 2 uses
- FileSystem (org.apache.hadoop.fs.FileSystem): 2 uses
- Path (org.apache.hadoop.fs.Path): 2 uses
- BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 2 uses
- RemoteException (org.apache.hadoop.ipc.RemoteException): 2 uses
- Test (org.junit.Test): 2 uses
- HashMap (java.util.HashMap): 1 use
- HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 1 use
- Block (org.apache.hadoop.hdfs.protocol.Block): 1 use
- ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 1 use
- LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 1 use
- RecoveryInProgressException (org.apache.hadoop.hdfs.protocol.RecoveryInProgressException): 1 use
- BlockUnderConstructionFeature (org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature): 1 use
- BlockUCState (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState): 1 use
- Lease (org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease): 1 use
- FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 1 use
- Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 1 use