Example use of org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException in the Apache Hadoop project:
the testSoftLeaseRecovery method of the TestLeaseRecovery2 class.
/**
 * Tests that soft lease expiration triggers lease recovery. The client's
 * lease renewer is stopped and the soft lease limit is shortened to 1
 * second, so another client's attempt to create the same file causes the
 * NameNode to recover the first client's lease.
 *
 * The test verifies that lease recovery completes and that the recovered
 * file's length and contents match what was written before the renewer
 * was stopped.
 *
 * @throws Exception if cluster operations or content validation fail
 */
@Test
public void testSoftLeaseRecovery() throws Exception {
  Map<String, String[]> u2g_map = new HashMap<String, String[]>(1);
  u2g_map.put(fakeUsername, new String[] { fakeGroup });
  DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);

  // Reset default lease periods before starting.
  cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
      HdfsConstants.LEASE_HARDLIMIT_PERIOD);

  // Create a file with a random name.
  String filestr = "/foo" + AppendTestUtil.nextInt();
  AppendTestUtil.LOG.info("filestr=" + filestr);
  Path filepath = new Path(filestr);
  FSDataOutputStream stm = dfs.create(filepath, true, BUF_SIZE,
      REPLICATION_NUM, BLOCK_SIZE);
  assertTrue(dfs.dfs.exists(filestr));

  // Write a random number of bytes into it.
  int size = AppendTestUtil.nextInt(FILE_SIZE);
  AppendTestUtil.LOG.info("size=" + size);
  stm.write(buffer, 0, size);

  // hflush the data, then stop the client from renewing its lease.
  AppendTestUtil.LOG.info("hflush");
  stm.hflush();
  AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
  dfs.dfs.getLeaseRenewer().interruptAndJoin();

  // Set the soft limit to 1 second so that the namenode triggers lease
  // recovery on the next attempt to open the file for write.
  cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);

  // Try to re-open the file before closing the previous handle. This
  // should fail but will trigger lease recovery.
  {
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
        fakeUsername, new String[] { fakeGroup });
    FileSystem dfs2 = DFSTestUtil.getFileSystemAs(ugi, conf);

    boolean done = false;
    for (int i = 0; i < 10 && !done; i++) {
      AppendTestUtil.LOG.info("i=" + i);
      try {
        dfs2.create(filepath, false, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
        fail("Creation of an existing file should never succeed.");
      } catch (FileAlreadyExistsException ex) {
        // Lease recovery completed: the file now exists as a closed file.
        done = true;
      } catch (AlreadyBeingCreatedException ex) {
        // Recovery has been triggered but is still in progress; retry.
        AppendTestUtil.LOG.info("GOOD! got " + ex.getMessage());
      } catch (IOException ioe) {
        AppendTestUtil.LOG.warn("UNEXPECTED IOException", ioe);
      }

      if (!done) {
        AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
        try {
          Thread.sleep(5000);
        } catch (InterruptedException e) {
          // Restore the interrupt status instead of swallowing it, so an
          // interrupted test run can still terminate promptly.
          Thread.currentThread().interrupt();
        }
      }
    }
    assertTrue(done);
  }

  AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. "
      + "Validating its contents now...");

  // Verify that the recovered file's size matches what was written.
  long fileSize = dfs.getFileStatus(filepath).getLen();
  assertTrue("File should be " + size + " bytes, but is actually found to be "
      + fileSize + " bytes", fileSize == size);

  // Verify the data on the datanodes byte-for-byte.
  AppendTestUtil.LOG.info("File size is good. "
      + "Now validating data and sizes from datanodes...");
  AppendTestUtil.checkFullFile(dfs, filepath, size, buffer, filestr);
}
Aggregations