
Example 1 with LeaseRenewer

Use of org.apache.hadoop.hdfs.client.impl.LeaseRenewer in project hadoop by apache.

From class TestDFSClientRetries, method testLeaseRenewSocketTimeout:

/**
   * Test that the DFSClient can continue to function after the renewLease
   * RPC receives a SocketTimeoutException.
   */
@Test
public void testLeaseRenewSocketTimeout() throws Exception {
    String file1 = "/testFile1";
    String file2 = "/testFile2";
    // Set short retry timeouts so this test runs faster
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
    conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2 * 1000);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        NamenodeProtocols spyNN = spy(cluster.getNameNodeRpc());
        Mockito.doThrow(new SocketTimeoutException()).when(spyNN).renewLease(Mockito.anyString());
        DFSClient client = new DFSClient(null, spyNN, conf, null);
        // Get hold of the lease renewer instance used by the client
        LeaseRenewer leaseRenewer = client.getLeaseRenewer();
        leaseRenewer.setRenewalTime(100);
        OutputStream out1 = client.create(file1, false);
        Mockito.verify(spyNN, timeout(10000).times(1)).renewLease(Mockito.anyString());
        verifyEmptyLease(leaseRenewer);
        try {
            out1.write(new byte[256]);
            fail("existing output stream should be aborted");
        } catch (IOException e) {
            // Expected: the stream was aborted after lease renewal failed.
        }
        // Verify the DFSClient can still perform reads after the renewer aborted.
        client.exists(file2);
        // Verify the DFSClient can write again once renewLease no longer
        // throws SocketTimeoutException.
        Mockito.doNothing().when(spyNN).renewLease(Mockito.anyString());
        leaseRenewer = client.getLeaseRenewer();
        leaseRenewer.setRenewalTime(100);
        OutputStream out2 = client.create(file2, false);
        Mockito.verify(spyNN, timeout(10000).times(2)).renewLease(Mockito.anyString());
        out2.write(new byte[256]);
        out2.close();
        verifyEmptyLease(leaseRenewer);
    } finally {
        cluster.shutdown();
    }
}
Also used: NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), SocketTimeoutException (java.net.SocketTimeoutException), LeaseRenewer (org.apache.hadoop.hdfs.client.impl.LeaseRenewer), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), OutputStream (java.io.OutputStream), Matchers.anyString (org.mockito.Matchers.anyString), IOException (java.io.IOException), Test (org.junit.Test)
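
The heart of this test is a Mockito pattern: spy the NameNode RPC proxy, stub renewLease so it fails on demand, then lift the stub. Below is a minimal, self-contained sketch of that pattern with no MiniDFSCluster; the RenewTarget interface is a hypothetical stand-in for NamenodeProtocols, while the Mockito calls (doThrow, doNothing, verify with timeout) are the same ones the test uses.

import static org.mockito.Mockito.*;

import java.net.SocketTimeoutException;

public class RenewLeaseStubSketch {

    // Hypothetical stand-in for the renewLease(String) RPC on NamenodeProtocols.
    interface RenewTarget {
        void renewLease(String clientName) throws Exception;
    }

    public static void main(String[] args) throws Exception {
        RenewTarget target = mock(RenewTarget.class);
        // Make every renewLease call fail, as the test does against the spied NN.
        doThrow(new SocketTimeoutException()).when(target).renewLease(anyString());
        try {
            target.renewLease("client-1");
        } catch (SocketTimeoutException expected) {
            System.out.println("renewLease failed as stubbed");
        }
        // Lift the stub, as the test does before creating the second file.
        doNothing().when(target).renewLease(anyString());
        target.renewLease("client-1");
        // timeout(...) polls until the verification passes or the deadline expires.
        verify(target, timeout(1000).times(2)).renewLease(anyString());
    }
}

The timeout(...) verification matters in the real test because renewals happen on the LeaseRenewer's background thread, so the call count only converges after a short wait.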

Example 2 with LeaseRenewer

Use of org.apache.hadoop.hdfs.client.impl.LeaseRenewer in project hadoop by apache.

From class TestLease, method testLeaseAbort:

@Test
public void testLeaseAbort() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
        cluster.waitActive();
        NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
        NamenodeProtocols spyNN = spy(preSpyNN);
        DFSClient dfs = new DFSClient(null, spyNN, conf, null);
        byte[] buf = new byte[1024];
        FSDataOutputStream c_out = createFsOut(dfs, dirString + "c");
        c_out.write(buf, 0, 1024);
        c_out.close();
        DFSInputStream c_in = dfs.open(dirString + "c");
        FSDataOutputStream d_out = createFsOut(dfs, dirString + "d");
        // Stub renewLease so every call fails with an InvalidToken error.
        doThrow(new RemoteException(InvalidToken.class.getName(), "Your token is worthless")).when(spyNN).renewLease(anyString());
        // We don't need to wait for the lease renewer thread to act;
        // call renewLease() manually.
        // Make it look like the soft limit has been exceeded.
        LeaseRenewer originalRenewer = dfs.getLeaseRenewer();
        dfs.lastLeaseRenewal = Time.monotonicNow() - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
        try {
            dfs.renewLease();
        } catch (IOException e) {
            // Expected: the stubbed renewLease throws.
        }
        // The write should still work: only the soft limit has been exceeded.
        try {
            d_out.write(buf, 0, 1024);
            LOG.info("Write worked beyond the soft limit as expected.");
        } catch (IOException e) {
            Assert.fail("Write failed.");
        }
        // make it look like the hard limit has been exceeded.
        dfs.lastLeaseRenewal = Time.monotonicNow() - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000;
        dfs.renewLease();
        // This write should now fail: the lease was aborted once the hard limit passed.
        try {
            d_out.write(buf, 0, 1024);
            d_out.close();
            Assert.fail("Write did not fail even after the fatal lease renewal failure");
        } catch (IOException e) {
            LOG.info("Write failed as expected. ", e);
        }
        // If aborted, the renewer should be empty. (no reference to clients)
        Thread.sleep(1000);
        Assert.assertTrue(originalRenewer.isEmpty());
        // unstub
        doNothing().when(spyNN).renewLease(anyString());
        // existing input streams should work
        try {
            int num = c_in.read(buf, 0, 1);
            if (num != 1) {
                Assert.fail("Failed to read 1 byte");
            }
            c_in.close();
        } catch (IOException e) {
            LOG.error("Read failed with ", e);
            Assert.fail("Read after lease renewal failure failed");
        }
        // new file writes should work.
        try {
            c_out = createFsOut(dfs, dirString + "c");
            c_out.write(buf, 0, 1024);
            c_out.close();
        } catch (IOException e) {
            LOG.error("Write failed with ", e);
            Assert.fail("Write failed");
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), LeaseRenewer (org.apache.hadoop.hdfs.client.impl.LeaseRenewer), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), IOException (java.io.IOException), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test)
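
The pivotal trick in this test is rewinding dfs.lastLeaseRenewal so the next renewLease() call sees the soft or hard limit as already exceeded. A minimal sketch of that arithmetic follows; the constants are illustrative copies of the classic HDFS defaults (60 s soft, 60 min hard) rather than references to HdfsConstants, and the class itself is hypothetical.

public class LeaseExpirySketch {

    static final long SOFT_LIMIT_MS = 60 * 1000L;        // illustrative soft limit
    static final long HARD_LIMIT_MS = 60 * 60 * 1000L;   // illustrative hard limit

    static boolean softLimitExceeded(long lastRenewalMs, long nowMs) {
        return nowMs - lastRenewalMs > SOFT_LIMIT_MS;
    }

    static boolean hardLimitExceeded(long lastRenewalMs, long nowMs) {
        return nowMs - lastRenewalMs > HARD_LIMIT_MS;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // Same trick as the test: pretend the last renewal happened long ago.
        long lastRenewal = now - HARD_LIMIT_MS - 1000;
        System.out.println("soft limit exceeded: " + softLimitExceeded(lastRenewal, now));
        System.out.println("hard limit exceeded: " + hardLimitExceeded(lastRenewal, now));
    }
}

Past the soft limit another client may claim the lease but existing writes still go through; past the hard limit the client aborts its open streams, which is exactly the write failure the test asserts.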

Example 3 with LeaseRenewer

Use of org.apache.hadoop.hdfs.client.impl.LeaseRenewer in project hadoop by apache.

From class TestDistributedFileSystem, method testDFSCloseFilesBeingWritten:

@Test
public void testDFSCloseFilesBeingWritten() throws Exception {
    Configuration conf = getTestConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        DistributedFileSystem fileSys = cluster.getFileSystem();
        // Create one file then delete it to trigger the FileNotFoundException
        // when closing the file.
        fileSys.create(new Path("/test/dfsclose/file-0"));
        fileSys.delete(new Path("/test/dfsclose/file-0"), true);
        DFSClient dfsClient = fileSys.getClient();
        // Construct a new dfsClient to get the same LeaseRenewer instance,
        // to avoid the original client being added to the leaseRenewer again.
        DFSClient newDfsClient = new DFSClient(cluster.getFileSystem(0).getUri(), conf);
        LeaseRenewer leaseRenewer = newDfsClient.getLeaseRenewer();
        dfsClient.closeAllFilesBeingWritten(false);
        // Remove the new dfsClient from the leaseRenewer.
        leaseRenewer.closeClient(newDfsClient);
        // The list of clients corresponding to this renewer should be empty.
        assertTrue(leaseRenewer.isEmpty());
        assertTrue(dfsClient.isFilesBeingWrittenEmpty());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), LeaseRenewer (org.apache.hadoop.hdfs.client.impl.LeaseRenewer), Test (org.junit.Test)
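
The isEmpty() assertion here is about reference counting: DFSClient instances register with a shared LeaseRenewer, closeClient() removes them, and an empty client list means the renewer thread can wind down. Below is a deliberately tiny, hypothetical model of that bookkeeping; it mirrors the idea, not LeaseRenewer's actual API or threading.

import java.util.ArrayList;
import java.util.List;

public class RenewerRegistrySketch {

    private final List<Object> clients = new ArrayList<>();

    synchronized void addClient(Object client) {
        clients.add(client);
    }

    synchronized void closeClient(Object client) {
        clients.remove(client);
    }

    synchronized boolean isEmpty() {
        return clients.isEmpty();
    }

    public static void main(String[] args) {
        RenewerRegistrySketch renewer = new RenewerRegistrySketch();
        Object client = new Object();
        renewer.addClient(client);
        System.out.println(renewer.isEmpty());  // false: one registered client
        renewer.closeClient(client);            // analogous to leaseRenewer.closeClient(newDfsClient)
        System.out.println(renewer.isEmpty());  // true: the renewer can shut down
    }
}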

Aggregations

LeaseRenewer (org.apache.hadoop.hdfs.client.impl.LeaseRenewer): 3 uses
Test (org.junit.Test): 3 uses
IOException (java.io.IOException): 2 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2 uses
NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 2 uses
OutputStream (java.io.OutputStream): 1 use
SocketTimeoutException (java.net.SocketTimeoutException): 1 use
Configuration (org.apache.hadoop.conf.Configuration): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
RemoteException (org.apache.hadoop.ipc.RemoteException): 1 use
Matchers.anyString (org.mockito.Matchers.anyString): 1 use