
Example 11 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

Class TestHDFSConcat, method testConcatWithQuotaIncrease.

@Test
public void testConcatWithQuotaIncrease() throws IOException {
    final short repl = 3;
    final int srcNum = 10;
    final Path foo = new Path("/foo");
    final Path bar = new Path(foo, "bar");
    final Path[] srcs = new Path[srcNum];
    final Path target = new Path(bar, "target");
    DFSTestUtil.createFile(dfs, target, blockSize, repl, 0L);
    // Quota sized to exactly the space currently consumed under /foo:
    // one block at replication 3 for the target plus one block per source at REPL_FACTOR.
    final long dsQuota = blockSize * repl + blockSize * srcNum * REPL_FACTOR;
    dfs.setQuota(foo, Long.MAX_VALUE - 1, dsQuota);
    for (int i = 0; i < srcNum; i++) {
        srcs[i] = new Path(bar, "src" + i);
        DFSTestUtil.createFile(dfs, srcs[i], blockSize, REPL_FACTOR, 0L);
    }
    ContentSummary summary = dfs.getContentSummary(bar);
    Assert.assertEquals(11, summary.getFileCount());
    Assert.assertEquals(dsQuota, summary.getSpaceConsumed());
    // concat() re-replicates the sources at the target's replication factor,
    // which would push the space consumed past the quota.
    try {
        dfs.concat(target, srcs);
        fail("QuotaExceededException expected");
    } catch (RemoteException e) {
        Assert.assertTrue(e.unwrapRemoteException() instanceof QuotaExceededException);
    }
    // With the quota lifted, the same concat succeeds.
    dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    dfs.concat(target, srcs);
    summary = dfs.getContentSummary(bar);
    Assert.assertEquals(1, summary.getFileCount());
    Assert.assertEquals(blockSize * repl * (srcNum + 1), summary.getSpaceConsumed());
}
Also used: Path (org.apache.hadoop.fs.Path), QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException), ContentSummary (org.apache.hadoop.fs.ContentSummary), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test)
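As a companion to this example, here is a minimal client-side sketch of the same unwrap-and-inspect pattern outside a test. The helper name concatOrReport and the ConcatQuotaSketch class are illustrative assumptions; the RemoteException.unwrapRemoteException(Class...) overload used here returns the server-side exception only when its class matches one of the supplied types, and otherwise returns the RemoteException itself.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.ipc.RemoteException;

public class ConcatQuotaSketch {
    // Hypothetical helper: concatenate srcs into target and report quota failures.
    static void concatOrReport(DistributedFileSystem dfs, Path target, Path[] srcs)
            throws IOException {
        try {
            dfs.concat(target, srcs);
        } catch (RemoteException re) {
            // Unwrap only if the NameNode-side class is QuotaExceededException;
            // any other remote failure is rethrown as the original RemoteException.
            IOException unwrapped = re.unwrapRemoteException(QuotaExceededException.class);
            if (unwrapped instanceof QuotaExceededException) {
                System.err.println("concat rejected by quota: " + unwrapped.getMessage());
                return;
            }
            throw re;
        }
    }
}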

Example 12 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

Class TestConnectionRetryPolicy, method testDefaultRetryPolicyEquivalence.

@Test(timeout = 60000)
public void testDefaultRetryPolicyEquivalence() {
    RetryPolicy rp1 = null;
    RetryPolicy rp2 = null;
    RetryPolicy rp3 = null;
    /* test the same setting */
    rp1 = getDefaultRetryPolicy(true, "10000,2");
    rp2 = getDefaultRetryPolicy(true, "10000,2");
    rp3 = getDefaultRetryPolicy(true, "10000,2");
    verifyRetryPolicyEquivalence(new RetryPolicy[] { rp1, rp2, rp3 });
    /* test different remoteExceptionToRetry */
    rp1 = getDefaultRetryPolicy(true, "10000,2", new RemoteException(PathIOException.class.getName(), "path IO exception").getClassName());
    rp2 = getDefaultRetryPolicy(true, "10000,2", new RemoteException(RpcNoSuchMethodException.class.getName(), "no such method exception").getClassName());
    rp3 = getDefaultRetryPolicy(true, "10000,2", new RemoteException(RetriableException.class.getName(), "retriable exception").getClassName());
    verifyRetryPolicyEquivalence(new RetryPolicy[] { rp1, rp2, rp3 });
    /* test enabled and different specifications */
    rp1 = getDefaultRetryPolicy(true, "20000,3");
    rp2 = getDefaultRetryPolicy(true, "30000,4");
    assertNotEquals("should not be equal", rp1, rp2);
    assertNotEquals("should not have the same hash code", rp1.hashCode(), rp2.hashCode());
    /* test disabled and the same specifications */
    rp1 = getDefaultRetryPolicy(false, "40000,5");
    rp2 = getDefaultRetryPolicy(false, "40000,5");
    assertEquals("should be equal", rp1, rp2);
    assertEquals("should have the same hash code", rp1, rp2);
    /* test the disabled and different specifications */
    rp1 = getDefaultRetryPolicy(false, "50000,6");
    rp2 = getDefaultRetryPolicy(false, "60000,7");
    assertEquals("should be equal", rp1, rp2);
    assertEquals("should have the same hash code", rp1, rp2);
}
Also used: RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test)
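The test above builds retry policies keyed off the class name that a RemoteException carries, which is why it constructs RemoteException instances only to call getClassName(). As a rough sketch of the same idea (not the test's getDefaultRetryPolicy helper), a policy that treats one remote exception type specially can be composed from the stock org.apache.hadoop.io.retry.RetryPolicies factory; the composition below is illustrative, not taken from the test.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.RetriableException;

public class RetryPolicySketch {
    // Illustrative composition: retry RetriableException a couple of times with a
    // fixed sleep, and fail fast on every other remote exception.
    static RetryPolicy buildPolicy() {
        Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicy = new HashMap<>();
        exceptionToPolicy.put(RetriableException.class,
                RetryPolicies.retryUpToMaximumCountWithFixedSleep(
                        2, 10000, TimeUnit.MILLISECONDS));
        // retryByRemoteException() dispatches on the class name wrapped inside a
        // RemoteException, the same piece of information the test extracts with getClassName().
        return RetryPolicies.retryByRemoteException(
                RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicy);
    }
}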

Example 13 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

Class RpcProgramNfs3, method getattr.

@VisibleForTesting
GETATTR3Response getattr(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    GETATTR3Response response = new GETATTR3Response(Nfs3Status.NFS3_OK);
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        response.setStatus(Nfs3Status.NFS3ERR_ACCES);
        return response;
    }
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    GETATTR3Request request;
    try {
        request = GETATTR3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid GETATTR request");
        response.setStatus(Nfs3Status.NFS3ERR_INVAL);
        return response;
    }
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("GETATTR for fileId: " + handle.getFileId() + " client: " + remoteAddress);
    }
    Nfs3FileAttributes attrs = null;
    try {
        attrs = writeManager.getFileAttr(dfsClient, handle, iug);
    } catch (RemoteException r) {
        LOG.warn("Exception ", r);
        IOException io = r.unwrapRemoteException();
        // AuthorizationException can be thrown if the user can't be proxied.
        if (io instanceof AuthorizationException) {
            return new GETATTR3Response(Nfs3Status.NFS3ERR_ACCES);
        } else {
            return new GETATTR3Response(Nfs3Status.NFS3ERR_IO);
        }
    } catch (IOException e) {
        LOG.info("Can't get file attribute, fileId=" + handle.getFileId(), e);
        int status = mapErrorStatus(e);
        response.setStatus(status);
        return response;
    }
    if (attrs == null) {
        LOG.error("Can't get path for fileId: " + handle.getFileId());
        response.setStatus(Nfs3Status.NFS3ERR_STALE);
        return response;
    }
    response.setPostOpAttr(attrs);
    return response;
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), AuthorizationException (org.apache.hadoop.security.authorize.AuthorizationException), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), IOException (java.io.IOException), GETATTR3Response (org.apache.hadoop.nfs.nfs3.response.GETATTR3Response), RemoteException (org.apache.hadoop.ipc.RemoteException), GETATTR3Request (org.apache.hadoop.nfs.nfs3.request.GETATTR3Request), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
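The getattr handler above is a common shape for NFS gateway calls: unwrap the RemoteException and translate the underlying cause into an NFS3 status. A compressed sketch of just that branch is shown below; the class, helper, and numeric constants are illustrative stand-ins (the real code uses the Nfs3Status constants), but the AuthorizationException check mirrors the method above.

import java.io.IOException;

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.authorize.AuthorizationException;

public class GetattrStatusSketch {
    // Illustrative stand-ins for Nfs3Status.NFS3ERR_ACCES and Nfs3Status.NFS3ERR_IO.
    static final int ERR_ACCES = 13;
    static final int ERR_IO = 5;

    // Proxy-user failures surface as AuthorizationException once the RemoteException
    // from the NameNode is unwrapped; everything else is treated as an I/O error.
    static int statusFor(RemoteException re) {
        IOException unwrapped = re.unwrapRemoteException();
        return (unwrapped instanceof AuthorizationException) ? ERR_ACCES : ERR_IO;
    }
}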

Example 14 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

Class WriteManager, method handleWrite.

void handleWrite(DFSClient dfsClient, WRITE3Request request, Channel channel, int xid, Nfs3FileAttributes preOpAttr) throws IOException {
    int count = request.getCount();
    byte[] data = request.getData().array();
    if (data.length < count) {
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
        Nfs3Utils.writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
        return;
    }
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("handleWrite " + request);
    }
    // Check if there is a stream to write
    FileHandle fileHandle = request.getHandle();
    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
    if (openFileCtx == null) {
        LOG.info("No opened stream for fileId: " + fileHandle.getFileId());
        String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle.getFileId());
        HdfsDataOutputStream fos = null;
        Nfs3FileAttributes latestAttr = null;
        try {
            int bufferSize = config.getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
            fos = dfsClient.append(fileIdPath, bufferSize, EnumSet.of(CreateFlag.APPEND), null, null);
            latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
        } catch (RemoteException e) {
            IOException io = e.unwrapRemoteException();
            if (io instanceof AlreadyBeingCreatedException) {
                LOG.warn("Can't append file: " + fileIdPath + ". Possibly the file is being closed. Drop the request: " + request + ", wait for the client to retry...");
                return;
            }
            throw e;
        } catch (IOException e) {
            LOG.error("Can't append to file: " + fileIdPath, e);
            if (fos != null) {
                fos.close();
            }
            WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), preOpAttr);
            WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO, fileWcc, count, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
            Nfs3Utils.writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
            return;
        }
        // Add open stream
        String writeDumpDir = config.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY, NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_DEFAULT);
        openFileCtx = new OpenFileCtx(fos, latestAttr, writeDumpDir + "/" + fileHandle.getFileId(), dfsClient, iug, aixCompatMode, config);
        if (!addOpenFileStream(fileHandle, openFileCtx)) {
            LOG.info("Can't add new stream. Close it. Tell client to retry.");
            try {
                fos.close();
            } catch (IOException e) {
                LOG.error("Can't close stream for fileId: " + handle.getFileId(), e);
            }
            // Notify client to retry
            WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
            WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_JUKEBOX, fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
            Nfs3Utils.writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
            return;
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Opened stream for appending file: " + fileHandle.getFileId());
        }
    }
    // Add write into the async job queue
    openFileCtx.receivedNewWrite(dfsClient, request, channel, xid, asyncDataService, iug);
    return;
}
Also used: WccData (org.apache.hadoop.nfs.nfs3.response.WccData), AlreadyBeingCreatedException (org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), XDR (org.apache.hadoop.oncrpc.XDR), Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), IOException (java.io.IOException), VerifierNone (org.apache.hadoop.oncrpc.security.VerifierNone), HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream), WRITE3Response (org.apache.hadoop.nfs.nfs3.response.WRITE3Response), RemoteException (org.apache.hadoop.ipc.RemoteException)
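The interesting RemoteException handling in handleWrite is the append path: when the NameNode reports AlreadyBeingCreatedException, the request is dropped so the NFS client retries once the previous writer has finished closing the file. A minimal sketch of that decision is below; shouldClientRetry and AppendRetrySketch are hypothetical names, not part of WriteManager.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.ipc.RemoteException;

public class AppendRetrySketch {
    // Hypothetical helper: true when the append failure is the transient
    // "file is still being created" case, so the client should simply retry later.
    static boolean shouldClientRetry(RemoteException re) {
        IOException unwrapped = re.unwrapRemoteException(AlreadyBeingCreatedException.class);
        return unwrapped instanceof AlreadyBeingCreatedException;
    }
}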

Example 15 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

Class TestLease, method testLeaseAbort.

@Test
public void testLeaseAbort() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
        cluster.waitActive();
        NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
        NamenodeProtocols spyNN = spy(preSpyNN);
        DFSClient dfs = new DFSClient(null, spyNN, conf, null);
        byte[] buf = new byte[1024];
        FSDataOutputStream c_out = createFsOut(dfs, dirString + "c");
        c_out.write(buf, 0, 1024);
        c_out.close();
        DFSInputStream c_in = dfs.open(dirString + "c");
        FSDataOutputStream d_out = createFsOut(dfs, dirString + "d");
        // stub the renew method.
        doThrow(new RemoteException(InvalidToken.class.getName(), "Your token is worthless")).when(spyNN).renewLease(anyString());
        // We don't need to wait for the lease renewer thread to act;
        // call renewLease() manually.
        // Make it look like the soft limit has been exceeded.
        LeaseRenewer originalRenewer = dfs.getLeaseRenewer();
        dfs.lastLeaseRenewal = Time.monotonicNow() - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
        try {
            dfs.renewLease();
        } catch (IOException e) {
            // Ignore: the stubbed renewLease() failure may surface here.
        }
        // The write should still succeed: only the soft limit has passed,
        // so the lease has not been aborted yet.
        try {
            d_out.write(buf, 0, 1024);
            LOG.info("Write worked beyond the soft limit as expected.");
        } catch (IOException e) {
            Assert.fail("Write failed.");
        }
        // make it look like the hard limit has been exceeded.
        dfs.lastLeaseRenewal = Time.monotonicNow() - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000;
        dfs.renewLease();
        // this should not work.
        try {
            d_out.write(buf, 0, 1024);
            d_out.close();
            Assert.fail("Write did not fail even after the fatal lease renewal failure");
        } catch (IOException e) {
            LOG.info("Write failed as expected. ", e);
        }
        // If aborted, the renewer should be empty. (no reference to clients)
        Thread.sleep(1000);
        Assert.assertTrue(originalRenewer.isEmpty());
        // unstub
        doNothing().when(spyNN).renewLease(anyString());
        // existing input streams should work
        try {
            int num = c_in.read(buf, 0, 1);
            if (num != 1) {
                Assert.fail("Failed to read 1 byte");
            }
            c_in.close();
        } catch (IOException e) {
            LOG.error("Read failed with ", e);
            Assert.fail("Read after lease renewal failure failed");
        }
        // new file writes should work.
        try {
            c_out = createFsOut(dfs, dirString + "c");
            c_out.write(buf, 0, 1024);
            c_out.close();
        } catch (IOException e) {
            LOG.error("Write failed with ", e);
            Assert.fail("Write failed");
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), LeaseRenewer (org.apache.hadoop.hdfs.client.impl.LeaseRenewer), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), IOException (java.io.IOException), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test)
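The key trick in this test is fabricating a RemoteException on a mocked NameNode so that client-side lease code sees the wrapped InvalidToken. A stripped-down sketch of the same Mockito pattern is below; LeaseProtocol is a made-up interface standing in for NamenodeProtocols.

import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;

import java.io.IOException;

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;

public class RemoteExceptionStubSketch {
    // Made-up interface for illustration; the test spies on NamenodeProtocols instead.
    interface LeaseProtocol {
        void renewLease(String clientName) throws IOException;
    }

    static LeaseProtocol failingLeaseProtocol() throws IOException {
        LeaseProtocol stub = mock(LeaseProtocol.class);
        // Fabricate the exception a real server would have wrapped, so that
        // client code calling unwrapRemoteException() sees InvalidToken.
        doThrow(new RemoteException(InvalidToken.class.getName(), "Your token is worthless"))
                .when(stub).renewLease(anyString());
        return stub;
    }
}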

Aggregations

RemoteException (org.apache.hadoop.ipc.RemoteException): 99
IOException (java.io.IOException): 53
Test (org.junit.Test): 39
Path (org.apache.hadoop.fs.Path): 36
Configuration (org.apache.hadoop.conf.Configuration): 20
FileNotFoundException (java.io.FileNotFoundException): 19
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
FileSystem (org.apache.hadoop.fs.FileSystem): 12
InterruptedIOException (java.io.InterruptedIOException): 10
AccessControlException (org.apache.hadoop.security.AccessControlException): 10
ServerName (org.apache.hadoop.hbase.ServerName): 9
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 8
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 7
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 7
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 7
EOFException (java.io.EOFException): 6
ArrayList (java.util.ArrayList): 6
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 6
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 6