
Example 1 with RpcResponse

Use of org.apache.hadoop.oncrpc.RpcResponse in project hadoop by apache.

From class RpcProgramMountd, method handleInternal:

@Override
public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
    RpcCall rpcCall = (RpcCall) info.header();
    final MNTPROC mntproc = MNTPROC.fromValue(rpcCall.getProcedure());
    int xid = rpcCall.getXid();
    byte[] data = new byte[info.data().readableBytes()];
    info.data().readBytes(data);
    XDR xdr = new XDR(data);
    XDR out = new XDR();
    InetAddress client = ((InetSocketAddress) info.remoteAddress()).getAddress();
    if (mntproc == MNTPROC.NULL) {
        out = nullOp(out, xid, client);
    } else if (mntproc == MNTPROC.MNT) {
        // Only do port monitoring for MNT
        if (!doPortMonitoring(info.remoteAddress())) {
            out = MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid, null);
        } else {
            out = mnt(xdr, out, xid, client);
        }
    } else if (mntproc == MNTPROC.DUMP) {
        out = dump(out, xid, client);
    } else if (mntproc == MNTPROC.UMNT) {
        out = umnt(xdr, out, xid, client);
    } else if (mntproc == MNTPROC.UMNTALL) {
        umntall(out, xid, client);
    } else if (mntproc == MNTPROC.EXPORT) {
        // Currently only support one NFS export
        List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
        if (hostsMatcher != null) {
            hostsMatchers.add(hostsMatcher);
            out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
        } else {
            // This means there are no valid exports provided.
            RpcAcceptedReply.getInstance(xid, RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(out);
        }
    } else {
        // Invalid procedure
        RpcAcceptedReply.getInstance(xid, RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(out);
    }
    ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
    RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
    RpcUtil.sendRpcResponse(ctx, rsp);
}
Also used : InetSocketAddress(java.net.InetSocketAddress) XDR(org.apache.hadoop.oncrpc.XDR) RpcResponse(org.apache.hadoop.oncrpc.RpcResponse) ChannelBuffer(org.jboss.netty.buffer.ChannelBuffer) RpcCall(org.apache.hadoop.oncrpc.RpcCall) VerifierNone(org.apache.hadoop.oncrpc.security.VerifierNone) NfsExports(org.apache.hadoop.nfs.NfsExports) ArrayList(java.util.ArrayList) List(java.util.List) InetAddress(java.net.InetAddress)
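
All three examples on this page finish with the same reply pattern: serialize the reply into an XDR buffer, wrap the bytes in a ChannelBuffer, pair them with the caller's address in an RpcResponse, and hand the result to RpcUtil.sendRpcResponse. A minimal sketch of that tail as a standalone helper follows; the method name sendReply is hypothetical and not part of the Hadoop code, and it assumes only the XDR, ChannelBuffers, RpcResponse and RpcUtil calls already shown above.

// Hypothetical helper (not in the Hadoop source) capturing the common
// serialize-and-send tail of the handlers on this page.
private static void sendReply(ChannelHandlerContext ctx, RpcInfo info, XDR out) {
    // Expose the serialized XDR bytes as a read-only Netty buffer.
    ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
    // Bind the payload to the caller's remote address so the reply is routed back to the right client.
    RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
    RpcUtil.sendRpcResponse(ctx, rsp);
}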

Example 2 with RpcResponse

Use of org.apache.hadoop.oncrpc.RpcResponse in project hadoop by apache.

From class RpcProgramPortmap, method messageReceived:

@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
    RpcInfo info = (RpcInfo) e.getMessage();
    RpcCall rpcCall = (RpcCall) info.header();
    final int portmapProc = rpcCall.getProcedure();
    int xid = rpcCall.getXid();
    XDR in = new XDR(info.data().toByteBuffer().asReadOnlyBuffer(), XDR.State.READING);
    XDR out = new XDR();
    if (portmapProc == PMAPPROC_NULL) {
        out = nullOp(xid, in, out);
    } else if (portmapProc == PMAPPROC_SET) {
        out = set(xid, in, out);
    } else if (portmapProc == PMAPPROC_UNSET) {
        out = unset(xid, in, out);
    } else if (portmapProc == PMAPPROC_DUMP) {
        out = dump(xid, in, out);
    } else if (portmapProc == PMAPPROC_GETPORT) {
        out = getport(xid, in, out);
    } else if (portmapProc == PMAPPROC_GETVERSADDR) {
        out = getport(xid, in, out);
    } else {
        LOG.info("PortmapHandler unknown rpc procedure=" + portmapProc);
        RpcAcceptedReply reply = RpcAcceptedReply.getInstance(xid, RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone());
        reply.write(out);
    }
    ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
    RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
    RpcUtil.sendRpcResponse(ctx, rsp);
}
Also used : RpcInfo(org.apache.hadoop.oncrpc.RpcInfo) RpcCall(org.apache.hadoop.oncrpc.RpcCall) XDR(org.apache.hadoop.oncrpc.XDR) VerifierNone(org.apache.hadoop.oncrpc.security.VerifierNone) RpcAcceptedReply(org.apache.hadoop.oncrpc.RpcAcceptedReply) RpcResponse(org.apache.hadoop.oncrpc.RpcResponse) ChannelBuffer(org.jboss.netty.buffer.ChannelBuffer)
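
The portmap dispatch above is a flat if/else chain over the procedure number. As a readability sketch only (not the Hadoop source), the same dispatch can be expressed as a switch; it reuses the PMAPPROC_* constants and the nullOp/set/unset/dump/getport methods exactly as they appear in the handler above.

// Readability sketch: the dispatch above rewritten as a switch over the procedure number.
switch (portmapProc) {
    case PMAPPROC_NULL:        out = nullOp(xid, in, out); break;
    case PMAPPROC_SET:         out = set(xid, in, out); break;
    case PMAPPROC_UNSET:       out = unset(xid, in, out); break;
    case PMAPPROC_DUMP:        out = dump(xid, in, out); break;
    case PMAPPROC_GETPORT:     // fall through: GETVERSADDR is answered like GETPORT here
    case PMAPPROC_GETVERSADDR: out = getport(xid, in, out); break;
    default:
        LOG.info("PortmapHandler unknown rpc procedure=" + portmapProc);
        RpcAcceptedReply.getInstance(xid, RpcAcceptedReply.AcceptState.PROC_UNAVAIL,
            new VerifierNone()).write(out);
}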

Example 3 with RpcResponse

Use of org.apache.hadoop.oncrpc.RpcResponse in project hadoop by apache.

From class RpcProgramNfs3, method handleInternal:

@Override
public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
    RpcCall rpcCall = (RpcCall) info.header();
    final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(rpcCall.getProcedure());
    int xid = rpcCall.getXid();
    byte[] data = new byte[info.data().readableBytes()];
    info.data().readBytes(data);
    XDR xdr = new XDR(data);
    XDR out = new XDR();
    InetAddress client = ((InetSocketAddress) info.remoteAddress()).getAddress();
    Credentials credentials = rpcCall.getCredential();
    // Ignore auth only for NFSPROC3_NULL, especially for Linux clients.
    if (nfsproc3 != NFSPROC3.NULL) {
        if (credentials.getFlavor() != AuthFlavor.AUTH_SYS && credentials.getFlavor() != AuthFlavor.RPCSEC_GSS) {
            LOG.info("Wrong RPC AUTH flavor, " + credentials.getFlavor() + " is not AUTH_SYS or RPCSEC_GSS.");
            XDR reply = new XDR();
            RpcDeniedReply rdr = new RpcDeniedReply(xid, RpcReply.ReplyState.MSG_ACCEPTED, RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone());
            rdr.write(reply);
            ChannelBuffer buf = ChannelBuffers.wrappedBuffer(reply.asReadOnlyWrap().buffer());
            RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
            RpcUtil.sendRpcResponse(ctx, rsp);
            return;
        }
    }
    if (!isIdempotent(rpcCall)) {
        RpcCallCache.CacheEntry entry = rpcCallCache.checkOrAddToCache(client, xid);
        if (entry != null) {
            // in cache
            if (entry.isCompleted()) {
                LOG.info("Sending the cached reply to retransmitted request " + xid);
                RpcUtil.sendRpcResponse(ctx, entry.getResponse());
                return;
            } else {
                // else request is in progress
                LOG.info("Retransmitted request, transaction still in progress " + xid);
                // Ignore the request and do nothing
                return;
            }
        }
    }
    // Since write and commit could be async, they use their own startTime and
    // only record success requests.
    final long startTime = System.nanoTime();
    NFS3Response response = null;
    if (nfsproc3 == NFSPROC3.NULL) {
        response = nullProcedure();
    } else if (nfsproc3 == NFSPROC3.GETATTR) {
        response = getattr(xdr, info);
        metrics.addGetattr(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.SETATTR) {
        response = setattr(xdr, info);
        metrics.addSetattr(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.LOOKUP) {
        response = lookup(xdr, info);
        metrics.addLookup(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.ACCESS) {
        response = access(xdr, info);
        metrics.addAccess(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.READLINK) {
        response = readlink(xdr, info);
        metrics.addReadlink(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.READ) {
        if (LOG.isDebugEnabled()) {
            LOG.debug(Nfs3Utils.READ_RPC_START + xid);
        }
        response = read(xdr, info);
        if (LOG.isDebugEnabled() && (nfsproc3 == NFSPROC3.READ)) {
            LOG.debug(Nfs3Utils.READ_RPC_END + xid);
        }
        metrics.addRead(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.WRITE) {
        if (LOG.isDebugEnabled()) {
            LOG.debug(Nfs3Utils.WRITE_RPC_START + xid);
        }
        response = write(xdr, info);
    // Write end debug trace is in Nfs3Utils.writeChannel
    } else if (nfsproc3 == NFSPROC3.CREATE) {
        response = create(xdr, info);
        metrics.addCreate(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.MKDIR) {
        response = mkdir(xdr, info);
        metrics.addMkdir(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.SYMLINK) {
        response = symlink(xdr, info);
        metrics.addSymlink(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.MKNOD) {
        response = mknod(xdr, info);
        metrics.addMknod(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.REMOVE) {
        response = remove(xdr, info);
        metrics.addRemove(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.RMDIR) {
        response = rmdir(xdr, info);
        metrics.addRmdir(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.RENAME) {
        response = rename(xdr, info);
        metrics.addRename(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.LINK) {
        response = link(xdr, info);
        metrics.addLink(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.READDIR) {
        response = readdir(xdr, info);
        metrics.addReaddir(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.READDIRPLUS) {
        response = readdirplus(xdr, info);
        metrics.addReaddirplus(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.FSSTAT) {
        response = fsstat(xdr, info);
        metrics.addFsstat(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.FSINFO) {
        response = fsinfo(xdr, info);
        metrics.addFsinfo(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.PATHCONF) {
        response = pathconf(xdr, info);
        metrics.addPathconf(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.COMMIT) {
        response = commit(xdr, info);
    } else {
        // Invalid procedure
        RpcAcceptedReply.getInstance(xid, RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(out);
    }
    if (response == null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("No sync response, expect an async response for request XID=" + rpcCall.getXid());
        }
        return;
    }
    // TODO: currently we just return VerifierNone
    out = response.serialize(out, xid, new VerifierNone());
    ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
    RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
    if (!isIdempotent(rpcCall)) {
        rpcCallCache.callCompleted(client, xid, rsp);
    }
    RpcUtil.sendRpcResponse(ctx, rsp);
}
Also used : InetSocketAddress(java.net.InetSocketAddress) XDR(org.apache.hadoop.oncrpc.XDR) RpcResponse(org.apache.hadoop.oncrpc.RpcResponse) ChannelBuffer(org.jboss.netty.buffer.ChannelBuffer) RpcCall(org.apache.hadoop.oncrpc.RpcCall) RpcDeniedReply(org.apache.hadoop.oncrpc.RpcDeniedReply) NFS3Response(org.apache.hadoop.nfs.nfs3.response.NFS3Response) VerifierNone(org.apache.hadoop.oncrpc.security.VerifierNone) NFSPROC3(org.apache.hadoop.nfs.nfs3.Nfs3Constant.NFSPROC3) InetAddress(java.net.InetAddress) Credentials(org.apache.hadoop.oncrpc.security.Credentials) RpcCallCache(org.apache.hadoop.oncrpc.RpcCallCache)
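
For non-idempotent NFS procedures the handler above consults rpcCallCache twice: checkOrAddToCache up front to detect a retransmitted XID (replaying the cached RpcResponse, or silently dropping a duplicate that is still in progress), and callCompleted at the end to remember the reply it just sent. A condensed sketch of that protocol as a hypothetical helper follows; the name replayIfRetransmitted is not in the Hadoop code, and the sketch relies only on the RpcCallCache and RpcUtil calls that appear in handleInternal.

// Hypothetical helper sketching the retransmission-cache check used above.
// Returns true if the request is a duplicate and must not be processed again.
private boolean replayIfRetransmitted(ChannelHandlerContext ctx, InetAddress client, int xid) {
    RpcCallCache.CacheEntry entry = rpcCallCache.checkOrAddToCache(client, xid);
    if (entry == null) {
        // First time this (client, xid) pair is seen: process the call normally.
        return false;
    }
    if (entry.isCompleted()) {
        // Retransmitted request whose reply was already sent: replay the cached response.
        RpcUtil.sendRpcResponse(ctx, entry.getResponse());
    }
    // Completed duplicates are replayed, in-progress duplicates are dropped;
    // either way the caller should return without re-executing the procedure.
    return true;
}

Once the real reply has been built, rpcCallCache.callCompleted(client, xid, rsp) stores it so a later retransmission hits the isCompleted() branch.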

Aggregations (number of examples above using each type)

RpcCall (org.apache.hadoop.oncrpc.RpcCall): 3
RpcResponse (org.apache.hadoop.oncrpc.RpcResponse): 3
XDR (org.apache.hadoop.oncrpc.XDR): 3
VerifierNone (org.apache.hadoop.oncrpc.security.VerifierNone): 3
ChannelBuffer (org.jboss.netty.buffer.ChannelBuffer): 3
InetAddress (java.net.InetAddress): 2
InetSocketAddress (java.net.InetSocketAddress): 2
ArrayList (java.util.ArrayList): 1
List (java.util.List): 1
NfsExports (org.apache.hadoop.nfs.NfsExports): 1
NFSPROC3 (org.apache.hadoop.nfs.nfs3.Nfs3Constant.NFSPROC3): 1
NFS3Response (org.apache.hadoop.nfs.nfs3.response.NFS3Response): 1
RpcAcceptedReply (org.apache.hadoop.oncrpc.RpcAcceptedReply): 1
RpcCallCache (org.apache.hadoop.oncrpc.RpcCallCache): 1
RpcDeniedReply (org.apache.hadoop.oncrpc.RpcDeniedReply): 1
RpcInfo (org.apache.hadoop.oncrpc.RpcInfo): 1
Credentials (org.apache.hadoop.oncrpc.security.Credentials): 1