Search in sources:

Example 1 with RpcResponseHeaderProto

Use of org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto in project hadoop by apache.

From the class SaslRpcClient, the method saslConnect:

/**
   * Do client-side SASL authentication with the server over the given
   * IpcStreams.
   * 
   * @param ipcStreams
   *          the streams to use for reading from and writing to the server
   * @return AuthMethod used to negotiate the connection
   * @throws IOException
   */
public AuthMethod saslConnect(IpcStreams ipcStreams) throws IOException {
    // redefined if/when a SASL negotiation starts, can be queried if the
    // negotiation fails
    authMethod = AuthMethod.SIMPLE;
    sendSaslMessage(ipcStreams.out, negotiateRequest);
    // loop until SASL is complete or an RPC error occurs
    boolean done = false;
    do {
        ByteBuffer bb = ipcStreams.readResponse();
        RpcWritable.Buffer saslPacket = RpcWritable.Buffer.wrap(bb);
        RpcResponseHeaderProto header = saslPacket.getValue(RpcResponseHeaderProto.getDefaultInstance());
        switch(header.getStatus()) {
            // might get an RPC error during the negotiation
            case ERROR:
            case FATAL:
                throw new RemoteException(header.getExceptionClassName(), header.getErrorMsg());
            default:
                break;
        }
        if (header.getCallId() != AuthProtocol.SASL.callId) {
            throw new SaslException("Non-SASL response during negotiation");
        }
        RpcSaslProto saslMessage = saslPacket.getValue(RpcSaslProto.getDefaultInstance());
        if (saslPacket.remaining() > 0) {
            throw new SaslException("Received malformed response length");
        }
        // handle sasl negotiation process
        RpcSaslProto.Builder response = null;
        switch(saslMessage.getState()) {
            case NEGOTIATE:
                {
                    // create a compatible SASL client, throws if no supported auths
                    SaslAuth saslAuthType = selectSaslClient(saslMessage.getAuthsList());
                    // define auth being attempted, caller can query if connect fails
                    authMethod = AuthMethod.valueOf(saslAuthType.getMethod());
                    byte[] responseToken = null;
                    if (authMethod == AuthMethod.SIMPLE) {
                        // switching to SIMPLE
                        // not going to wait for success ack
                        done = true;
                    } else {
                        byte[] challengeToken = null;
                        if (saslAuthType.hasChallenge()) {
                            // server provided the first challenge
                            challengeToken = saslAuthType.getChallenge().toByteArray();
                            saslAuthType = SaslAuth.newBuilder(saslAuthType).clearChallenge().build();
                        } else if (saslClient.hasInitialResponse()) {
                            challengeToken = new byte[0];
                        }
                        responseToken = (challengeToken != null) ? saslClient.evaluateChallenge(challengeToken) : new byte[0];
                    }
                    response = createSaslReply(SaslState.INITIATE, responseToken);
                    response.addAuths(saslAuthType);
                    break;
                }
            case CHALLENGE:
                {
                    if (saslClient == null) {
                        // no SASL client has been negotiated yet, so a
                        // server challenge is unexpected
                        throw new SaslException("Server sent unsolicited challenge");
                    }
                    byte[] responseToken = saslEvaluateToken(saslMessage, false);
                    response = createSaslReply(SaslState.RESPONSE, responseToken);
                    break;
                }
            case SUCCESS:
                {
                    // a SIMPLE-auth server replies with immediate SUCCESS;
                    // fall back to SIMPLE
                    if (saslClient == null) {
                        authMethod = AuthMethod.SIMPLE;
                    } else {
                        saslEvaluateToken(saslMessage, true);
                    }
                    done = true;
                    break;
                }
            default:
                {
                    throw new SaslException("RPC client doesn't support SASL " + saslMessage.getState());
                }
        }
        if (response != null) {
            sendSaslMessage(ipcStreams.out, response.build());
        }
    } while (!done);
    return authMethod;
}
Also used: RpcResponseHeaderProto (org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto), SaslAuth (org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcSaslProto.SaslAuth), RpcWritable (org.apache.hadoop.ipc.RpcWritable), RemoteException (org.apache.hadoop.ipc.RemoteException), SaslException (javax.security.sasl.SaslException), ByteBuffer (java.nio.ByteBuffer), RpcSaslProto (org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcSaslProto)
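
A note on the ERROR/FATAL arm above: a failed negotiation surfaces as a RemoteException built from two header fields. The following is a minimal, self-contained sketch of that path, using only the generated protobuf builder; the call id and message text are placeholder values, not taken from a real exchange.

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;

public class HeaderErrorSketch {

    // mirrors the ERROR/FATAL arm of the switch in saslConnect
    static void checkStatus(RpcResponseHeaderProto header) throws RemoteException {
        if (header.getStatus() != RpcStatusProto.SUCCESS) {
            throw new RemoteException(header.getExceptionClassName(), header.getErrorMsg());
        }
    }

    public static void main(String[] args) {
        // callId and status are the header's required fields; the rest are optional
        RpcResponseHeaderProto header = RpcResponseHeaderProto.newBuilder()
            .setCallId(3)  // placeholder call id
            .setStatus(RpcStatusProto.ERROR)
            .setExceptionClassName("java.io.IOException")
            .setErrorMsg("simulated server-side failure")
            .build();
        try {
            checkStatus(header);
        } catch (RemoteException re) {
            System.out.println("negotiation would fail with: " + re);
        }
    }
}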

Example 2 with RpcResponseHeaderProto

Use of org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto in project hadoop by apache.

From the class TestAsyncIPC, the method testCallIdAndRetry:

/**
   * Test that (1) the RPC server uses the call id/retry provided by the RPC
   * client, and (2) the RPC client receives the same call id/retry from the
   * RPC server.
   *
   * @throws ExecutionException
   * @throws InterruptedException
   */
@Test(timeout = 60000)
public void testCallIdAndRetry() throws IOException, InterruptedException, ExecutionException {
    final Map<Integer, CallInfo> infoMap = new HashMap<Integer, CallInfo>();
    // Override client to store the call info and check response
    final Client client = new Client(LongWritable.class, conf) {

        @Override
        Call createCall(RpcKind rpcKind, Writable rpcRequest) {
            // Set different call id and retry count for the next call
            Client.setCallIdAndRetryCount(Client.nextCallId(), TestIPC.RANDOM.nextInt(255), null);
            final Call call = super.createCall(rpcKind, rpcRequest);
            CallInfo info = new CallInfo();
            info.id = call.id;
            info.retry = call.retry;
            infoMap.put(call.id, info);
            return call;
        }

        @Override
        void checkResponse(RpcResponseHeaderProto header) throws IOException {
            super.checkResponse(header);
            Assert.assertEquals(infoMap.get(header.getCallId()).retry, header.getRetryCount());
        }
    };
    // Attach a listener that tracks every call received by the server.
    final TestServer server = new TestIPC.TestServer(1, false, conf);
    server.callListener = new Runnable() {

        @Override
        public void run() {
            Assert.assertEquals(infoMap.get(Server.getCallId()).retry, Server.getCallRetryCount());
        }
    };
    try {
        InetSocketAddress addr = NetUtils.getConnectAddress(server);
        server.start();
        final AsyncCaller caller = new AsyncCaller(client, addr, 4);
        caller.run();
        caller.assertReturnValues();
    } finally {
        client.stop();
        server.stop();
    }
}
Also used: InetSocketAddress (java.net.InetSocketAddress), CallInfo (org.apache.hadoop.ipc.TestIPC.CallInfo), Writable (org.apache.hadoop.io.Writable), LongWritable (org.apache.hadoop.io.LongWritable), RpcKind (org.apache.hadoop.ipc.RPC.RpcKind), TestServer (org.apache.hadoop.ipc.TestIPC.TestServer), RpcResponseHeaderProto (org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto), Test (org.junit.Test)
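
The property this test verifies over a live connection, namely that the call id and retry count set by the client come back intact in the response header, can also be observed in isolation with a plain protobuf round trip. Below is a small sketch assuming only the generated RpcHeaderProtos classes; writeDelimitedTo/parseDelimitedFrom are standard varint-length-delimited protobuf framing, not Hadoop-specific API, and the field values are arbitrary placeholders.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;

public class HeaderRoundTripSketch {
    public static void main(String[] args) throws IOException {
        RpcResponseHeaderProto sent = RpcResponseHeaderProto.newBuilder()
            .setCallId(42)     // placeholder call id
            .setRetryCount(7)  // placeholder retry count
            .setStatus(RpcStatusProto.SUCCESS)
            .build();
        // serialize with a varint length prefix, then parse it back
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        sent.writeDelimitedTo(out);
        RpcResponseHeaderProto received = RpcResponseHeaderProto.parseDelimitedFrom(
            new ByteArrayInputStream(out.toByteArray()));
        // the same fields the overridden checkResponse() asserts on
        System.out.println(received.getCallId());     // 42
        System.out.println(received.getRetryCount()); // 7
    }
}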

Example 3 with RpcResponseHeaderProto

Use of org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto in project hadoop by apache.

From the class TestIPC, the method testCallIdAndRetry:

/**
   * Test that
   * (1) the RPC server uses the call id/retry provided by the RPC client, and
   * (2) the RPC client receives the same call id/retry from the RPC server.
   */
@Test(timeout = 60000)
public void testCallIdAndRetry() throws IOException {
    final CallInfo info = new CallInfo();
    // Override client to store the call info and check response
    final Client client = new Client(LongWritable.class, conf) {

        @Override
        Call createCall(RpcKind rpcKind, Writable rpcRequest) {
            final Call call = super.createCall(rpcKind, rpcRequest);
            info.id = call.id;
            info.retry = call.retry;
            return call;
        }

        @Override
        void checkResponse(RpcResponseHeaderProto header) throws IOException {
            super.checkResponse(header);
            Assert.assertEquals(info.id, header.getCallId());
            Assert.assertEquals(info.retry, header.getRetryCount());
        }
    };
    // Attach a listener that tracks every call received by the server.
    final TestServer server = new TestServer(1, false);
    server.callListener = new Runnable() {

        @Override
        public void run() {
            Assert.assertEquals(info.id, Server.getCallId());
            Assert.assertEquals(info.retry, Server.getCallRetryCount());
        }
    };
    try {
        InetSocketAddress addr = NetUtils.getConnectAddress(server);
        server.start();
        final SerialCaller caller = new SerialCaller(client, addr, 10);
        caller.run();
        assertFalse(caller.failed);
    } finally {
        client.stop();
        server.stop();
    }
}
Also used: Call (org.apache.hadoop.ipc.Server.Call), RpcResponseHeaderProto (org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto), InetSocketAddress (java.net.InetSocketAddress), Writable (org.apache.hadoop.io.Writable), LongWritable (org.apache.hadoop.io.LongWritable), RpcKind (org.apache.hadoop.ipc.RPC.RpcKind), Test (org.junit.Test)
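
A side note on the retryCount field these tests assert on: it is an optional field of the response header. The sketch below shows the unset case; the default value of -1 is an assumption here (Hadoop's sentinel for an invalid retry count, declared in RpcHeader.proto), so verify it against your version of the .proto file.

import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;

public class RetryCountDefaultSketch {
    public static void main(String[] args) {
        RpcResponseHeaderProto noRetry = RpcResponseHeaderProto.newBuilder()
            .setCallId(1)  // placeholder call id
            .setStatus(RpcResponseHeaderProto.RpcStatusProto.SUCCESS)
            .build();
        // hasRetryCount() is false when the sender never set the field
        System.out.println(noRetry.hasRetryCount());
        // the getter then returns the declared proto default (assumed to be -1)
        System.out.println(noRetry.getRetryCount());
    }
}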

Example 4 with RpcResponseHeaderProto

Use of org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto in project hadoop by apache.

From the class Server, the method setupResponse:

/**
   * Set up the response for an IPC call.
   * 
   * @param call {@link Call} to which we are setting up the response
   * @param status status of the IPC call
   * @param erCode error code of the IPC call, if the call failed
   * @param rv return value for the IPC call, if the call was successful
   * @param errorClass error class, if the call failed
   * @param error error message, if the call failed
   * @throws IOException
   */
private void setupResponse(RpcCall call, RpcStatusProto status, RpcErrorCodeProto erCode, Writable rv, String errorClass, String error) throws IOException {
    // fatal responses will cause the reader to close the connection.
    if (status == RpcStatusProto.FATAL) {
        call.connection.setShouldClose();
    }
    RpcResponseHeaderProto.Builder headerBuilder = RpcResponseHeaderProto.newBuilder();
    headerBuilder.setClientId(ByteString.copyFrom(call.clientId));
    headerBuilder.setCallId(call.callId);
    headerBuilder.setRetryCount(call.retryCount);
    headerBuilder.setStatus(status);
    headerBuilder.setServerIpcVersionNum(CURRENT_VERSION);
    if (status == RpcStatusProto.SUCCESS) {
        RpcResponseHeaderProto header = headerBuilder.build();
        try {
            setupResponse(call, header, rv);
        } catch (Throwable t) {
            LOG.warn("Error serializing call response for call " + call, t);
            // Call back to same function - this is OK since the
            // buffer is reset at the top, and since status is changed
            // to ERROR it won't infinite loop.
            setupResponse(call, RpcStatusProto.ERROR, RpcErrorCodeProto.ERROR_SERIALIZING_RESPONSE, null, t.getClass().getName(), StringUtils.stringifyException(t));
            return;
        }
    } else {
        // Rpc Failure
        headerBuilder.setExceptionClassName(errorClass);
        headerBuilder.setErrorMsg(error);
        headerBuilder.setErrorDetail(erCode);
        setupResponse(call, headerBuilder.build(), null);
    }
}
Also used: RpcResponseHeaderProto (org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto)
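
For reference, here is a standalone sketch of the failure branch above with placeholder values. The client UUID bytes and server version are stand-ins for call.clientId and CURRENT_VERSION, and ByteString is assumed to be com.google.protobuf.ByteString (newer Hadoop releases use a shaded copy under org.apache.hadoop.thirdparty).

import com.google.protobuf.ByteString;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;

public class FailureHeaderSketch {
    public static void main(String[] args) {
        byte[] clientIdBytes = new byte[16];  // stand-in for the connection's client UUID
        int serverVersion = 9;                // stand-in for CURRENT_VERSION
        RpcResponseHeaderProto failure = RpcResponseHeaderProto.newBuilder()
            .setClientId(ByteString.copyFrom(clientIdBytes))
            .setCallId(7)  // placeholder call id
            .setRetryCount(0)
            .setStatus(RpcStatusProto.ERROR)
            .setServerIpcVersionNum(serverVersion)
            .setExceptionClassName("java.io.IOException")
            .setErrorMsg("could not serialize response")
            .setErrorDetail(RpcErrorCodeProto.ERROR_SERIALIZING_RESPONSE)
            .build();
        // optional fields can be probed before reading on the receiving side
        System.out.println(failure.hasErrorDetail());         // true
        System.out.println(failure.getExceptionClassName());  // java.io.IOException
    }
}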

Example 5 with RpcResponseHeaderProto

Use of org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto in project hadoop by apache.

From the class Server, the method wrapWithSasl:

private void wrapWithSasl(RpcCall call) throws IOException {
    if (call.connection.saslServer != null) {
        byte[] token = call.rpcResponse.array();
        // synchronization is needed since multiple Handler threads may be
        // using saslServer to wrap responses
        synchronized (call.connection.saslServer) {
            token = call.connection.saslServer.wrap(token, 0, token.length);
        }
        if (LOG.isDebugEnabled())
            LOG.debug("Adding saslServer wrapped token of size " + token.length + " as call response.");
        // rebuild with sasl header and payload
        RpcResponseHeaderProto saslHeader = RpcResponseHeaderProto.newBuilder()
            .setCallId(AuthProtocol.SASL.callId)
            .setStatus(RpcStatusProto.SUCCESS)
            .build();
        RpcSaslProto saslMessage = RpcSaslProto.newBuilder()
            .setState(SaslState.WRAP)
            .setToken(ByteString.copyFrom(token))
            .build();
        setupResponse(call, saslHeader, RpcWritable.wrap(saslMessage));
    }
}
Also used: RpcResponseHeaderProto (org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto), RpcSaslProto (org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcSaslProto)
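
For context, a hedged sketch of the client-side counterpart: after negotiation, each response arrives as a WRAP-state RpcSaslProto whose token field holds the wrapped bytes, and the established SASL client reverses the wrap. The saslClient parameter is assumed to be a fully negotiated javax.security.sasl.SaslClient; unwrap(byte[], int, int) is the standard JDK SASL API.

import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcSaslProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcSaslProto.SaslState;

static byte[] unwrapResponse(RpcSaslProto saslMessage, SaslClient saslClient)
        throws SaslException {
    if (saslMessage.getState() != SaslState.WRAP) {
        throw new SaslException("Expected a WRAP message, got " + saslMessage.getState());
    }
    // the token field carries the saslServer-wrapped response bytes
    byte[] token = saslMessage.getToken().toByteArray();
    return saslClient.unwrap(token, 0, token.length);
}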

Aggregations

RpcResponseHeaderProto (org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto): 5 usages
InetSocketAddress (java.net.InetSocketAddress): 2 usages
LongWritable (org.apache.hadoop.io.LongWritable): 2 usages
Writable (org.apache.hadoop.io.Writable): 2 usages
RpcKind (org.apache.hadoop.ipc.RPC.RpcKind): 2 usages
RpcSaslProto (org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcSaslProto): 2 usages
Test (org.junit.Test): 2 usages
ByteBuffer (java.nio.ByteBuffer): 1 usage
SaslException (javax.security.sasl.SaslException): 1 usage
RemoteException (org.apache.hadoop.ipc.RemoteException): 1 usage
RpcWritable (org.apache.hadoop.ipc.RpcWritable): 1 usage
Call (org.apache.hadoop.ipc.Server.Call): 1 usage
CallInfo (org.apache.hadoop.ipc.TestIPC.CallInfo): 1 usage
TestServer (org.apache.hadoop.ipc.TestIPC.TestServer): 1 usage
SaslAuth (org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcSaslProto.SaslAuth): 1 usage