Search in sources:

Example 1 with NetPayloadSchema

use of com.ociweb.pronghorn.network.schema.NetPayloadSchema in project GreenLightning by oci-pronghorn.

From the class BuilderImpl, method buildHTTPClientGraph.

/**
 * Wires the HTTP-client portion of the graph: sizes and creates the client
 * coordinator, allocates the NetPayloadSchema request pipes feeding the
 * network layer, chooses between the low-latency request stage and the
 * traffic-controlled one, then delegates the remaining client graph
 * construction to NetGraphBuilder.  Does nothing when no net client pipes
 * are in use.
 *
 * @param runtime          owning runtime; only needed by the traffic-controlled stage
 * @param netResponsePipes pipes carrying parsed HTTP responses back to consumers
 * @param netRequestPipes  pipes carrying outbound client HTTP requests
 * @param masterGoOut      per-traffic-type "go" pipes; a non-null IDX_NET row selects the slower traffic-controlled path
 * @param masterAckIn      per-traffic-type ack pipes, paired with masterGoOut
 */
public void buildHTTPClientGraph(MsgRuntime<?, ?> runtime, Pipe<NetResponseSchema>[] netResponsePipes, Pipe<ClientHTTPRequestSchema>[] netRequestPipes, Pipe<TrafficReleaseSchema>[][] masterGoOut, Pipe<TrafficAckSchema>[][] masterAckIn) {
    // //////
    if (useNetClient(netRequestPipes)) {
        // floor of 2 keeps the log2 computation below from producing 0 bits
        int maxPartialResponses = Math.max(2, ClientHostPortInstance.getSessionCount());
        int connectionsInBits = (int) Math.ceil(Math.log(maxPartialResponses) / Math.log(2));
        int netResponseCount = 8;
        int responseQueue = 10;
        // must be adjusted together
        // Multiplier per session for total connections, count of pipes to channel writer
        int outputsCount = 8;
        // count of channel writer stages
        int clientWriters = 1;
        PipeConfig<NetPayloadSchema> clientNetRequestConfig = pcm.getConfig(NetPayloadSchema.class);
        // BUILD GRAPH
        ccm = new ClientCoordinator(connectionsInBits, maxPartialResponses, this.client.getCertificates(), gm.recordTypeData);
        Pipe<NetPayloadSchema>[] clientRequests = new Pipe[outputsCount];
        int r = outputsCount;
        while (--r >= 0) {
            clientRequests[r] = new Pipe<NetPayloadSchema>(clientNetRequestConfig);
        }
        if (isAllNull(masterGoOut[IDX_NET])) {
            // this one has much lower latency and should be used if possible
            new HTTPClientRequestStage(gm, ccm, netRequestPipes, clientRequests);
        } else {
            // FIX: message text is a warning, so log at warn level rather than info
            logger.warn("Warning, the slower HTTP Client Request code was called. 2ms latency may be introduced.");
            // this may stay for as long as 2ms before returning due to timeout of
            // traffic logic, this is undesirable in some low latency cases.
            new HTTPClientRequestTrafficStage(gm, runtime, this, ccm, netRequestPipes, masterGoOut[IDX_NET], masterAckIn[IDX_NET], clientRequests);
        }
        int releaseCount = 1024;
        // takes more memory but limits writes, each creating a little GC
        int writeBufferMultiplier = 100;
        int responseUnwrapCount = 2;
        int clientWrapperCount = 2;
        NetGraphBuilder.buildHTTPClientGraph(gm, ccm, responseQueue, clientRequests, netResponsePipes, netResponseCount, releaseCount, writeBufferMultiplier, responseUnwrapCount, clientWrapperCount, clientWriters);
    }
}
Also used : ClientCoordinator(com.ociweb.pronghorn.network.ClientCoordinator) NetPayloadSchema(com.ociweb.pronghorn.network.schema.NetPayloadSchema) HTTPClientRequestTrafficStage(com.ociweb.gl.impl.stage.HTTPClientRequestTrafficStage) Pipe(com.ociweb.pronghorn.pipe.Pipe) HTTPClientRequestStage(com.ociweb.pronghorn.network.http.HTTPClientRequestStage)

Example 2 with NetPayloadSchema

use of com.ociweb.pronghorn.network.schema.NetPayloadSchema in project GreenLightning by oci-pronghorn.

From the class HTTPClientRequestTrafficStage, method processMessagesForPipe.

/**
 * Drains one input pipe of ClientHTTPRequestSchema messages, translating each
 * message into the corresponding action on the outbound NetPayloadSchema
 * pipes: publishing a GET or POST payload for a live connection, or beginning
 * a disconnect for a close request.  Messages whose session has no live
 * connection (connectionId == -1 or no ClientConnection found) are consumed
 * and silently dropped.
 *
 * NOTE(review): hasOpenConnection(...) in the loop condition presumably
 * establishes or looks up the connection before the fragment is read —
 * confirm against the base-class contract.
 *
 * @param activePipe index of the input pipe to drain
 */
@Override
protected void processMessagesForPipe(int activePipe) {
    Pipe<ClientHTTPRequestSchema> requestPipe = input[activePipe];
    // Keep consuming while: content is waiting, this pipe still has release
    // budget, its channel is not blocked, a connection is available, and a
    // complete fragment can be read.
    while (PipeReader.hasContentToRead(requestPipe) && hasReleaseCountRemaining(activePipe) && isChannelUnBlocked(activePipe) && hasOpenConnection(requestPipe, output, ccm, activePipe) && PipeReader.tryReadFragment(requestPipe)) {
        int msgIdx = PipeReader.getMsgIdx(requestPipe);
        switch(msgIdx) {
            // Fast GET: the message already carries the connection id, so no
            // host/port/session lookup is needed before publishing.
            case ClientHTTPRequestSchema.MSG_FASTHTTPGET_200:
                {
                    final byte[] hostBack = Pipe.blob(requestPipe);
                    final int hostPos = PipeReader.readBytesPosition(requestPipe, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_HOST_2);
                    final int hostLen = PipeReader.readBytesLength(requestPipe, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_HOST_2);
                    final int hostMask = Pipe.blobMask(requestPipe);
                    int routeId = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_DESTINATION_11);
                    // NOTE(review): port, userId and hostMask are read but never
                    // used in this branch — candidates for removal if the reads
                    // have no side effects.
                    int port = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_PORT_1);
                    int userId = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_SESSION_10);
                    long connectionId = PipeReader.readLong(requestPipe, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_CONNECTIONID_20);
                    ClientConnection clientConnection;
                    // Only publish when the connection id is valid and still maps
                    // to a live ClientConnection; otherwise the message is dropped.
                    if (-1 != connectionId && null != (clientConnection = (ClientConnection) ccm.connectionForSessionId(connectionId))) {
                        assert (clientConnection.singleUsage(stageId)) : "Only a single Stage may update the clientConnection.";
                        assert (routeId >= 0);
                        clientConnection.recordDestinationRouteId(routeId);
                        publishGet(requestPipe, hostBack, hostPos, hostLen, connectionId, clientConnection, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_PATH_3, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_HEADERS_7);
                    }
                }
                break;
            // Plain GET: the connection id must first be resolved from
            // host + port + session via the coordinator.
            case ClientHTTPRequestSchema.MSG_HTTPGET_100:
                {
                    final byte[] hostBack = Pipe.blob(requestPipe);
                    final int hostPos = PipeReader.readBytesPosition(requestPipe, ClientHTTPRequestSchema.MSG_HTTPGET_100_FIELD_HOST_2);
                    final int hostLen = PipeReader.readBytesLength(requestPipe, ClientHTTPRequestSchema.MSG_HTTPGET_100_FIELD_HOST_2);
                    final int hostMask = Pipe.blobMask(requestPipe);
                    int routeId = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_HTTPGET_100_FIELD_DESTINATION_11);
                    int port = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_HTTPGET_100_FIELD_PORT_1);
                    int userId = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_HTTPGET_100_FIELD_SESSION_10);
                    long connectionId = ccm.lookup(ccm.lookupHostId(hostBack, hostPos, hostLen, hostMask), port, userId);
                    ClientConnection clientConnection;
                    if (-1 != connectionId && null != (clientConnection = (ClientConnection) ccm.connectionForSessionId(connectionId))) {
                        assert (clientConnection.singleUsage(stageId)) : "Only a single Stage may update the clientConnection.";
                        assert (routeId >= 0);
                        clientConnection.recordDestinationRouteId(routeId);
                        publishGet(requestPipe, hostBack, hostPos, hostLen, connectionId, clientConnection, ClientHTTPRequestSchema.MSG_HTTPGET_100_FIELD_PATH_3, ClientHTTPRequestSchema.MSG_HTTPGET_100_FIELD_HEADERS_7);
                    }
                }
                break;
            // POST: the full request (request line, headers, payload) is written
            // inline into the outbound pipe's blob rather than via publishGet.
            case ClientHTTPRequestSchema.MSG_HTTPPOST_101:
                {
                    final byte[] hostBack = Pipe.blob(requestPipe);
                    final int hostPos = PipeReader.readBytesPosition(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_HOST_2);
                    final int hostLen = PipeReader.readBytesLength(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_HOST_2);
                    final int hostMask = Pipe.blobMask(requestPipe);
                    int routeId = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_DESTINATION_11);
                    int userId = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_SESSION_10);
                    int port = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_PORT_1);
                    long connectionId = ccm.lookup(ccm.lookupHostId(hostBack, hostPos, hostLen, hostMask), port, userId);
                    // openConnection(activeHost, port, userId, outIdx);
                    ClientConnection clientConnection;
                    if ((-1 != connectionId) && (null != (clientConnection = (ClientConnection) ccm.connectionForSessionId(connectionId)))) {
                        assert (routeId >= 0);
                        clientConnection.recordDestinationRouteId(routeId);
                        // each connection is pinned to one outbound pipe
                        int outIdx = clientConnection.requestPipeLineIdx();
                        // count of messages can only be done here.
                        clientConnection.incRequestsSent();
                        Pipe<NetPayloadSchema> outputPipe = output[outIdx];
                        PipeWriter.presumeWriteFragment(outputPipe, NetPayloadSchema.MSG_PLAIN_210);
                        PipeWriter.writeLong(outputPipe, NetPayloadSchema.MSG_PLAIN_210_FIELD_CONNECTIONID_201, connectionId);
                        DataOutputBlobWriter<NetPayloadSchema> activeWriter = PipeWriter.outputStream(outputPipe);
                        DataOutputBlobWriter.openField(activeWriter);
                        DataOutputBlobWriter.encodeAsUTF8(activeWriter, "POST");
                        int len = PipeReader.readBytesLength(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_PATH_3);
                        int first = PipeReader.readBytesPosition(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_PATH_3);
                        // prepend '/' when the path field is empty or does not already start with one
                        boolean prePendSlash = (0 == len) || ('/' != PipeReader.readBytesBackingArray(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_PATH_3)[first & Pipe.blobMask(requestPipe)]);
                        if (prePendSlash) {
                            // NOTE: these can be pre-coverted to bytes so we need not convert on each write. may want to improve.
                            DataOutputBlobWriter.encodeAsUTF8(activeWriter, " /");
                        } else {
                            DataOutputBlobWriter.encodeAsUTF8(activeWriter, " ");
                        }
                        // Reading from UTF8 field and writing to UTF8 encoded field so we are doing a direct copy here.
                        PipeReader.readBytes(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_PATH_3, activeWriter);
                        HeaderUtil.writeHeaderBeginning(hostBack, hostPos, hostLen, Pipe.blobMask(requestPipe), activeWriter);
                        HeaderUtil.writeHeaderMiddle(activeWriter, implementationVersion);
                        PipeReader.readBytes(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_HEADERS_7, activeWriter);
                        long postLength = PipeReader.readBytesLength(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_PAYLOAD_5);
                        HeaderUtil.writeHeaderEnding(activeWriter, true, postLength);
                        // TODO: How is chunking supported, that code is not here yet but length must be -1 I think.
                        PipeReader.readBytes(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_PAYLOAD_5, activeWriter);
                        DataOutputBlobWriter.closeHighLevelField(activeWriter, NetPayloadSchema.MSG_PLAIN_210_FIELD_PAYLOAD_204);
                        PipeWriter.publishWrites(outputPipe);
                    }
                }
                break;
            // Close: resolve the connection, then send a disconnect message down
            // its outbound pipe.  Actual socket close happens downstream.
            case ClientHTTPRequestSchema.MSG_CLOSE_104:
                final byte[] hostBack = Pipe.blob(requestPipe);
                final int hostPos = PipeReader.readBytesPosition(requestPipe, ClientHTTPRequestSchema.MSG_CLOSE_104_FIELD_HOST_2);
                final int hostLen = PipeReader.readBytesLength(requestPipe, ClientHTTPRequestSchema.MSG_CLOSE_104_FIELD_HOST_2);
                final int hostMask = Pipe.blobMask(requestPipe);
                final int port = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_CLOSE_104_FIELD_PORT_1);
                final int userId = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_CLOSE_104_FIELD_SESSION_10);
                long connectionId = ccm.lookup(ccm.lookupHostId(hostBack, hostPos, hostLen, hostMask), port, userId);
                // only close if we find a live connection
                if ((-1 != connectionId)) {
                    ClientConnection connectionToKill = (ClientConnection) ccm.connectionForSessionId(connectionId);
                    if (null != connectionToKill) {
                        Pipe<NetPayloadSchema> outputPipe = output[connectionToKill.requestPipeLineIdx()];
                        // do not close that will be done by last stage
                        // must be done first before we send the message
                        connectionToKill.beginDisconnect();
                        PipeWriter.presumeWriteFragment(outputPipe, NetPayloadSchema.MSG_DISCONNECT_203);
                        PipeWriter.writeLong(outputPipe, NetPayloadSchema.MSG_DISCONNECT_203_FIELD_CONNECTIONID_201, connectionId);
                        PipeWriter.publishWrites(outputPipe);
                    }
                }
                break;
            default:
                logger.info("not yet supporting message {}", msgIdx);
        }
        PipeReader.releaseReadLock(requestPipe);
        // only do now after we know its not blocked and was completed
        decReleaseCount(activePipe);
    }
}
Also used : ClientHTTPRequestSchema(com.ociweb.pronghorn.network.schema.ClientHTTPRequestSchema) DataOutputBlobWriter(com.ociweb.pronghorn.pipe.DataOutputBlobWriter) NetPayloadSchema(com.ociweb.pronghorn.network.schema.NetPayloadSchema) ClientConnection(com.ociweb.pronghorn.network.ClientConnection) Pipe(com.ociweb.pronghorn.pipe.Pipe)

Aggregations

NetPayloadSchema (com.ociweb.pronghorn.network.schema.NetPayloadSchema)2 Pipe (com.ociweb.pronghorn.pipe.Pipe)2 HTTPClientRequestTrafficStage (com.ociweb.gl.impl.stage.HTTPClientRequestTrafficStage)1 ClientConnection (com.ociweb.pronghorn.network.ClientConnection)1 ClientCoordinator (com.ociweb.pronghorn.network.ClientCoordinator)1 HTTPClientRequestStage (com.ociweb.pronghorn.network.http.HTTPClientRequestStage)1 ClientHTTPRequestSchema (com.ociweb.pronghorn.network.schema.ClientHTTPRequestSchema)1 DataOutputBlobWriter (com.ociweb.pronghorn.pipe.DataOutputBlobWriter)1