Use of com.ociweb.pronghorn.pipe.Pipe in project GreenLightning by oci-pronghorn.
From the class MsgRuntime, method autoWireTransducers:
protected Pipe<?>[] autoWireTransducers(final Behavior listener, Pipe<?>[] inputPipes, final ArrayList<ReactiveManagerPipeConsumer> consumers) {
    if (inputPipes.length == 0) {
        // no work since no inputs are used.
        return inputPipes;
    }
    final Grouper g = new Grouper(inputPipes);
    ChildClassScannerVisitor tVisitor = new ChildClassScannerVisitor() {
        @Override
        public boolean visit(Object child, Object topParent) {
            if (g.additions() == 0) {
                // add first value
                Pipe[] pipes = builder.operators.createPipes(builder, listener, g);
                consumers.add(new ReactiveManagerPipeConsumer(listener, builder.operators, pipes));
                g.add(pipes);
            }
            int c = consumers.size();
            while (--c >= 0) {
                if (consumers.get(c).obj == child) {
                    // do not add this one; it is already recorded
                    return true;
                }
            }
            Pipe[] pipes = builder.operators.createPipes(builder, child, g);
            consumers.add(new ReactiveManagerPipeConsumer(child, builder.operators, pipes));
            g.add(pipes);
            return true;
        }
    };
    ChildClassScanner.visitUsedByClass(listener, tVisitor, ListenerTransducer.class);
    if (g.additions() > 0) {
        inputPipes = g.firstArray();
        g.buildReplicators(gm, consumers);
    }
    return inputPipes;
}
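The visitor above lazily registers the listener itself on the first transducer visited, then deduplicates children by identity before creating pipes for them. Below is a minimal, self-contained sketch of that dedupe-on-visit pattern using plain Java collections; the Visitor and Consumer types here are hypothetical stand-ins (consumers.isEmpty() plays the role of g.additions() == 0), not Pronghorn API.

    import java.util.ArrayList;
    import java.util.List;

    public class VisitorDedupeSketch {

        interface Visitor {
            boolean visit(Object child, Object topParent);
        }

        static class Consumer {
            final Object obj;
            Consumer(Object obj) { this.obj = obj; }
        }

        public static void main(String[] args) {
            final List<Consumer> consumers = new ArrayList<>();
            final Object parent = new Object();

            Visitor v = (child, topParent) -> {
                if (consumers.isEmpty()) {
                    // lazily record the parent on the first child visited,
                    // mirroring the "add first value" branch above
                    consumers.add(new Consumer(topParent));
                }
                int c = consumers.size();
                while (--c >= 0) {
                    if (consumers.get(c).obj == child) {
                        return true; // identity match: already recorded, skip
                    }
                }
                consumers.add(new Consumer(child));
                return true;
            };

            Object childA = new Object();
            v.visit(childA, parent);
            v.visit(childA, parent); // second visit is deduplicated
            System.out.println(consumers.size()); // prints 2: parent + childA
        }
    }

Note the identity comparison (==) rather than equals(): the same transducer instance may be reachable through several fields, and only the instance itself matters for wiring.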
Use of com.ociweb.pronghorn.pipe.Pipe in project GreenLightning by oci-pronghorn.
From the class MsgRuntime, method buildLastHalfOfGraphForServer:
private void buildLastHalfOfGraphForServer(MsgApp app, ServerPipesConfig serverConfig, ServerCoordinator serverCoord, final int routerCount, Pipe[] acks, Pipe[] handshakeIncomingGroup, Pipe[] planIncomingGroup) {
    if (app instanceof MsgAppParallel) {
        int p = builder.parallelTracks();
        for (int i = 0; i < p; i++) {
            constructingParallelInstance(i);
            // this creates all the modules for this parallel instance
            ((MsgAppParallel) app).declareParallelBehavior(this);
        }
    } else {
        if (builder.parallelTracks() > 1) {
            throw new UnsupportedOperationException("Remove call to parallelism(" + builder.parallelTracks() + ") OR make the application implement GreenAppParallel or something extending it.");
        }
    }
    //////////////////
    //////////////////
    HTTP1xRouterStageConfig routerConfig = builder.routerConfig();
    ArrayList<Pipe> forPipeCleaner = new ArrayList<Pipe>();
    Pipe<HTTPRequestSchema>[][] fromRouterToModules = new Pipe[routerCount][];
    int t = routerCount;
    int totalRequestPipes = 0;
    while (--t >= 0) {
        // [router/parallel] then [parser/routes]
        int path = routerConfig.totalPathsCount();
        ///////////////
        if (path == 0) {
            path = 1;
        }
        /////////////
        fromRouterToModules[t] = new Pipe[path];
        while (--path >= 0) {
            ArrayList<Pipe<HTTPRequestSchema>> requestPipes = builder.buildFromRequestArray(t, path);
            // with a single pipe, just pass it on; otherwise use the replicator to fan out from a new single pipe.
            int size = requestPipes.size();
            totalRequestPipes += size;
            if (1 == size) {
                fromRouterToModules[t][path] = requestPipes.get(0);
            } else {
                // we only create a pipe when we are about to use the replicator
                fromRouterToModules[t][path] = builder.newHTTPRequestPipe(builder.pcm.getConfig(HTTPRequestSchema.class));
                if (0 == size) {
                    logger.info("warning: there are routes without any consumers");
                    // we have no consumer so tie it to the pipe cleaner
                    forPipeCleaner.add(fromRouterToModules[t][path]);
                } else {
                    ReplicatorStage.newInstance(gm, fromRouterToModules[t][path], requestPipes.toArray(new Pipe[requestPipes.size()]));
                }
            }
        }
        if (0 == totalRequestPipes) {
            logger.warn("ERROR: includeRoutes or includeAllRoutes must be called on the REST listener.");
        }
    }
    if (!forPipeCleaner.isEmpty()) {
        PipeCleanerStage.newInstance(gm, forPipeCleaner.toArray(new Pipe[forPipeCleaner.size()]));
    }
    // NOTE: building arrays of pipes grouped by parallel/routers heading out to the order supervisor
    Pipe<ServerResponseSchema>[][] fromModulesToOrderSuper = new Pipe[routerCount][];
    Pipe<ServerResponseSchema>[] errorResponsePipes = new Pipe[routerCount];
    PipeConfig<ServerResponseSchema> errConfig = ServerResponseSchema.instance.newPipeConfig(4, 512);
    final boolean catchAll = builder.routerConfig().totalPathsCount() == 0;
    int j = routerCount;
    while (--j >= 0) {
        Pipe<ServerResponseSchema>[] temp = fromModulesToOrderSuper[j] = builder.buildToOrderArray(j);
        // this block is required to make sure the ordering stage has room
        int c = temp.length;
        while (--c >= 0) {
            // ensure that the ordering stage can consume messages of this size
            serverConfig.ensureServerCanWrite(temp[c].config().maxVarLenSize());
        }
    }
    serverConfig.ensureServerCanWrite(errConfig.maxVarLenSize());
    int r = routerCount;
    while (--r >= 0) {
        errorResponsePipes[r] = new Pipe<ServerResponseSchema>(errConfig);
        fromModulesToOrderSuper[r] = PronghornStage.join(errorResponsePipes[r], fromModulesToOrderSuper[r]);
    }
    NetGraphBuilder.buildRouters(gm, planIncomingGroup, acks, fromRouterToModules, errorResponsePipes, routerConfig, serverCoord, catchAll);
    // NOTE: the array populated here must be equal or larger than the fromModules..
    Pipe<NetPayloadSchema>[] fromOrderedContent = NetGraphBuilder.buildRemainderOFServerStages(gm, serverCoord, serverConfig, handshakeIncomingGroup);
    // NOTE: fromOrderedContent must hold var len data which is greater than fromModulesToOrderSuper
    NetGraphBuilder.buildOrderingSupers(gm, serverCoord, routerCount, fromModulesToOrderSuper, fromOrderedContent);
}
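The per-router loop near the end prepends an error-response pipe to each router's module pipes via PronghornStage.join, so the ordering supervisor always has a slot for error output ahead of regular responses. A minimal sketch of that array-prepend, assuming a hypothetical generic helper (not the actual Pronghorn signature):

    import java.util.Arrays;

    public class PrependSketch {

        // return a new array with first placed ahead of every element of rest
        static <T> T[] prepend(T first, T[] rest) {
            T[] result = Arrays.copyOf(rest, rest.length + 1);
            System.arraycopy(rest, 0, result, 1, rest.length);
            result[0] = first;
            return result;
        }

        public static void main(String[] args) {
            String[] modules = { "moduleA", "moduleB" };
            String[] joined = prepend("errorPipe", modules);
            System.out.println(Arrays.toString(joined)); // [errorPipe, moduleA, moduleB]
        }
    }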
Use of com.ociweb.pronghorn.pipe.Pipe in project GreenLightning by oci-pronghorn.
From the class MsgRuntime, method buildGraphForServer:
private void buildGraphForServer(MsgApp app) {
    HTTPServerConfig config = builder.getHTTPServerConfig();
    ServerPipesConfig serverConfig = config.buildServerConfig(builder.parallelTracks());
    ServerCoordinator serverCoord = new ServerCoordinator(config.getCertificates(), config.bindHost(), config.bindPort(), serverConfig.maxConnectionBitsOnServer, serverConfig.maxConcurrentInputs, serverConfig.maxConcurrentOutputs, builder.parallelTracks(), false, "Server", config.defaultHostPath());
    final int routerCount = builder.parallelTracks();
    final Pipe<NetPayloadSchema>[] encryptedIncomingGroup = Pipe.buildPipes(serverConfig.maxConcurrentInputs, serverConfig.incomingDataConfig);
    Pipe[] acks = NetGraphBuilder.buildSocketReaderStage(gm, serverCoord, routerCount, serverConfig, encryptedIncomingGroup);
    Pipe[] handshakeIncomingGroup = null;
    Pipe[] planIncomingGroup;
    if (config.isTLS()) {
        planIncomingGroup = Pipe.buildPipes(serverConfig.maxConcurrentInputs, serverConfig.incomingDataConfig);
        handshakeIncomingGroup = NetGraphBuilder.populateGraphWithUnWrapStages(gm, serverCoord, serverConfig.serverRequestUnwrapUnits, serverConfig.handshakeDataConfig, encryptedIncomingGroup, planIncomingGroup, acks);
    } else {
        planIncomingGroup = encryptedIncomingGroup;
    }
    // Must call here so the beginning stages of the graph are drawn first when exporting the graph.
    app.declareBehavior(this);
    buildLastHalfOfGraphForServer(app, serverConfig, serverCoord, routerCount, acks, handshakeIncomingGroup, planIncomingGroup);
}
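Note how planIncomingGroup simply aliases encryptedIncomingGroup when TLS is off, while the TLS path allocates a fresh pipe group that the unwrap stages fill with decrypted data. A toy sketch of that aliasing decision, with strings standing in for pipes (all names below are hypothetical):

    public class TlsBranchSketch {

        static String[] buildPlainGroup(boolean isTLS, String[] encryptedIncoming) {
            if (isTLS) {
                // allocate a fresh group; pretend an unwrap stage fills it
                String[] plain = new String[encryptedIncoming.length];
                for (int i = 0; i < plain.length; i++) {
                    plain[i] = "unwrap(" + encryptedIncoming[i] + ")";
                }
                return plain;
            }
            // no TLS: the incoming group already carries plaintext
            return encryptedIncoming;
        }

        public static void main(String[] args) {
            String[] in = { "socketPipe0", "socketPipe1" };
            System.out.println(buildPlainGroup(false, in) == in); // true: same array
            System.out.println(buildPlainGroup(true, in)[0]);     // unwrap(socketPipe0)
        }
    }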
Use of com.ociweb.pronghorn.pipe.Pipe in project GreenLightning by oci-pronghorn.
From the class ReactiveOperators, method createPipes:
private Pipe[] createPipes(BuilderImpl builder, int i, int matches, Object listener, Grouper g) {
    if (i < interfaces.size()) {
        final PipeConfig config = g.config(builder.schemaMapper(schemas.get(i)));
        final boolean isInUse = null != config;
        final boolean doesMatch = interfaces.get(i).isInstance(listener) && isInUse;
        Pipe[] result = createPipes(builder, i + 1, doesMatch ? 1 + matches : matches, listener, g);
        if (doesMatch) {
            // logger.info("Does Match! {}", listener);
            result[matches] = new Pipe(config.grow2x());
        }
        return result;
    } else {
        return new Pipe[matches];
    }
}
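This recursion counts matches on the way down so the result array can be allocated exactly once, at the base case, and then filled in order on the way back up, with no intermediate collection. A minimal, self-contained sketch of the same count-then-fill technique applied to a primitive filter:

    public class CountThenFillSketch {

        // keep every even value from data[i..], without any intermediate list
        static int[] evens(int[] data, int i, int matches) {
            if (i < data.length) {
                boolean doesMatch = (data[i] % 2 == 0);
                int[] result = evens(data, i + 1, doesMatch ? matches + 1 : matches);
                if (doesMatch) {
                    result[matches] = data[i]; // fill on unwind, preserving order
                }
                return result;
            }
            return new int[matches]; // base case: exactly-sized allocation
        }

        public static void main(String[] args) {
            int[] out = evens(new int[]{1, 2, 3, 4, 6}, 0, 0);
            System.out.println(java.util.Arrays.toString(out)); // [2, 4, 6]
        }
    }

Because each frame remembers the matches value it saw on the way down, that value doubles as the slot index on the way up, which is exactly how createPipes places each new Pipe.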
Use of com.ociweb.pronghorn.pipe.Pipe in project GreenLightning by oci-pronghorn.
From the class HTTPClientRequestTrafficStage, method processMessagesForPipe:
@Override
protected void processMessagesForPipe(int activePipe) {
    Pipe<ClientHTTPRequestSchema> requestPipe = input[activePipe];
    while (PipeReader.hasContentToRead(requestPipe)
            && hasReleaseCountRemaining(activePipe)
            && isChannelUnBlocked(activePipe)
            && hasOpenConnection(requestPipe, output, ccm, activePipe)
            && PipeReader.tryReadFragment(requestPipe)) {
        int msgIdx = PipeReader.getMsgIdx(requestPipe);
        switch (msgIdx) {
            case ClientHTTPRequestSchema.MSG_FASTHTTPGET_200: {
                final byte[] hostBack = Pipe.blob(requestPipe);
                final int hostPos = PipeReader.readBytesPosition(requestPipe, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_HOST_2);
                final int hostLen = PipeReader.readBytesLength(requestPipe, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_HOST_2);
                final int hostMask = Pipe.blobMask(requestPipe);
                int routeId = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_DESTINATION_11);
                int port = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_PORT_1);
                int userId = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_SESSION_10);
                long connectionId = PipeReader.readLong(requestPipe, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_CONNECTIONID_20);
                ClientConnection clientConnection;
                if (-1 != connectionId && null != (clientConnection = (ClientConnection) ccm.connectionForSessionId(connectionId))) {
                    assert (clientConnection.singleUsage(stageId)) : "Only a single Stage may update the clientConnection.";
                    assert (routeId >= 0);
                    clientConnection.recordDestinationRouteId(routeId);
                    publishGet(requestPipe, hostBack, hostPos, hostLen, connectionId, clientConnection, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_PATH_3, ClientHTTPRequestSchema.MSG_FASTHTTPGET_200_FIELD_HEADERS_7);
                }
            }
            break;
            case ClientHTTPRequestSchema.MSG_HTTPGET_100: {
                final byte[] hostBack = Pipe.blob(requestPipe);
                final int hostPos = PipeReader.readBytesPosition(requestPipe, ClientHTTPRequestSchema.MSG_HTTPGET_100_FIELD_HOST_2);
                final int hostLen = PipeReader.readBytesLength(requestPipe, ClientHTTPRequestSchema.MSG_HTTPGET_100_FIELD_HOST_2);
                final int hostMask = Pipe.blobMask(requestPipe);
                int routeId = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_HTTPGET_100_FIELD_DESTINATION_11);
                int port = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_HTTPGET_100_FIELD_PORT_1);
                int userId = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_HTTPGET_100_FIELD_SESSION_10);
                long connectionId = ccm.lookup(ccm.lookupHostId(hostBack, hostPos, hostLen, hostMask), port, userId);
                ClientConnection clientConnection;
                if (-1 != connectionId && null != (clientConnection = (ClientConnection) ccm.connectionForSessionId(connectionId))) {
                    assert (clientConnection.singleUsage(stageId)) : "Only a single Stage may update the clientConnection.";
                    assert (routeId >= 0);
                    clientConnection.recordDestinationRouteId(routeId);
                    publishGet(requestPipe, hostBack, hostPos, hostLen, connectionId, clientConnection, ClientHTTPRequestSchema.MSG_HTTPGET_100_FIELD_PATH_3, ClientHTTPRequestSchema.MSG_HTTPGET_100_FIELD_HEADERS_7);
                }
            }
            break;
            case ClientHTTPRequestSchema.MSG_HTTPPOST_101: {
                final byte[] hostBack = Pipe.blob(requestPipe);
                final int hostPos = PipeReader.readBytesPosition(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_HOST_2);
                final int hostLen = PipeReader.readBytesLength(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_HOST_2);
                final int hostMask = Pipe.blobMask(requestPipe);
                int routeId = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_DESTINATION_11);
                int userId = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_SESSION_10);
                int port = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_PORT_1);
                long connectionId = ccm.lookup(ccm.lookupHostId(hostBack, hostPos, hostLen, hostMask), port, userId);
                // openConnection(activeHost, port, userId, outIdx);
                ClientConnection clientConnection;
                if ((-1 != connectionId) && (null != (clientConnection = (ClientConnection) ccm.connectionForSessionId(connectionId)))) {
                    assert (routeId >= 0);
                    clientConnection.recordDestinationRouteId(routeId);
                    int outIdx = clientConnection.requestPipeLineIdx();
                    // count of messages can only be done here.
                    clientConnection.incRequestsSent();
                    Pipe<NetPayloadSchema> outputPipe = output[outIdx];
                    PipeWriter.presumeWriteFragment(outputPipe, NetPayloadSchema.MSG_PLAIN_210);
                    PipeWriter.writeLong(outputPipe, NetPayloadSchema.MSG_PLAIN_210_FIELD_CONNECTIONID_201, connectionId);
                    DataOutputBlobWriter<NetPayloadSchema> activeWriter = PipeWriter.outputStream(outputPipe);
                    DataOutputBlobWriter.openField(activeWriter);
                    DataOutputBlobWriter.encodeAsUTF8(activeWriter, "POST");
                    int len = PipeReader.readBytesLength(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_PATH_3);
                    int first = PipeReader.readBytesPosition(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_PATH_3);
                    boolean prePendSlash = (0 == len) || ('/' != PipeReader.readBytesBackingArray(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_PATH_3)[first & Pipe.blobMask(requestPipe)]);
                    if (prePendSlash) {
                        // NOTE: these can be pre-converted to bytes so we need not convert on each write; may want to improve.
                        DataOutputBlobWriter.encodeAsUTF8(activeWriter, " /");
                    } else {
                        DataOutputBlobWriter.encodeAsUTF8(activeWriter, " ");
                    }
                    // Reading from a UTF8 field and writing to a UTF8 encoded field, so this is a direct copy.
                    PipeReader.readBytes(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_PATH_3, activeWriter);
                    HeaderUtil.writeHeaderBeginning(hostBack, hostPos, hostLen, Pipe.blobMask(requestPipe), activeWriter);
                    HeaderUtil.writeHeaderMiddle(activeWriter, implementationVersion);
                    PipeReader.readBytes(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_HEADERS_7, activeWriter);
                    long postLength = PipeReader.readBytesLength(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_PAYLOAD_5);
                    HeaderUtil.writeHeaderEnding(activeWriter, true, postLength);
                    // TODO: How is chunking supported? That code is not here yet, but the length must be -1 I think.
                    PipeReader.readBytes(requestPipe, ClientHTTPRequestSchema.MSG_HTTPPOST_101_FIELD_PAYLOAD_5, activeWriter);
                    DataOutputBlobWriter.closeHighLevelField(activeWriter, NetPayloadSchema.MSG_PLAIN_210_FIELD_PAYLOAD_204);
                    PipeWriter.publishWrites(outputPipe);
                }
            }
            break;
            case ClientHTTPRequestSchema.MSG_CLOSE_104:
                final byte[] hostBack = Pipe.blob(requestPipe);
                final int hostPos = PipeReader.readBytesPosition(requestPipe, ClientHTTPRequestSchema.MSG_CLOSE_104_FIELD_HOST_2);
                final int hostLen = PipeReader.readBytesLength(requestPipe, ClientHTTPRequestSchema.MSG_CLOSE_104_FIELD_HOST_2);
                final int hostMask = Pipe.blobMask(requestPipe);
                final int port = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_CLOSE_104_FIELD_PORT_1);
                final int userId = PipeReader.readInt(requestPipe, ClientHTTPRequestSchema.MSG_CLOSE_104_FIELD_SESSION_10);
                long connectionId = ccm.lookup(ccm.lookupHostId(hostBack, hostPos, hostLen, hostMask), port, userId);
                // only close if we find a live connection
                if (-1 != connectionId) {
                    ClientConnection connectionToKill = (ClientConnection) ccm.connectionForSessionId(connectionId);
                    if (null != connectionToKill) {
                        Pipe<NetPayloadSchema> outputPipe = output[connectionToKill.requestPipeLineIdx()];
                        // do not close here; that will be done by the last stage
                        // must be done first, before we send the message
                        connectionToKill.beginDisconnect();
                        PipeWriter.presumeWriteFragment(outputPipe, NetPayloadSchema.MSG_DISCONNECT_203);
                        PipeWriter.writeLong(outputPipe, NetPayloadSchema.MSG_DISCONNECT_203_FIELD_CONNECTIONID_201, connectionId);
                        PipeWriter.publishWrites(outputPipe);
                    }
                }
                break;
            default:
                logger.info("not yet supporting message {}", msgIdx);
        }
        PipeReader.releaseReadLock(requestPipe);
        // only do this now, after we know it's not blocked and was completed
        decReleaseCount(activePipe);
    }
}
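The prePendSlash logic in the POST branch guarantees the request line always carries a leading slash before the path, inserting one only when the first path byte is not already '/'. A minimal sketch of that decision with plain strings (a hypothetical helper, no Pronghorn types involved):

    public class RequestLineSketch {

        static String requestLine(String verb, String path) {
            // prepend a slash when the path is empty or does not start with one
            boolean prependSlash = path.isEmpty() || path.charAt(0) != '/';
            return verb + (prependSlash ? " /" : " ") + path;
        }

        public static void main(String[] args) {
            System.out.println(requestLine("POST", "index.html"));  // POST /index.html
            System.out.println(requestLine("POST", "/index.html")); // POST /index.html
        }
    }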