Use of io.perfmark.Link in project grpc-java by grpc: class MigratingThreadDeframer, method request.
// May be called from an arbitrary thread
@Override
public void request(final int numMessages) {
  class RequestOp implements Op {
    @Override
    public void run(boolean isDeframerOnTransportThread) {
      if (isDeframerOnTransportThread) {
        final Link link = PerfMark.linkOut();
        // We may not currently be on the transport thread, so jump over to it and then do the
        // necessary processing
        transportExecutor.runOnTransportThread(new Runnable() {
          @Override
          public void run() {
            PerfMark.startTask("MigratingThreadDeframer.request");
            PerfMark.linkIn(link);
            try {
              // Since processing continues on the transport thread while this runnable was
              // enqueued, the state may have changed since we called runOnTransportThread. So we
              // must make sure deframerOnTransportThread == true
              requestFromTransportThread(numMessages);
            } finally {
              PerfMark.stopTask("MigratingThreadDeframer.request");
            }
          }
        });
        return;
      }
      PerfMark.startTask("MigratingThreadDeframer.request");
      try {
        deframer.request(numMessages);
      } catch (Throwable t) {
        appListener.deframeFailed(t);
        // unrecoverable state
        deframer.close();
      } finally {
        PerfMark.stopTask("MigratingThreadDeframer.request");
      }
    }
  }
  runWhereAppropriate(new RequestOp(), false);
}
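The pattern above is PerfMark's standard cross-thread hand-off: linkOut() is called on the thread that schedules the work, and the resulting Link is replayed with linkIn() inside the task on the destination thread, so a trace viewer can stitch the two spans together. A minimal, self-contained sketch of that hand-off (not from grpc-java; the task names and executor are made up):

import io.perfmark.Link;
import io.perfmark.PerfMark;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class LinkHandoffExample {
  public static void main(String[] args) {
    ExecutorService executor = Executors.newSingleThreadExecutor();
    PerfMark.startTask("producer.enqueue");
    try {
      // Capture a Link on the thread that schedules the work...
      final Link link = PerfMark.linkOut();
      executor.execute(() -> {
        PerfMark.startTask("consumer.run");
        // ...and replay it on the thread that performs it, tying both spans together.
        PerfMark.linkIn(link);
        try {
          // actual work would happen here
        } finally {
          PerfMark.stopTask("consumer.run");
        }
      });
    } finally {
      PerfMark.stopTask("producer.enqueue");
    }
    executor.shutdown();
  }
}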
Use of io.perfmark.Link in project grpc-java by grpc: class MigratingThreadDeframer, method runWhereAppropriate.
private boolean runWhereAppropriate(Op op, boolean currentThreadIsTransportThread) {
  boolean deframerOnTransportThreadCopy;
  boolean alreadyEnqueued;
  synchronized (lock) {
    deframerOnTransportThreadCopy = deframerOnTransportThread;
    alreadyEnqueued = messageProducerEnqueued;
    if (!deframerOnTransportThreadCopy) {
      opQueue.offer(op);
      messageProducerEnqueued = true;
    }
  }
  if (deframerOnTransportThreadCopy) {
    op.run(/* isDeframerOnTransportThread= */ true);
    return true;
  } else {
    if (!alreadyEnqueued) {
      if (currentThreadIsTransportThread) {
        PerfMark.startTask("MigratingThreadDeframer.messageAvailable");
        try {
          transportListener.messagesAvailable(messageProducer);
        } finally {
          PerfMark.stopTask("MigratingThreadDeframer.messageAvailable");
        }
      } else {
        final Link link = PerfMark.linkOut();
        // SLOW path. This is the "normal" thread-hopping approach for request() when _not_ using
        // MigratingThreadDeframer
        transportExecutor.runOnTransportThread(new Runnable() {
          @Override
          public void run() {
            PerfMark.startTask("MigratingThreadDeframer.messageAvailable");
            PerfMark.linkIn(link);
            try {
              transportListener.messagesAvailable(messageProducer);
            } finally {
              PerfMark.stopTask("MigratingThreadDeframer.messageAvailable");
            }
          }
        });
      }
    }
    return false;
  }
}
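runWhereAppropriate implements a fast path and a slow path: when the deframer already lives on the transport thread the op runs inline, otherwise it is queued under the lock and at most one messagesAvailable notification is outstanding at a time. The following is a hypothetical, stripped-down version of that enqueue-or-run-inline gate; the class, fields, and drain logic are illustrative assumptions, not grpc-java code, and the toggling of onTransportThread (the actual "migration") is elided.

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.Executor;

final class InlineOrEnqueueGate {
  interface Op {
    void run();
  }

  private final Object lock = new Object();
  private final Queue<Op> opQueue = new ArrayDeque<>();
  private final Executor transportExecutor;
  private boolean onTransportThread; // guarded by lock
  private boolean drainScheduled;    // guarded by lock

  InlineOrEnqueueGate(Executor transportExecutor) {
    this.transportExecutor = transportExecutor;
  }

  /** Returns true if the op ran inline on the fast path. */
  boolean runWhereAppropriate(Op op) {
    boolean runInline;
    boolean schedule = false;
    synchronized (lock) {
      runInline = onTransportThread;
      if (!runInline) {
        opQueue.offer(op);
        if (!drainScheduled) {
          drainScheduled = true;
          schedule = true;
        }
      }
    }
    if (runInline) {
      op.run(); // fast path: no thread hop, no queue
      return true;
    }
    if (schedule) {
      transportExecutor.execute(this::drain); // slow path: one drain per batch
    }
    return false;
  }

  private void drain() {
    Op op;
    while ((op = poll()) != null) {
      op.run();
    }
  }

  private Op poll() {
    synchronized (lock) {
      Op op = opQueue.poll();
      if (op == null) {
        // Queue drained; allow the next caller to schedule a fresh drain.
        drainScheduled = false;
      }
      return op;
    }
  }
}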
Use of io.perfmark.Link in project grpc-java by grpc: class AsyncSink, method write.
@Override
public void write(Buffer source, long byteCount) throws IOException {
  checkNotNull(source, "source");
  if (closed) {
    throw new IOException("closed");
  }
  PerfMark.startTask("AsyncSink.write");
  try {
    synchronized (lock) {
      buffer.write(source, byteCount);
      if (writeEnqueued || flushEnqueued || buffer.completeSegmentByteCount() <= 0) {
        return;
      }
      writeEnqueued = true;
    }
    serializingExecutor.execute(new WriteRunnable() {
      final Link link = PerfMark.linkOut();

      @Override
      public void doRun() throws IOException {
        PerfMark.startTask("WriteRunnable.runWrite");
        PerfMark.linkIn(link);
        Buffer buf = new Buffer();
        try {
          synchronized (lock) {
            buf.write(buffer, buffer.completeSegmentByteCount());
            writeEnqueued = false;
          }
          sink.write(buf, buf.size());
        } finally {
          PerfMark.stopTask("WriteRunnable.runWrite");
        }
      }
    });
  } finally {
    PerfMark.stopTask("AsyncSink.write");
  }
}
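The interesting part of AsyncSink.write is write coalescing: every caller appends to a shared buffer under the lock, but only the first one schedules a WriteRunnable, which later drains everything buffered up to that point in a single downstream write. Below is an illustrative reduction of that idea using a plain OutputStream and byte buffer in place of okio's Buffer and Sink; the class and all names in it are hypothetical.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.concurrent.Executor;

final class CoalescingAsyncSink {
  private final Object lock = new Object();
  private final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  private final Executor serializingExecutor;
  private final OutputStream sink;
  private boolean writeEnqueued; // guarded by lock

  CoalescingAsyncSink(Executor serializingExecutor, OutputStream sink) {
    this.serializingExecutor = serializingExecutor;
    this.sink = sink;
  }

  void write(byte[] source) {
    synchronized (lock) {
      buffer.write(source, 0, source.length);
      if (writeEnqueued) {
        return; // an in-flight drain will pick this data up
      }
      writeEnqueued = true;
    }
    serializingExecutor.execute(() -> {
      byte[] pending;
      synchronized (lock) {
        // Copy and reset atomically with clearing the flag, so any later
        // writer either was included here or schedules a fresh drain.
        pending = buffer.toByteArray();
        buffer.reset();
        writeEnqueued = false;
      }
      try {
        sink.write(pending); // one downstream write for the whole batch
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    });
  }
}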
Use of io.perfmark.Link in project zuul by Netflix: class BaseZuulFilterRunner, method filter.
protected final O filter(final ZuulFilter<I, O> filter, final I inMesg) {
    final long startTime = System.nanoTime();
    final ZuulMessage snapshot = inMesg.getContext().debugRouting() ? inMesg.clone() : null;
    FilterChainResumer resumer = null;
    try (TaskCloseable ignored = traceTask(filter, f -> f.filterName() + ".filter")) {
        addPerfMarkTags(inMesg);
        ExecutionStatus filterRunStatus = null;
        if (filter.filterType() == INBOUND && inMesg.getContext().shouldSendErrorResponse()) {
            // Pass the request down the pipeline, all the way to the error endpoint, if an error
            // response needs to be generated
            filterRunStatus = SKIPPED;
        }
        try (TaskCloseable ignored2 = traceTask(filter, f -> f.filterName() + ".shouldSkipFilter")) {
            if (shouldSkipFilter(inMesg, filter)) {
                filterRunStatus = SKIPPED;
            }
        }
        if (filter.isDisabled()) {
            filterRunStatus = DISABLED;
        }
        if (filterRunStatus != null) {
            recordFilterCompletion(filterRunStatus, filter, startTime, inMesg, snapshot);
            return filter.getDefaultOutput(inMesg);
        }
        if (!isMessageBodyReadyForFilter(filter, inMesg)) {
            setFilterAwaitingBody(inMesg, true);
            logger.debug("Filter {} waiting for body, UUID {}", filter.filterName(), inMesg.getContext().getUUID());
            // wait for the whole body to be buffered
            return null;
        }
        setFilterAwaitingBody(inMesg, false);
        if (snapshot != null) {
            Debug.addRoutingDebug(inMesg.getContext(),
                    "Filter " + filter.filterType().toString() + " " + filter.filterOrder() + " " + filter.filterName());
        }
        // run the body contents accumulated so far through this filter
        inMesg.runBufferedBodyContentThroughFilter(filter);
        if (filter.getSyncType() == FilterSyncType.SYNC) {
            final SyncZuulFilter<I, O> syncFilter = (SyncZuulFilter) filter;
            final O outMesg;
            try (TaskCloseable ignored2 = traceTask(filter, f -> f.filterName() + ".apply")) {
                addPerfMarkTags(inMesg);
                outMesg = syncFilter.apply(inMesg);
            }
            recordFilterCompletion(SUCCESS, filter, startTime, inMesg, snapshot);
            return (outMesg != null) ? outMesg : filter.getDefaultOutput(inMesg);
        }
        // async filter
        try (TaskCloseable ignored2 = traceTask(filter, f -> f.filterName() + ".applyAsync")) {
            final Link nettyToSchedulerLink = linkOut();
            filter.incrementConcurrency();
            resumer = new FilterChainResumer(inMesg, filter, snapshot, startTime);
            filter.applyAsync(inMesg)
                    .doOnSubscribe(() -> {
                        try (TaskCloseable ignored3 = traceTask(filter, f -> f.filterName() + ".onSubscribeAsync")) {
                            linkIn(nettyToSchedulerLink);
                        }
                    })
                    .doOnNext(resumer.onNextStarted(nettyToSchedulerLink))
                    .doOnError(resumer.onErrorStarted(nettyToSchedulerLink))
                    .doOnCompleted(resumer.onCompletedStarted(nettyToSchedulerLink))
                    .observeOn(Schedulers.from(getChannelHandlerContext(inMesg).executor()))
                    .doOnUnsubscribe(resumer::decrementConcurrency)
                    .subscribe(resumer);
        }
        // wait for the async filter to finish
        return null;
    } catch (Throwable t) {
        if (resumer != null) {
            resumer.decrementConcurrency();
        }
        final O outMesg = handleFilterException(inMesg, filter, t);
        outMesg.finishBufferedBodyIfIncomplete();
        recordFilterCompletion(FAILED, filter, startTime, inMesg, snapshot);
        return outMesg;
    }
}
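For the async branch, Zuul captures a Link on the Netty event-loop thread before subscribing and replays it in each Rx callback, so the filter's spans stay connected across the scheduler hop. The sketch below reproduces that bracketing with a plain CompletableFuture instead of rx.Observable; filterName, the executor, and the returned value are placeholders, not Zuul APIs.

import io.perfmark.Link;
import io.perfmark.PerfMark;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class AsyncFilterLinkExample {
  public static void main(String[] args) throws Exception {
    ExecutorService scheduler = Executors.newSingleThreadExecutor();
    String filterName = "exampleFilter";

    PerfMark.startTask(filterName + ".applyAsync");
    // Capture the link before hopping to the scheduler thread.
    final Link nettyToSchedulerLink = PerfMark.linkOut();
    CompletableFuture<String> result = CompletableFuture.supplyAsync(() -> {
      // Runs on the scheduler thread; link this span back to the caller's span.
      PerfMark.startTask(filterName + ".apply");
      PerfMark.linkIn(nettyToSchedulerLink);
      try {
        return "out-message";
      } finally {
        PerfMark.stopTask(filterName + ".apply");
      }
    }, scheduler);
    PerfMark.stopTask(filterName + ".applyAsync");

    System.out.println(result.get());
    scheduler.shutdown();
  }
}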