Example usage of com.palantir.dialogue.Response in the Palantir "dialogue" project:
the SimulationTest method one_big_spike.
/**
 * This simulates an alta client, which might load up some keys and then lookup each key in order to build a big
 * response for the user. The goal is 100% client-perceived success here, because building up half the response
 * is no good.
 */
@SimulationCase
public void one_big_spike(Strategy strategy) {
    // Each node absorbs this many requests before it starts returning 429s.
    int perNodeCapacity = 100;
    Duration serverLatency = Duration.ofMillis(150);
    // Both nodes are configured identically, so build them from a range instead of duplicating the chain.
    servers = servers(IntStream.rangeClosed(1, 2)
            .mapToObj(node -> SimulationServer.builder()
                    .serverName("node" + node)
                    .simulation(simulation)
                    .handler(h -> h.respond200UntilCapacity(429, perNodeCapacity).responseTime(serverLatency))
                    .build())
            .toArray(SimulationServer[]::new));
    st = strategy;
    result = Benchmark.builder()
            .simulation(simulation)
            // fire off a ton of requests very quickly
            .requestsPerSecond(30_000)
            .numRequests(1000)
            .client(strategy.getChannel(simulation, servers))
            .abortAfter(Duration.ofSeconds(10))
            .run();
}
Example usage of com.palantir.dialogue.Response in the Palantir "dialogue" project:
the SimulationTest method server_side_rate_limits.
/**
 * Simulates a fleet of servers that each enforce an equal share of a global rate limit,
 * returning 429 once a node's observed one-minute rate exceeds its budget.
 */
@SimulationCase
void server_side_rate_limits(Strategy strategy) {
    // Overall throughput the fleet should sustain, split evenly across nodes.
    double fleetRateLimit = .1;
    int serverCount = 4;
    int clientCount = 2;
    double perNodeRateLimit = fleetRateLimit / serverCount;
    servers = servers(IntStream.range(0, serverCount).mapToObj(index -> {
        Meter observedRate = new Meter(simulation.codahaleClock());
        Function<SimulationServer, Response> rateLimitedResponse = _server -> {
            if (observedRate.getOneMinuteRate() >= perNodeRateLimit) {
                // Shed load once this node is over its share of the budget.
                return new TestResponse().code(429);
            }
            observedRate.mark();
            return new TestResponse().code(200);
        };
        return SimulationServer.builder()
                .serverName("node" + index)
                .simulation(simulation)
                .handler(h -> h.response(rateLimitedResponse).responseTime(Duration.ofSeconds(200)))
                .build();
    }).toArray(SimulationServer[]::new));
    st = strategy;
    result = Benchmark.builder()
            .simulation(simulation)
            .requestsPerSecond(fleetRateLimit)
            .sendUntil(Duration.ofMinutes(25_000))
            .clients(clientCount, _i -> strategy.getChannel(simulation, servers))
            .abortAfter(Duration.ofHours(1_000))
            .run();
}
Example usage of com.palantir.dialogue.Response in the Palantir "dialogue" project:
the Benchmark method schedule.
/**
 * Schedules every request from {@code requestStream} on the simulation scheduler and returns a future
 * that completes with the aggregated {@link BenchmarkResult} once the benchmark finishes.
 *
 * <p>Per-status-code counts are accumulated as requests complete; failed requests are keyed by their
 * exception message instead of a status code.
 */
@SuppressWarnings({ "FutureReturnValueIgnored", "CheckReturnValue" })
public ListenableFuture<BenchmarkResult> schedule() {
    // Single-element arrays so the lambdas/callbacks below can mutate these counters.
    long[] requestsStarted = { 0 };
    long[] responsesReceived = { 0 };
    Map<String, Integer> statusCodes = new TreeMap<>();
    Stopwatch scheduling = Stopwatch.createStarted();
    benchmarkFinished.getFuture().addListener(simulation.metricsReporter()::report, DialogueFutures.safeDirectExecutor());
    FutureCallback<Response> accumulateStatusCodes = new FutureCallback<Response>() {
        @Override
        public void onSuccess(Response response) {
            // just being a good citizen
            response.close();
            statusCodes.compute(Integer.toString(response.code()), (_c, num) -> num == null ? 1 : num + 1);
        }

        @Override
        public void onFailure(Throwable throwable) {
            // Failures have no status code, so bucket them by exception message.
            statusCodes.compute(throwable.getMessage(), (_c, num) -> num == null ? 1 : num + 1);
        }
    };
    requestStream.forEach(req -> {
        log.debug("Scheduling {}", req.number());
        simulation.scheduler().schedule(() -> {
            log.debug("time={} starting num={} {}", simulation.clock().read(), req.number(), req);
            try {
                ListenableFuture<Response> future = req.endpointChannel().execute(req.request());
                requestsStarted[0] += 1;
                Futures.addCallback(future, accumulateStatusCodes, DialogueFutures.safeDirectExecutor());
                future.addListener(() -> {
                    responsesReceived[0] += 1;
                    benchmarkFinished.update(Duration.ofNanos(simulation.clock().read()), requestsStarted[0], responsesReceived[0]);
                }, DialogueFutures.safeDirectExecutor());
            } catch (RuntimeException e) {
                log.error("Channels shouldn't throw", e);
            }
        }, req.sendTimeNanos() - simulation.clock().read(), TimeUnit.NANOSECONDS);
        // Advance the simulated clock so the scheduled task above actually fires.
        simulation.runClockTo(Optional.of(Duration.ofNanos(req.sendTimeNanos())));
    });
    long ms = scheduling.elapsed(TimeUnit.MILLISECONDS);
    // Guard against division by zero: small benchmarks can finish scheduling in under a millisecond.
    log.warn("Fired off all requests ({} ms, {}req/sec)", ms, (1000 * requestsStarted[0]) / Math.max(ms, 1));
    return Futures.transform(benchmarkFinished.getFuture(), _v -> {
        long numGlobalResponses = MetricNames.globalResponses(simulation.taggedMetrics()).getCount();
        // Responses that were created but never closed indicate a leak in the channel under test.
        long leaked = numGlobalResponses - MetricNames.responseClose(simulation.taggedMetrics()).getCount();
        Map<String, Snapshot> perEndpointHistograms = KeyedStream.of(endpointChannels)
                .mapKeys(NamedEndpointChannel::name)
                .map(namedEndpointChannel -> namedEndpointChannel.channel().perEndpointChannelTimer().getSnapshot())
                .collectToMap();
        return ImmutableBenchmarkResult.builder()
                .clientHistogram(MetricNames.clientGlobalResponseTimer(simulation.taggedMetrics()).getSnapshot())
                .endTime(Duration.ofNanos(simulation.clock().read()))
                .statusCodes(statusCodes)
                .successPercentage(Math.round(statusCodes.getOrDefault("200", 0) * 1000d / requestsStarted[0]) / 10d)
                .numSent(requestsStarted[0])
                .numReceived(responsesReceived[0])
                .numGlobalResponses(numGlobalResponses)
                .responsesLeaked(leaked)
                .perEndpointHistograms(perEndpointHistograms)
                .build();
    }, DialogueFutures.safeDirectExecutor());
}
Aggregations