Usage of org.apache.cassandra.transport.BurnTestUtil.SizeCaps in the Apache Cassandra project.
From class SimpleClientBurnTest, method test:
// Burn test: drives a locally started native-protocol Server with several
// concurrent SimpleClients (a mix of V5 and V4 connections), alternating
// small and large generated payloads, then verifies both the returned rows
// and the server-side buffer allocation/release accounting.
@Test
public void test() throws Throwable {
// Size caps for generated payloads; ctor args presumably bound value sizes
// and column/row counts — confirm against BurnTestUtil.SizeCaps.
SizeCaps smallMessageCap = new SizeCaps(5, 10, 5, 5);
SizeCaps largeMessageCap = new SizeCaps(1000, 2000, 5, 150);
// Every descriptor divisible by this uses the large caps.
int largeMessageFrequency = 1000;
// Records per-endpoint and global buffer allocations so we can assert
// alloc totals == release totals at the end (leak detection).
CQLConnectionTest.AllocationObserver allocationObserver = new CQLConnectionTest.AllocationObserver();
PipelineConfigurator configurator = new PipelineConfigurator(NativeTransportService.useEpoll(), false, false, UNENCRYPTED) {
protected ClientResourceLimits.ResourceProvider resourceProvider(ClientResourceLimits.Allocator allocator) {
// Wrap the resource provider so all allocations flow through the observer.
return BurnTestUtil.observableResourceProvider(allocationObserver).apply(allocator);
}
};
Server server = new Server.Builder().withHost(address).withPort(port).withPipelineConfigurator(configurator).build();
ClientMetrics.instance.init(Collections.singleton(server));
server.start();
// Swap in a QUERY codec whose execute() treats the query string as an integer
// descriptor and deterministically regenerates the rows for that descriptor,
// so the client can independently recompute the expected response.
Message.Type.QUERY.unsafeSetCodec(new Message.Codec<QueryMessage>() {
public QueryMessage decode(ByteBuf body, ProtocolVersion version) {
QueryMessage queryMessage = QueryMessage.codec.decode(body, version);
return new QueryMessage(queryMessage.query, queryMessage.options) {
protected Message.Response execute(QueryState state, long queryStartNanoTime, boolean traceRequest) {
int idx = Integer.parseInt(queryMessage.query);
SizeCaps caps = idx % largeMessageFrequency == 0 ? largeMessageCap : smallMessageCap;
return generateRows(idx, caps);
}
};
}
public void encode(QueryMessage queryMessage, ByteBuf dest, ProtocolVersion version) {
QueryMessage.codec.encode(queryMessage, dest, version);
}
public int encodedSize(QueryMessage queryMessage, ProtocolVersion version) {
return QueryMessage.codec.encodedSize(queryMessage, version);
}
});
// Two client flavours, chosen round-robin per worker thread below.
List<AssertUtil.ThrowingSupplier<SimpleClient>> suppliers = Arrays.asList(() -> new SimpleClient(address.getHostAddress(), port, ProtocolVersion.V5, true, new EncryptionOptions()).connect(false), () -> new SimpleClient(address.getHostAddress(), port, ProtocolVersion.V4, false, new EncryptionOptions()).connect(false));
int threads = 3;
ExecutorService executor = Executors.newFixedThreadPool(threads);
// First worker failure lands here and stops every loop.
AtomicReference<Throwable> error = new AtomicReference<>();
// Counted down only on failure; the await below expects it NOT to fire.
CountDownLatch signal = new CountDownLatch(1);
// TODO: exercise client -> server large messages
for (int t = 0; t < threads; t++) {
int threadId = t;
executor.execute(() -> {
try (SimpleClient client = suppliers.get(threadId % suppliers.size()).get()) {
int counter = 0;
while (!executor.isShutdown() && error.get() == null) {
if (counter % 100 == 0)
System.out.println("idx = " + counter);
List<Message.Request> messages = new ArrayList<>();
for (int j = 0; j < 10; j++) {
// Descriptor is unique per (thread, iteration, batch slot) so the
// expected rows can be regenerated for each individual request.
int descriptor = counter + j * 100 + threadId * 10000;
SizeCaps caps = descriptor % largeMessageFrequency == 0 ? largeMessageCap : smallMessageCap;
QueryMessage query = generateQueryMessage(descriptor, caps, client.connection.getVersion());
messages.add(query);
}
Map<Message.Request, Message.Response> responses = client.execute(messages);
for (Map.Entry<Message.Request, Message.Response> entry : responses.entrySet()) {
// Recompute the expected result from the request's descriptor and
// compare row-by-row, column-by-column.
int idx = Integer.parseInt(((QueryMessage) entry.getKey()).query);
SizeCaps caps = idx % largeMessageFrequency == 0 ? largeMessageCap : smallMessageCap;
ResultMessage.Rows actual = ((ResultMessage.Rows) entry.getValue());
ResultMessage.Rows expected = generateRows(idx, caps);
Assert.assertEquals(expected.result.rows.size(), actual.result.rows.size());
for (int i = 0; i < expected.result.rows.size(); i++) {
List<ByteBuffer> expectedRow = expected.result.rows.get(i);
List<ByteBuffer> actualRow = actual.result.rows.get(i);
Assert.assertEquals(expectedRow.size(), actualRow.size());
for (int col = 0; col < expectedRow.size(); col++) Assert.assertEquals(expectedRow.get(col), actualRow.get(col));
}
}
counter++;
// try to trigger leak detector
System.gc();
}
} catch (Throwable e) {
e.printStackTrace();
error.set(e);
signal.countDown();
}
});
}
// Let the workers run for up to 120s; the latch firing means one failed.
Assert.assertFalse(signal.await(120, TimeUnit.SECONDS));
executor.shutdown();
executor.awaitTermination(10, TimeUnit.SECONDS);
// Leak check: every byte the server accounted as allocated must be released.
assertThat(allocationObserver.endpointAllocationTotal()).isEqualTo(allocationObserver.endpointReleaseTotal());
assertThat(allocationObserver.globalAllocationTotal()).isEqualTo(allocationObserver.globalReleaseTotal());
server.stop();
}
Usage of org.apache.cassandra.transport.BurnTestUtil.SizeCaps in the Apache Cassandra project.
From class SimpleClientPerfTest, method perfTest:
// Measures client-observed request latency against a locally started
// native-protocol server, using deterministically generated fixed-size
// request/response payloads. Warms up for 30s, measures for 60s at a
// rate-limited 2000 req/s, then prints latency statistics in microseconds.
//
// @param requestCaps    size caps used to generate the request payload
// @param responseCaps   size caps used to generate the server's response rows
// @param clientSupplier constructs the SimpleClient under test (may throw)
// @param version        native protocol version to encode and measure with
@SuppressWarnings({ "UnstableApiUsage", "UseOfSystemOutOrSystemErr", "ResultOfMethodCallIgnored" })
public void perfTest(SizeCaps requestCaps, SizeCaps responseCaps, AssertUtil.ThrowingSupplier<SimpleClient> clientSupplier, ProtocolVersion version) throws Throwable {
ResultMessage.Rows response = generateRows(0, responseCaps);
QueryMessage requestMessage = generateQueryMessage(0, requestCaps, version);
// Encode once up front purely to report request/response wire sizes;
// release the envelopes immediately to avoid leaking buffers.
Envelope message = requestMessage.encode(version);
int requestSize = message.body.readableBytes();
message.release();
message = response.encode(version);
int responseSize = message.body.readableBytes();
message.release();
Server server = new Server.Builder().withHost(address).withPort(port).build();
ClientMetrics.instance.init(Collections.singleton(server));
server.start();
// Swap in a QUERY codec that regenerates the canned response for every
// request instead of executing real CQL.
Message.Type.QUERY.unsafeSetCodec(new Message.Codec<QueryMessage>() {
public QueryMessage decode(ByteBuf body, ProtocolVersion version) {
QueryMessage queryMessage = QueryMessage.codec.decode(body, version);
return new QueryMessage(queryMessage.query, queryMessage.options) {
protected Message.Response execute(QueryState state, long queryStartNanoTime, boolean traceRequest) {
// unused
int idx = Integer.parseInt(queryMessage.query);
return generateRows(idx, responseCaps);
}
};
}
public void encode(QueryMessage queryMessage, ByteBuf dest, ProtocolVersion version) {
QueryMessage.codec.encode(queryMessage, dest, version);
}
public int encodedSize(QueryMessage queryMessage, ProtocolVersion version) {
// NOTE(review): deliberately 0 here (unlike the burn test, which delegates
// to QueryMessage.codec) — presumably size accounting is irrelevant for
// this perf path; confirm before reusing this codec elsewhere.
return 0;
}
});
int threads = 1;
ExecutorService executor = Executors.newFixedThreadPool(threads);
// First worker failure lands here and stops the loop.
AtomicReference<Throwable> error = new AtomicReference<>();
// Counted down only on failure; the awaits below expect it NOT to fire.
CountDownLatch signal = new CountDownLatch(1);
// false during warm-up; flipped to true to start recording latencies.
AtomicBoolean measure = new AtomicBoolean(false);
DescriptiveStatistics stats = new DescriptiveStatistics();
// DescriptiveStatistics is not thread-safe; guard all addValue calls.
Lock lock = new ReentrantLock();
RateLimiter limiter = RateLimiter.create(2000);
AtomicLong overloadedExceptions = new AtomicLong(0);
// TODO: exercise client -> server large messages
for (int t = 0; t < threads; t++) {
executor.execute(() -> {
try (SimpleClient client = clientSupplier.get()) {
while (!executor.isShutdown() && error.get() == null) {
List<Message.Request> messages = new ArrayList<>();
for (int j = 0; j < 1; j++) messages.add(requestMessage);
if (measure.get()) {
try {
limiter.acquire();
long nanoStart = nanoTime();
client.execute(messages);
long elapsed = nanoTime() - nanoStart;
lock.lock();
try {
stats.addValue(TimeUnit.NANOSECONDS.toMicros(elapsed));
} finally {
lock.unlock();
}
} catch (RuntimeException e) {
// Overloads are counted rather than failing the run.
if (Throwables.anyCauseMatches(e, cause -> cause instanceof OverloadedException)) {
overloadedExceptions.incrementAndGet();
} else {
throw e;
}
}
} else {
try {
limiter.acquire();
// warm-up
client.execute(messages);
} catch (RuntimeException e) {
// Ignore overloads during warmup...
if (!Throwables.anyCauseMatches(e, cause -> cause instanceof OverloadedException)) {
throw e;
}
}
}
}
} catch (Throwable e) {
e.printStackTrace();
error.set(e);
signal.countDown();
}
});
}
// 30s warm-up, then 60s of measurement; the latch firing means failure.
Assert.assertFalse(signal.await(30, TimeUnit.SECONDS));
measure.set(true);
Assert.assertFalse(signal.await(60, TimeUnit.SECONDS));
executor.shutdown();
executor.awaitTermination(10, TimeUnit.SECONDS);
System.out.println("requestSize = " + requestSize);
System.out.println("responseSize = " + responseSize);
System.out.println("Latencies (in microseconds)");
System.out.println("Elements: " + stats.getN());
System.out.println("Mean: " + stats.getMean());
System.out.println("Variance: " + stats.getVariance());
// BUG FIX: DescriptiveStatistics.getPercentile(p) takes p on a 0-100 scale;
// the previous 0.5/0.90/0.95/0.99 arguments reported the 0.5th-0.99th
// percentile (values near the minimum), not the median/90p/95p/99p.
System.out.println("Median: " + stats.getPercentile(50));
System.out.println("90p: " + stats.getPercentile(90));
System.out.println("95p: " + stats.getPercentile(95));
System.out.println("99p: " + stats.getPercentile(99));
System.out.println("Max: " + stats.getMax());
System.out.println("Failed due to overload: " + overloadedExceptions.get());
server.stop();
}
Usage of org.apache.cassandra.transport.BurnTestUtil.SizeCaps in the Apache Cassandra project.
From class DriverBurnTest, method test:
// Burn test: exercises the server through the DataStax Java driver with ten
// concurrent sessions over varied connection configurations (V4/beta protocol,
// with and without LZ4 compression), verifying every response cell and finally
// the server-side buffer allocation/release accounting.
@Test
public void test() throws Throwable {
// Size caps for generated payloads; ctor args presumably bound value sizes
// and column/row counts — confirm against BurnTestUtil.SizeCaps.
final SizeCaps smallMessageCap = new SizeCaps(10, 20, 5, 10);
final SizeCaps largeMessageCap = new SizeCaps(1000, 2000, 5, 150);
// Every descriptor divisible by this uses the large caps.
int largeMessageFrequency = 1000;
// Swap in a QUERY codec whose execute() treats the query string as an
// integer descriptor and deterministically regenerates its rows; queries
// issued internally by the driver are not integers and fall through to the
// real execution path.
Message.Type.QUERY.unsafeSetCodec(new Message.Codec<QueryMessage>() {
public QueryMessage decode(ByteBuf body, ProtocolVersion version) {
QueryMessage queryMessage = QueryMessage.codec.decode(body, version);
return new QueryMessage(queryMessage.query, queryMessage.options) {
protected Message.Response execute(QueryState state, long queryStartNanoTime, boolean traceRequest) {
try {
int idx = Integer.parseInt(queryMessage.query);
SizeCaps caps = idx % largeMessageFrequency == 0 ? largeMessageCap : smallMessageCap;
return generateRows(idx, caps);
} catch (NumberFormatException e) {
// for the requests driver issues under the hood
return super.execute(state, queryStartNanoTime, traceRequest);
}
}
};
}
public void encode(QueryMessage queryMessage, ByteBuf dest, ProtocolVersion version) {
QueryMessage.codec.encode(queryMessage, dest, version);
}
public int encodedSize(QueryMessage queryMessage, ProtocolVersion version) {
return 0;
}
});
// Four driver configurations, assigned round-robin to worker threads.
List<AssertUtil.ThrowingSupplier<Cluster.Builder>> suppliers = Arrays.asList(() -> Cluster.builder().addContactPoint(nativeAddr.getHostAddress()).withProtocolVersion(com.datastax.driver.core.ProtocolVersion.V4).withPort(nativePort), () -> Cluster.builder().addContactPoint(nativeAddr.getHostAddress()).allowBetaProtocolVersion().withPort(nativePort), () -> Cluster.builder().addContactPoint(nativeAddr.getHostAddress()).withCompression(ProtocolOptions.Compression.LZ4).allowBetaProtocolVersion().withPort(nativePort), () -> Cluster.builder().addContactPoint(nativeAddr.getHostAddress()).withCompression(ProtocolOptions.Compression.LZ4).withProtocolVersion(com.datastax.driver.core.ProtocolVersion.V4).withPort(nativePort));
int threads = 10;
ExecutorService executor = Executors.newFixedThreadPool(threads);
// First worker failure lands here; the latch fires only on failure.
AtomicReference<Throwable> error = new AtomicReference<>();
CountDownLatch signal = new CountDownLatch(1);
for (int t = 0; t < threads; t++) {
int threadId = t;
executor.execute(() -> {
try (Cluster driver = suppliers.get(threadId % suppliers.size()).get().build();
Session session = driver.connect()) {
int counter = 0;
while (!Thread.interrupted()) {
// Issue a batch of 10 async queries, keyed by batch slot.
Map<Integer, ResultSetFuture> futures = new HashMap<>();
for (int j = 0; j < 10; j++) {
// Descriptor is unique per (thread, iteration, batch slot) so the
// expected rows can be regenerated for each individual request.
int descriptor = counter + j * 100 + threadId * 10000;
SizeCaps caps = descriptor % largeMessageFrequency == 0 ? largeMessageCap : smallMessageCap;
futures.put(j, session.executeAsync(generateQueryStatement(descriptor, caps)));
}
for (Map.Entry<Integer, ResultSetFuture> e : futures.entrySet()) {
final int j = e.getKey().intValue();
// Reconstruct the same descriptor used when the query was issued.
final int descriptor = counter + j * 100 + threadId * 10000;
SizeCaps caps = descriptor % largeMessageFrequency == 0 ? largeMessageCap : smallMessageCap;
ResultMessage.Rows expectedRS = generateRows(descriptor, caps);
List<Row> actualRS = e.getValue().get().all();
for (int i = 0; i < actualRS.size(); i++) {
List<ByteBuffer> expected = expectedRS.result.rows.get(i);
Row actual = actualRS.get(i);
for (int col = 0; col < expected.size(); col++) Assert.assertEquals(actual.getBytes(col), expected.get(col));
}
}
counter++;
}
} catch (Throwable e) {
e.printStackTrace();
error.set(e);
signal.countDown();
}
});
}
// Let the workers run for up to 120s; the latch firing means one failed.
Assert.assertFalse(signal.await(120, TimeUnit.SECONDS));
executor.shutdown();
executor.awaitTermination(10, TimeUnit.SECONDS);
// Leak check: every byte the server accounted as allocated must be released.
// NOTE(review): allocationObserver is presumably a field of this test class
// (not visible in this chunk) — confirm where it is wired into the server.
assertThat(allocationObserver.endpointAllocationTotal()).isEqualTo(allocationObserver.endpointReleaseTotal());
assertThat(allocationObserver.globalAllocationTotal()).isEqualTo(allocationObserver.globalReleaseTotal());
}
Aggregations