Use of org.apache.cassandra.exceptions.OverloadedException in project cassandra by apache.
The class SimpleClientPerfTest, method perfTest.
@SuppressWarnings({ "UnstableApiUsage", "UseOfSystemOutOrSystemErr", "ResultOfMethodCallIgnored" })
public void perfTest(SizeCaps requestCaps, SizeCaps responseCaps, AssertUtil.ThrowingSupplier<SimpleClient> clientSupplier, ProtocolVersion version) throws Throwable {
    ResultMessage.Rows response = generateRows(0, responseCaps);
    QueryMessage requestMessage = generateQueryMessage(0, requestCaps, version);
    Envelope message = requestMessage.encode(version);
    int requestSize = message.body.readableBytes();
    message.release();
    message = response.encode(version);
    int responseSize = message.body.readableBytes();
    message.release();

    Server server = new Server.Builder().withHost(address).withPort(port).build();
    ClientMetrics.instance.init(Collections.singleton(server));
    server.start();

    Message.Type.QUERY.unsafeSetCodec(new Message.Codec<QueryMessage>() {
        public QueryMessage decode(ByteBuf body, ProtocolVersion version) {
            QueryMessage queryMessage = QueryMessage.codec.decode(body, version);
            return new QueryMessage(queryMessage.query, queryMessage.options) {
                protected Message.Response execute(QueryState state, long queryStartNanoTime, boolean traceRequest) {
                    // the query string carries the row index; the other parameters are unused
                    int idx = Integer.parseInt(queryMessage.query);
                    return generateRows(idx, responseCaps);
                }
            };
        }

        public void encode(QueryMessage queryMessage, ByteBuf dest, ProtocolVersion version) {
            QueryMessage.codec.encode(queryMessage, dest, version);
        }

        public int encodedSize(QueryMessage queryMessage, ProtocolVersion version) {
            return 0;
        }
    });

    int threads = 1;
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    AtomicReference<Throwable> error = new AtomicReference<>();
    CountDownLatch signal = new CountDownLatch(1);
    AtomicBoolean measure = new AtomicBoolean(false);
    DescriptiveStatistics stats = new DescriptiveStatistics();
    Lock lock = new ReentrantLock();
    RateLimiter limiter = RateLimiter.create(2000);
    AtomicLong overloadedExceptions = new AtomicLong(0);

    // TODO: exercise client -> server large messages
    for (int t = 0; t < threads; t++) {
        executor.execute(() -> {
            try (SimpleClient client = clientSupplier.get()) {
                while (!executor.isShutdown() && error.get() == null) {
                    List<Message.Request> messages = new ArrayList<>();
                    for (int j = 0; j < 1; j++)
                        messages.add(requestMessage);

                    if (measure.get()) {
                        try {
                            limiter.acquire();
                            long nanoStart = nanoTime();
                            client.execute(messages);
                            long elapsed = nanoTime() - nanoStart;
                            lock.lock();
                            try {
                                stats.addValue(TimeUnit.NANOSECONDS.toMicros(elapsed));
                            } finally {
                                lock.unlock();
                            }
                        } catch (RuntimeException e) {
                            if (Throwables.anyCauseMatches(e, cause -> cause instanceof OverloadedException)) {
                                overloadedExceptions.incrementAndGet();
                            } else {
                                throw e;
                            }
                        }
                    } else {
                        try {
                            limiter.acquire();
                            // warm-up
                            client.execute(messages);
                        } catch (RuntimeException e) {
                            // ignore overloads during warm-up
                            if (!Throwables.anyCauseMatches(e, cause -> cause instanceof OverloadedException)) {
                                throw e;
                            }
                        }
                    }
                }
            } catch (Throwable e) {
                e.printStackTrace();
                error.set(e);
                signal.countDown();
            }
        });
    }

    Assert.assertFalse(signal.await(30, TimeUnit.SECONDS));
    measure.set(true);
    Assert.assertFalse(signal.await(60, TimeUnit.SECONDS));
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.SECONDS);

    System.out.println("requestSize = " + requestSize);
    System.out.println("responseSize = " + responseSize);
    System.out.println("Latencies (in microseconds)");
    System.out.println("Elements: " + stats.getN());
    System.out.println("Mean: " + stats.getMean());
    System.out.println("Variance: " + stats.getVariance());
    // DescriptiveStatistics.getPercentile expects a value in (0, 100],
    // so the median is percentile 50, not 0.5
    System.out.println("Median: " + stats.getPercentile(50));
    System.out.println("90p: " + stats.getPercentile(90));
    System.out.println("95p: " + stats.getPercentile(95));
    System.out.println("99p: " + stats.getPercentile(99));
    System.out.println("Max: " + stats.getMax());
    System.out.println("Failed due to overload: " + overloadedExceptions.get());
    server.stop();
}
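The server relies on the client to back off when it receives OverloadedException (see the CQLMessageHandler comment further down), and this test merely counts or swallows overloads. A minimal sketch of a retrying client-side policy, assuming the org.apache.cassandra.utils.Throwables helper already used in the snippet; the class name, maxAttempts and backoff scheme are illustrative, not part of the test:

import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import org.apache.cassandra.exceptions.OverloadedException;
import org.apache.cassandra.utils.Throwables;

final class BackoffOnOverload {
    // Retries a request a bounded number of times, sleeping between attempts
    // whenever OverloadedException appears anywhere in the cause chain.
    static <T> T callWithBackoff(Callable<T> send, int maxAttempts, long initialBackoffMillis) throws Exception {
        long backoffMillis = initialBackoffMillis;
        for (int attempt = 1; ; attempt++) {
            try {
                return send.call();
            } catch (RuntimeException e) {
                if (!Throwables.anyCauseMatches(e, cause -> cause instanceof OverloadedException) || attempt >= maxAttempts)
                    throw e;
                TimeUnit.MILLISECONDS.sleep(backoffMillis);
                backoffMillis *= 2; // exponential backoff between retries
            }
        }
    }
}

A caller would wrap client.execute(messages) in the Callable; the warm-up branch above achieves a cruder version of the same thing by swallowing overloads outright.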
Use of org.apache.cassandra.exceptions.OverloadedException in project cassandra by apache.
The class StorageProxy, method mutate.
/**
 * Use this method to have these Mutations applied
 * across all replicas. This method will take care
 * of the possibility of a replica being down and hint
 * the data across to some other replica.
 *
 * @param mutations the mutations to be applied across the replicas
 * @param consistencyLevel the consistency level for the operation
 * @param queryStartNanoTime the value of nanoTime() when the query started to be processed
 */
public static void mutate(List<? extends IMutation> mutations, ConsistencyLevel consistencyLevel, long queryStartNanoTime) throws UnavailableException, OverloadedException, WriteTimeoutException, WriteFailureException {
    Tracing.trace("Determining replicas for mutation");
    final String localDataCenter = DatabaseDescriptor.getEndpointSnitch().getLocalDatacenter();
    long startTime = nanoTime();
    List<AbstractWriteResponseHandler<IMutation>> responseHandlers = new ArrayList<>(mutations.size());
    WriteType plainWriteType = mutations.size() <= 1 ? WriteType.SIMPLE : WriteType.UNLOGGED_BATCH;
    try {
        for (IMutation mutation : mutations) {
            if (hasLocalMutation(mutation))
                writeMetrics.localRequests.mark();
            else
                writeMetrics.remoteRequests.mark();

            if (mutation instanceof CounterMutation)
                responseHandlers.add(mutateCounter((CounterMutation) mutation, localDataCenter, queryStartNanoTime));
            else
                responseHandlers.add(performWrite(mutation, consistencyLevel, localDataCenter, standardWritePerformer, null, plainWriteType, queryStartNanoTime));
        }

        // upgrade to full quorum any failed cheap quorums
        for (int i = 0; i < mutations.size(); ++i) {
            // at the moment, only non-counter writes support cheap quorums
            if (!(mutations.get(i) instanceof CounterMutation))
                responseHandlers.get(i).maybeTryAdditionalReplicas(mutations.get(i), standardWritePerformer, localDataCenter);
        }

        // wait for writes; throws TimeoutException if necessary
        for (AbstractWriteResponseHandler<IMutation> responseHandler : responseHandlers)
            responseHandler.get();
    } catch (WriteTimeoutException | WriteFailureException ex) {
        if (consistencyLevel == ConsistencyLevel.ANY) {
            hintMutations(mutations);
        } else {
            if (ex instanceof WriteFailureException) {
                writeMetrics.failures.mark();
                writeMetricsForLevel(consistencyLevel).failures.mark();
                WriteFailureException fe = (WriteFailureException) ex;
                Tracing.trace("Write failure; received {} of {} required replies, failed {} requests", fe.received, fe.blockFor, fe.failureReasonByEndpoint.size());
            } else {
                writeMetrics.timeouts.mark();
                writeMetricsForLevel(consistencyLevel).timeouts.mark();
                WriteTimeoutException te = (WriteTimeoutException) ex;
                Tracing.trace("Write timeout; received {} of {} required replies", te.received, te.blockFor);
            }
            throw ex;
        }
    } catch (UnavailableException e) {
        writeMetrics.unavailables.mark();
        writeMetricsForLevel(consistencyLevel).unavailables.mark();
        Tracing.trace("Unavailable");
        throw e;
    } catch (OverloadedException e) {
        // overloads are counted against the unavailables meters as well
        writeMetrics.unavailables.mark();
        writeMetricsForLevel(consistencyLevel).unavailables.mark();
        Tracing.trace("Overloaded");
        throw e;
    } finally {
        long latency = nanoTime() - startTime;
        writeMetrics.addNano(latency);
        writeMetricsForLevel(consistencyLevel).addNano(latency);
        updateCoordinatorWriteLatencyTableMetric(mutations, latency);
    }
}
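Because the catch block above rethrows after marking the metrics, callers of StorageProxy.mutate see the OverloadedException directly. A minimal sketch of such a caller, assuming the package locations of StorageProxy, ConsistencyLevel and IMutation; the single-retry policy and class name are purely illustrative and not Cassandra API:

import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.cassandra.db.ConsistencyLevel;
import org.apache.cassandra.db.IMutation;
import org.apache.cassandra.exceptions.OverloadedException;
import org.apache.cassandra.service.StorageProxy;

final class MutationSubmitter {
    // Applies the mutations once; on overload, pauses briefly and makes one final attempt.
    static void submitWithSingleRetry(List<? extends IMutation> mutations) throws Exception {
        try {
            StorageProxy.mutate(mutations, ConsistencyLevel.QUORUM, System.nanoTime());
        } catch (OverloadedException e) {
            // hypothetical policy: one short pause, then retry once
            TimeUnit.MILLISECONDS.sleep(100);
            StorageProxy.mutate(mutations, ConsistencyLevel.QUORUM, System.nanoTime());
        }
    }
}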
Use of org.apache.cassandra.exceptions.OverloadedException in project cassandra by apache.
The class StorageProxy, method mutateMV.
/**
 * Use this method to have these Mutations applied
 * across all replicas.
 *
 * @param mutations the mutations to be applied across the replicas
 * @param writeCommitLog if commitlog should be written
 * @param baseComplete time from epoch in ms that the local base mutation was (or will be) completed
 * @param queryStartNanoTime the value of nanoTime() when the query started to be processed
 */
public static void mutateMV(ByteBuffer dataKey, Collection<Mutation> mutations, boolean writeCommitLog, AtomicLong baseComplete, long queryStartNanoTime) throws UnavailableException, OverloadedException, WriteTimeoutException {
    Tracing.trace("Determining replicas for mutation");
    final String localDataCenter = DatabaseDescriptor.getEndpointSnitch().getLocalDatacenter();
    long startTime = nanoTime();
    try {
        // if we haven't joined the ring, write everything to the batchlog because paired replicas may be stale
        final UUID batchUUID = UUIDGen.getTimeUUID();
        if (StorageService.instance.isStarting() || StorageService.instance.isJoining() || StorageService.instance.isMoving()) {
            BatchlogManager.store(Batch.createLocal(batchUUID, FBUtilities.timestampMicros(), mutations), writeCommitLog);
        } else {
            List<WriteResponseHandlerWrapper> wrappers = new ArrayList<>(mutations.size());
            // non-local mutations rely on the base mutation commit-log entry for eventual consistency
            Set<Mutation> nonLocalMutations = new HashSet<>(mutations);
            Token baseToken = StorageService.instance.getTokenMetadata().partitioner.getToken(dataKey);
            ConsistencyLevel consistencyLevel = ConsistencyLevel.ONE;

            // since the base -> view replication is 1:1, we only need to store the batchlog entry locally
            ReplicaPlan.ForTokenWrite replicaPlan = ReplicaPlans.forLocalBatchlogWrite();
            BatchlogCleanup cleanup = new BatchlogCleanup(mutations.size(), () -> asyncRemoveFromBatchlog(replicaPlan, batchUUID));

            // add a handler for each mutation - includes checking availability, but doesn't initiate any writes, yet
            for (Mutation mutation : mutations) {
                if (hasLocalMutation(mutation))
                    writeMetrics.localRequests.mark();
                else
                    writeMetrics.remoteRequests.mark();

                String keyspaceName = mutation.getKeyspaceName();
                Token tk = mutation.key().getToken();
                AbstractReplicationStrategy replicationStrategy = Keyspace.open(keyspaceName).getReplicationStrategy();
                Optional<Replica> pairedEndpoint = ViewUtils.getViewNaturalEndpoint(replicationStrategy, baseToken, tk);
                EndpointsForToken pendingReplicas = StorageService.instance.getTokenMetadata().pendingEndpointsForToken(tk, keyspaceName);

                // if there are no paired endpoints there are probably range movements going on,
                // so we write to the local batchlog to replay later
                if (!pairedEndpoint.isPresent()) {
                    if (pendingReplicas.isEmpty())
                        logger.warn("Received base materialized view mutation for key {} that does not belong " +
                                    "to this node. There is probably a range movement happening (move or decommission), " +
                                    "but this node hasn't updated its ring metadata yet. Adding mutation to " +
                                    "local batchlog to be replayed later.", mutation.key());
                    continue;
                }

                // if the paired endpoint is the local node and there are no pending replicas, apply the view
                // mutation locally; otherwise perform an ordinary write so the view mutation is also sent to
                // the pending endpoint
                if (pairedEndpoint.get().isSelf() && StorageService.instance.isJoined() && pendingReplicas.isEmpty()) {
                    try {
                        mutation.apply(writeCommitLog);
                        nonLocalMutations.remove(mutation);
                        // won't trigger cleanup
                        cleanup.decrement();
                    } catch (Exception exc) {
                        logger.error("Error applying local view update: Mutation (keyspace {}, tables {}, partition key {})", mutation.getKeyspaceName(), mutation.getTableIds(), mutation.key());
                        throw exc;
                    }
                } else {
                    ReplicaLayout.ForTokenWrite liveAndDown = ReplicaLayout.forTokenWrite(replicationStrategy, EndpointsForToken.of(tk, pairedEndpoint.get()), pendingReplicas);
                    wrappers.add(wrapViewBatchResponseHandler(mutation, consistencyLevel, consistencyLevel, liveAndDown, baseComplete, WriteType.BATCH, cleanup, queryStartNanoTime));
                }
            }

            // apply to the local batchlog memtable in this thread
            if (!nonLocalMutations.isEmpty())
                BatchlogManager.store(Batch.createLocal(batchUUID, FBUtilities.timestampMicros(), nonLocalMutations), writeCommitLog);

            // perform remote writes
            if (!wrappers.isEmpty())
                asyncWriteBatchedMutations(wrappers, localDataCenter, Stage.VIEW_MUTATION);
        }
    } finally {
        viewWriteMetrics.addNano(nanoTime() - startTime);
    }
}
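The BatchlogCleanup above is essentially a shared countdown: each of the mutations.size() participants decrements it, and the batchlog entry is removed only after the last decrement. A plain-JDK sketch of that pattern, with illustrative names (this is not Cassandra's BatchlogCleanup implementation):

import java.util.concurrent.atomic.AtomicInteger;

// Runs a cleanup action exactly once, after decrement() has been called
// the expected number of times (once per outstanding mutation).
final class CountdownCleanup {
    private final AtomicInteger remaining;
    private final Runnable onComplete;

    CountdownCleanup(int expected, Runnable onComplete) {
        this.remaining = new AtomicInteger(expected);
        this.onComplete = onComplete;
    }

    void decrement() {
        if (remaining.decrementAndGet() == 0)
            onComplete.run(); // the last decrement triggers the cleanup
    }
}

In the snippet, a locally applied mutation releases its share of the countdown immediately, while each batched write releases its share when its response handler completes.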
Use of org.apache.cassandra.exceptions.OverloadedException in project cassandra by apache.
The class CQLMessageHandler, method discardAndThrow.
private void discardAndThrow(Limit endpointReserve, Limit globalReserve, ByteBuffer buf, Envelope.Header header, int messageSize, Overload overload) {
    ClientMetrics.instance.markRequestDiscarded();
    logOverload(endpointReserve, globalReserve, header, messageSize);

    OverloadedException exception = buildOverloadedException(endpointReserve, globalReserve, overload);
    handleError(exception, header);

    // Don't stop processing incoming messages, as we rely on the client to apply
    // backpressure when it receives OverloadedException, but discard this message
    // as we're responding with the overloaded error.
    incrementReceivedMessageMetrics(messageSize);
    buf.position(buf.position() + Envelope.Header.LENGTH + messageSize);
}
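The final buf.position(...) call is the discard itself: the handler jumps over the envelope header and the unread body so the next envelope can be processed. A standalone sketch of that skip arithmetic; the 9-byte header size is an assumption based on the native-protocol envelope layout (version, flags, 2-byte stream id, opcode, 4-byte body length), not a value taken from this snippet:

import java.nio.ByteBuffer;

final class FrameSkipper {
    // Assumed envelope header size; stands in for Envelope.Header.LENGTH.
    static final int HEADER_LENGTH = 9;

    // Advances the buffer past one complete envelope without decoding it,
    // leaving the position at the start of the next envelope.
    static void skipEnvelope(ByteBuffer buf, int bodySize) {
        buf.position(buf.position() + HEADER_LENGTH + bodySize);
    }
}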
Use of org.apache.cassandra.exceptions.OverloadedException in project cassandra by apache.
The class ClientResourceLimitsTest, method testOverloadedException.
private void testOverloadedException(Supplier<SimpleClient> clientSupplier) {
    try (SimpleClient client = clientSupplier.get()) {
        QueryMessage queryMessage = new QueryMessage("CREATE TABLE atable (pk int PRIMARY KEY, v text)", V5_DEFAULT_OPTIONS);
        client.execute(queryMessage);

        // this second request is expected to trip the server's resource limits
        queryMessage = queryMessage();
        try {
            client.execute(queryMessage);
            fail();
        } catch (RuntimeException e) {
            assertTrue(e.getCause() instanceof OverloadedException);
        }
    }
}
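Note that this assertion checks only the immediate cause, whereas SimpleClientPerfTest above searched the entire chain with Throwables.anyCauseMatches. A plain-JDK sketch of the chain-walking check, with an illustrative helper name:

final class Causes {
    // Walks the cause chain and reports whether any link is of the given type.
    static boolean anyCauseMatches(Throwable t, Class<? extends Throwable> type) {
        for (Throwable cause = t; cause != null; cause = cause.getCause()) {
            if (type.isInstance(cause))
                return true;
        }
        return false;
    }
}

With it, assertTrue(Causes.anyCauseMatches(e, OverloadedException.class)) would accept the overload at any nesting depth rather than only one level down.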