Use of org.apache.cassandra.transport.ClientResourceLimits.Overload in project cassandra by apache.
From the class CQLMessageHandler, method processOneContainedMessage.
/**
* Checks limits on bytes in flight and the request rate limiter (if enabled), then takes one of three actions:
*
* 1.) If no limits are breached, process the request.
* 2.) If a limit is breached, and the connection is configured to throw on overload, throw {@link OverloadedException}.
* 3.) If a limit is breached, and the connection is not configured to throw, process the request and return false
* to let the {@link FrameDecoder} know it should stop processing frames.
*
* If the connection is configured to throw {@link OverloadedException}, requests that breach the rate limit are
* not counted against that limit.
*
* @return true if the {@link FrameDecoder} should continue to process incoming frames, and false if it should stop
* processing them, effectively applying backpressure to clients
*
* @throws ErrorMessage.WrappedException with an {@link OverloadedException} if overload occurs and the
* connection is configured to throw on overload
*/
protected boolean processOneContainedMessage(ShareableBytes bytes, Limit endpointReserve, Limit globalReserve) {
    ByteBuffer buf = bytes.get();
    Envelope.Decoder.HeaderExtractionResult extracted = envelopeDecoder.extractHeader(buf);
    if (!extracted.isSuccess())
        return handleProtocolException(extracted.error(), buf, extracted.streamId(), extracted.bodyLength());

    Envelope.Header header = extracted.header();
    if (header.version != version) {
        ProtocolException error = new ProtocolException(String.format("Invalid message version. Got %s but previous " +
                                                                      "messages on this connection had version %s",
                                                                      header.version, version));
        return handleProtocolException(error, buf, header.streamId, header.bodySizeInBytes);
    }

    // max CQL message size defaults to 256mb, so should be safe to downcast
    int messageSize = Ints.checkedCast(header.bodySizeInBytes);

    if (throwOnOverload) {
        if (!acquireCapacity(header, endpointReserve, globalReserve)) {
            discardAndThrow(endpointReserve, globalReserve, buf, header, messageSize, Overload.BYTES_IN_FLIGHT);
            return true;
        }

        if (DatabaseDescriptor.getNativeTransportRateLimitingEnabled() && !requestRateLimiter.tryReserve()) {
            // We've already allocated against the bytes-in-flight limits, so release those resources.
            release(header);
            discardAndThrow(endpointReserve, globalReserve, buf, header, messageSize, Overload.REQUESTS);
            return true;
        }
    } else {
        Overload backpressure = Overload.NONE;

        if (!acquireCapacityAndQueueOnFailure(header, endpointReserve, globalReserve)) {
            if (processRequestAndUpdateMetrics(bytes, header, messageSize, Overload.BYTES_IN_FLIGHT)) {
                if (decoder.isActive())
                    ClientMetrics.instance.pauseConnection();
            }
            backpressure = Overload.BYTES_IN_FLIGHT;
        }

        if (DatabaseDescriptor.getNativeTransportRateLimitingEnabled()) {
            // Reserve a permit even if we've already triggered backpressure on bytes in flight.
            long delay = requestRateLimiter.reserveAndGetDelay(RATE_LIMITER_DELAY_UNIT);

            if (backpressure == Overload.NONE && delay > 0) {
                if (processRequestAndUpdateMetrics(bytes, header, messageSize, Overload.REQUESTS)) {
                    if (decoder.isActive())
                        ClientMetrics.instance.pauseConnection();

                    // Schedule a wakeup here if we process successfully. The connection should be closing otherwise.
                    scheduleConnectionWakeupTask(delay, RATE_LIMITER_DELAY_UNIT);
                }
                backpressure = Overload.REQUESTS;
            }
        }

        // If we triggered backpressure, make sure the caller stops processing frames after the request completes.
        if (backpressure != Overload.NONE)
            return false;
    }

    return processRequestAndUpdateMetrics(bytes, header, messageSize, Overload.NONE);
}
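
To make the boolean contract described in the Javadoc concrete, here is a minimal, self-contained sketch of a caller that drains queued messages and stops as soon as the handler signals backpressure. This is not Cassandra's actual FrameDecoder or AbstractMessageHandler code; the BackpressureLoopSketch class, ContainedMessage interface, and drain method are hypothetical stand-ins for illustration only.

// Hypothetical illustration only: these types are stand-ins, not Cassandra classes.
import java.util.ArrayDeque;
import java.util.Queue;

public class BackpressureLoopSketch {
    /** Stand-in for one contained CQL message; process() mirrors the handler's boolean return contract. */
    interface ContainedMessage {
        boolean process();
    }

    /**
     * Processes queued messages in order. A false return means the current message was still handled,
     * but no further messages should be started until the connection is resumed.
     */
    static void drain(Queue<ContainedMessage> pending) {
        while (!pending.isEmpty()) {
            if (!pending.poll().process())
                return; // backpressure: stop here, leaving the rest queued
        }
    }

    public static void main(String[] args) {
        Queue<ContainedMessage> pending = new ArrayDeque<>();
        pending.add(() -> { System.out.println("message 1: processed"); return true; });
        pending.add(() -> { System.out.println("message 2: processed, but a limit was hit"); return false; });
        pending.add(() -> { System.out.println("message 3: should not run yet"); return true; });
        drain(pending); // prints the first two lines; message 3 stays queued
    }
}

The key point the sketch captures is case 3 of the Javadoc: the request that breached the limit is still processed, and the false return only prevents the caller from starting on subsequent frames.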