Use of org.apache.cassandra.exceptions.ReadTimeoutException in project cassandra by apache.
The class StorageProxy, method readWithPaxos.
private static PartitionIterator readWithPaxos(SinglePartitionReadCommand.Group group, ConsistencyLevel consistencyLevel, ClientState state, long queryStartNanoTime) throws InvalidRequestException, UnavailableException, ReadFailureException, ReadTimeoutException {
    assert state != null;
    if (group.queries.size() > 1)
        throw new InvalidRequestException("SERIAL/LOCAL_SERIAL consistency may only be requested for one partition at a time");

    long start = nanoTime();
    SinglePartitionReadCommand command = group.queries.get(0);
    TableMetadata metadata = command.metadata();
    DecoratedKey key = command.partitionKey();
    // Calculate blockFor before repairing any paxos round, to avoid the replication strategy being altered in between.
    int blockForRead = consistencyLevel.blockFor(Keyspace.open(metadata.keyspace).getReplicationStrategy());
    PartitionIterator result = null;
    try {
        final ConsistencyLevel consistencyForReplayCommitsOrFetch = consistencyLevel == ConsistencyLevel.LOCAL_SERIAL ? ConsistencyLevel.LOCAL_QUORUM : ConsistencyLevel.QUORUM;
        try {
            // Commit an empty update to make sure all in-progress updates that should be finished first are,
            // _and_ that no other in-progress update can get resurrected.
            Supplier<Pair<PartitionUpdate, RowIterator>> updateProposer = Paxos.getPaxosVariant() == Config.PaxosVariant.v1_without_linearizable_reads ? () -> null : () -> Pair.create(PartitionUpdate.emptyUpdate(metadata, key), null);
            // When replaying, we commit at quorum/local_quorum, as we want to be sure the following read (done at
            // quorum/local_quorum) sees any replayed updates. Our own update is however empty, and those don't even
            // get committed due to an optimization described in doPaxos/beginAndRepairPaxos, so the commit
            // consistency is irrelevant (we use ANY just to emphasize that we don't wait on our commit).
            doPaxos(metadata, key, consistencyLevel, consistencyForReplayCommitsOrFetch, ConsistencyLevel.ANY, start, casReadMetrics, updateProposer);
        } catch (WriteTimeoutException e) {
            throw new ReadTimeoutException(consistencyLevel, 0, blockForRead, false);
        } catch (WriteFailureException e) {
            throw new ReadFailureException(consistencyLevel, e.received, e.blockFor, false, e.failureReasonByEndpoint);
        }
        result = fetchRows(group.queries, consistencyForReplayCommitsOrFetch, queryStartNanoTime);
    } catch (UnavailableException e) {
        readMetrics.unavailables.mark();
        casReadMetrics.unavailables.mark();
        readMetricsForLevel(consistencyLevel).unavailables.mark();
        logRequestException(e, group.queries);
        throw e;
    } catch (ReadTimeoutException e) {
        readMetrics.timeouts.mark();
        casReadMetrics.timeouts.mark();
        readMetricsForLevel(consistencyLevel).timeouts.mark();
        logRequestException(e, group.queries);
        throw e;
    } catch (ReadAbortException e) {
        readMetrics.markAbort(e);
        casReadMetrics.markAbort(e);
        readMetricsForLevel(consistencyLevel).markAbort(e);
        throw e;
    } catch (ReadFailureException e) {
        readMetrics.failures.mark();
        casReadMetrics.failures.mark();
        readMetricsForLevel(consistencyLevel).failures.mark();
        throw e;
    } finally {
        long latency = nanoTime() - start;
        readMetrics.addNano(latency);
        casReadMetrics.addNano(latency);
        readMetricsForLevel(consistencyLevel).addNano(latency);
        Keyspace.open(metadata.keyspace).getColumnFamilyStore(metadata.name).metric.coordinatorReadLatency.update(latency, TimeUnit.NANOSECONDS);
    }
    return result;
}
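A detail worth isolating: a timeout during the paxos begin/repair phase arrives internally as a WriteTimeoutException, but it is rethrown as a ReadTimeoutException so the client sees a failure matching the read it issued. received is reported as 0 and dataPresent as false because the actual read never ran, and blockForRead was computed up front so it reflects the replication strategy as of the start of the query. A minimal sketch of that translation, using the exception constructor shown above but a hypothetical class and helper name (PaxosReadExceptions.asReadTimeout):

import org.apache.cassandra.db.ConsistencyLevel;
import org.apache.cassandra.exceptions.ReadTimeoutException;
import org.apache.cassandra.exceptions.WriteTimeoutException;

final class PaxosReadExceptions {
    // Hypothetical helper capturing the pattern from readWithPaxos: a
    // paxos-begin timeout surfaces to the client as a read timeout.
    static ReadTimeoutException asReadTimeout(WriteTimeoutException ignored,
                                              ConsistencyLevel consistency,
                                              int blockForRead) {
        // received = 0 and dataPresent = false: the read itself never
        // started; only the paxos begin/repair phase timed out.
        return new ReadTimeoutException(consistency, 0, blockForRead, false);
    }
}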
Use of org.apache.cassandra.exceptions.ReadTimeoutException in project cassandra by apache.
The class BlockingReadRepairs, method createRepairMutation.
/**
 * Create a read repair mutation from the given update, provided the mutation is not larger than the
 * maximum mutation size; otherwise return null or, if we're configured to be strict, throw an exception.
 */
public static Mutation createRepairMutation(PartitionUpdate update, ConsistencyLevel consistency, InetAddressAndPort destination, boolean suppressException) {
    if (update == null)
        return null;

    DecoratedKey key = update.partitionKey();
    Mutation mutation = new Mutation(update);
    int messagingVersion = MessagingService.instance().versions.get(destination);
    try {
        mutation.validateSize(messagingVersion, 0);
        return mutation;
    } catch (MutationExceededMaxSizeException e) {
        Keyspace keyspace = Keyspace.open(mutation.getKeyspaceName());
        TableMetadata metadata = update.metadata();
        if (DROP_OVERSIZED_READ_REPAIR_MUTATIONS) {
            logger.debug("Encountered an oversized ({}/{}) read repair mutation for table {}, key {}, node {}",
                         e.mutationSize, MAX_MUTATION_SIZE, metadata, metadata.partitionKeyType.getString(key.getKey()), destination);
        } else {
            logger.warn("Encountered an oversized ({}/{}) read repair mutation for table {}, key {}, node {}",
                        e.mutationSize, MAX_MUTATION_SIZE, metadata, metadata.partitionKeyType.getString(key.getKey()), destination);
            if (!suppressException) {
                int blockFor = consistency.blockFor(keyspace.getReplicationStrategy());
                Tracing.trace("Timed out while read-repairing after receiving all {} data and digest responses", blockFor);
                throw new ReadTimeoutException(consistency, blockFor - 1, blockFor, true);
            }
        }
        return null;
    }
}
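A minimal caller-side sketch of the three outcomes: a usable mutation, an oversized one dropped with a log line, and (in strict mode) a ReadTimeoutException. The sendRepair method below is a hypothetical name, not a real Cassandra API, and actual message dispatch is elided:

import org.apache.cassandra.db.ConsistencyLevel;
import org.apache.cassandra.db.Mutation;
import org.apache.cassandra.db.partitions.PartitionUpdate;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.service.reads.repair.BlockingReadRepairs;

final class RepairDispatch {
    // Hypothetical usage of createRepairMutation above.
    static void sendRepair(PartitionUpdate update, ConsistencyLevel consistency, InetAddressAndPort destination) {
        // suppressException = true: an oversized mutation is logged and
        // dropped (null is returned) instead of throwing ReadTimeoutException.
        Mutation repair = BlockingReadRepairs.createRepairMutation(update, consistency, destination, true);
        if (repair == null)
            return; // nothing to repair, or the mutation exceeded the maximum mutation size
        // Dispatch to the replica, e.g. via MessagingService (elided here).
    }
}

With suppressException = false, an oversized mutation instead throws ReadTimeoutException with received = blockFor - 1, signalling that the coordinator received all the responses it needed but could not complete the repair.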