Use of org.apache.cassandra.exceptions.WriteTimeoutException in project cassandra by apache.
From the class ErrorMessageTest, method testV4CasWriteTimeoutSerDeser.
@Test
public void testV4CasWriteTimeoutSerDeser() {
    int contentions = 1;
    int receivedBlockFor = 3;
    ConsistencyLevel consistencyLevel = ConsistencyLevel.SERIAL;
    CasWriteTimeoutException ex = new CasWriteTimeoutException(WriteType.CAS, consistencyLevel, receivedBlockFor, receivedBlockFor, contentions);
    ErrorMessage deserialized = encodeThenDecode(ErrorMessage.fromException(ex), ProtocolVersion.V4);

    // Protocol V4 has no CAS-specific timeout representation, so the exception is
    // downgraded to a plain WriteTimeoutException on the wire.
    assertTrue(deserialized.error instanceof WriteTimeoutException);
    assertFalse(deserialized.error instanceof CasWriteTimeoutException);

    WriteTimeoutException deserializedEx = (WriteTimeoutException) deserialized.error;
    assertEquals(WriteType.CAS, deserializedEx.writeType);
    assertEquals(consistencyLevel, deserializedEx.consistency);
    assertEquals(receivedBlockFor, deserializedEx.received);
    assertEquals(receivedBlockFor, deserializedEx.blockFor);
}
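For contrast, protocol V5 and later can carry the CAS-specific timeout without downgrading it, so the subtype and its contentions count should survive the round trip. A minimal companion sketch, reusing the same encodeThenDecode helper; that V5 preserves CasWriteTimeoutException (and a contentions field) is an assumption here, not something shown in the snippet above:

@Test
public void testV5CasWriteTimeoutSerDeser() {
    CasWriteTimeoutException ex = new CasWriteTimeoutException(WriteType.CAS, ConsistencyLevel.SERIAL, 3, 3, 1);
    ErrorMessage deserialized = encodeThenDecode(ErrorMessage.fromException(ex), ProtocolVersion.V5);
    // assumed: V5 carries the CAS contentions count on the wire, so the subtype is preserved
    assertTrue(deserialized.error instanceof CasWriteTimeoutException);
    assertEquals(1, ((CasWriteTimeoutException) deserialized.error).contentions);
}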
Use of org.apache.cassandra.exceptions.WriteTimeoutException in project cassandra by apache.
From the class MutationVerbHandler, method doVerb.
public void doVerb(MessageIn<Mutation> message, int id) throws IOException {
    // Check if there were any forwarding headers in this message
    byte[] from = message.parameters.get(Mutation.FORWARD_FROM);
    InetAddress replyTo;
    if (from == null) {
        replyTo = message.from;
        byte[] forwardBytes = message.parameters.get(Mutation.FORWARD_TO);
        if (forwardBytes != null)
            forwardToLocalNodes(message.payload, message.verb, forwardBytes, message.from);
    } else {
        replyTo = InetAddress.getByAddress(from);
    }
    try {
        // apply the mutation asynchronously: reply on success, record the failure otherwise
        message.payload.applyFuture().thenAccept(o -> reply(id, replyTo)).exceptionally(wto -> {
            // failures inside the future (e.g. a write timeout) land here
            failed();
            return null;
        });
    } catch (WriteTimeoutException wto) {
        // failures thrown before the future exists land here
        failed();
    }
}
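The pattern worth noting is that a mutation can fail on two paths: synchronously, before applyFuture() returns, and asynchronously, inside the returned future; the handler covers the first with try/catch and the second with exceptionally(). A self-contained sketch of that shape (plain Java, illustrative names, not Cassandra code):

import java.util.concurrent.CompletableFuture;

public class ApplyFutureDemo {
    // Simulates a mutation that can fail before or after a future is returned.
    static CompletableFuture<Void> applyFuture(boolean failAsync) {
        if (!failAsync)
            throw new RuntimeException("synchronous failure");
        CompletableFuture<Void> f = new CompletableFuture<>();
        f.completeExceptionally(new RuntimeException("asynchronous failure"));
        return f;
    }

    static void handle(boolean failAsync) {
        try {
            applyFuture(failAsync)
                .thenAccept(o -> System.out.println("applied; sending reply"))
                .exceptionally(t -> {           // async path: t wraps the original cause
                    System.out.println("failed (async): " + t.getCause().getMessage());
                    return null;
                });
        } catch (RuntimeException e) {          // sync path: thrown before the chain exists
            System.out.println("failed (sync): " + e.getMessage());
        }
    }

    public static void main(String[] args) {
        handle(true);   // prints: failed (async): asynchronous failure
        handle(false);  // prints: failed (sync): synchronous failure
    }
}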
Use of org.apache.cassandra.exceptions.WriteTimeoutException in project cassandra by apache.
From the class StorageProxy, method mutate.
/**
 * Use this method to have these Mutations applied
 * across all replicas. This method will take care
 * of the possibility of a replica being down and hint
 * the data across to some other replica.
 *
 * @param mutations the mutations to be applied across the replicas
 * @param consistencyLevel the consistency level for the operation
 * @param queryStartNanoTime the value of nanoTime() when the query started to be processed
 */
public static void mutate(List<? extends IMutation> mutations, ConsistencyLevel consistencyLevel, long queryStartNanoTime) throws UnavailableException, OverloadedException, WriteTimeoutException, WriteFailureException {
    Tracing.trace("Determining replicas for mutation");
    final String localDataCenter = DatabaseDescriptor.getEndpointSnitch().getLocalDatacenter();
    long startTime = nanoTime();
    List<AbstractWriteResponseHandler<IMutation>> responseHandlers = new ArrayList<>(mutations.size());
    WriteType plainWriteType = mutations.size() <= 1 ? WriteType.SIMPLE : WriteType.UNLOGGED_BATCH;
    try {
        for (IMutation mutation : mutations) {
            if (hasLocalMutation(mutation))
                writeMetrics.localRequests.mark();
            else
                writeMetrics.remoteRequests.mark();
            if (mutation instanceof CounterMutation)
                responseHandlers.add(mutateCounter((CounterMutation) mutation, localDataCenter, queryStartNanoTime));
            else
                responseHandlers.add(performWrite(mutation, consistencyLevel, localDataCenter, standardWritePerformer, null, plainWriteType, queryStartNanoTime));
        }
        // upgrade to full quorum any failed cheap quorums
        for (int i = 0; i < mutations.size(); ++i) {
            // at the moment, only non-counter writes support cheap quorums
            if (!(mutations.get(i) instanceof CounterMutation))
                responseHandlers.get(i).maybeTryAdditionalReplicas(mutations.get(i), standardWritePerformer, localDataCenter);
        }
        // wait for writes; throws WriteTimeoutException or WriteFailureException if necessary
        for (AbstractWriteResponseHandler<IMutation> responseHandler : responseHandlers)
            responseHandler.get();
    } catch (WriteTimeoutException | WriteFailureException ex) {
        if (consistencyLevel == ConsistencyLevel.ANY) {
            // at CL.ANY, a timeout is not an error: store hints instead of propagating
            hintMutations(mutations);
        } else {
            if (ex instanceof WriteFailureException) {
                writeMetrics.failures.mark();
                writeMetricsForLevel(consistencyLevel).failures.mark();
                WriteFailureException fe = (WriteFailureException) ex;
                Tracing.trace("Write failure; received {} of {} required replies, failed {} requests", fe.received, fe.blockFor, fe.failureReasonByEndpoint.size());
            } else {
                writeMetrics.timeouts.mark();
                writeMetricsForLevel(consistencyLevel).timeouts.mark();
                WriteTimeoutException te = (WriteTimeoutException) ex;
                Tracing.trace("Write timeout; received {} of {} required replies", te.received, te.blockFor);
            }
            throw ex;
        }
    } catch (UnavailableException e) {
        writeMetrics.unavailables.mark();
        writeMetricsForLevel(consistencyLevel).unavailables.mark();
        Tracing.trace("Unavailable");
        throw e;
    } catch (OverloadedException e) {
        // overloaded coordinators are accounted as unavailables
        writeMetrics.unavailables.mark();
        writeMetricsForLevel(consistencyLevel).unavailables.mark();
        Tracing.trace("Overloaded");
        throw e;
    } finally {
        long latency = nanoTime() - startTime;
        writeMetrics.addNano(latency);
        writeMetricsForLevel(consistencyLevel).addNano(latency);
        updateCoordinatorWriteLatencyTableMetric(mutations, latency);
    }
}
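Three things structure mutate()'s error handling: at ConsistencyLevel.ANY a timeout is absorbed by writing hints instead of propagating, every failure class marks its own metric before rethrowing, and the finally block records latency whether the write succeeded or not. A self-contained sketch of that accounting shape (illustrative names, not Cassandra's metrics API):

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.LongAdder;

public class WriteAccounting {
    static final LongAdder timeouts = new LongAdder();
    static final LongAdder failures = new LongAdder();
    static final LongAdder totalLatencyNanos = new LongAdder();

    /** Runs a write, classifies errors into metrics, rethrows, and always records latency. */
    static void write(Runnable work) {
        long start = System.nanoTime();
        try {
            work.run();
        } catch (RuntimeException ex) {
            // one shared catch, classified by type, as in mutate() above
            if (ex.getCause() instanceof TimeoutException)
                timeouts.increment();
            else
                failures.increment();
            throw ex; // the caller still sees the original exception
        } finally {
            totalLatencyNanos.add(System.nanoTime() - start); // success and failure alike
        }
    }

    public static void main(String[] args) {
        try {
            write(() -> { throw new RuntimeException(new TimeoutException("replica timeout")); });
        } catch (RuntimeException expected) {
        }
        System.out.println("timeouts=" + timeouts.sum() + " latencyNanos=" + totalLatencyNanos.sum());
    }
}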
Use of org.apache.cassandra.exceptions.WriteTimeoutException in project cassandra by apache.
From the class StorageProxy, method beginAndRepairPaxos.
/**
 * Begin a Paxos session by sending a prepare request and completing any in-progress requests seen in the replies.
 *
 * @return the Paxos ballot promised by the replicas if no in-progress requests were seen and a quorum of
 * nodes have seen the mostRecentCommit. Otherwise, retries until the contention timeout expires and then
 * throws CasWriteTimeoutException.
 */
private static PaxosBallotAndContention beginAndRepairPaxos(long queryStartNanoTime, DecoratedKey key, TableMetadata metadata, ReplicaPlan.ForPaxosWrite paxosPlan, ConsistencyLevel consistencyForPaxos, ConsistencyLevel consistencyForCommit, CASClientRequestMetrics casMetrics) throws WriteTimeoutException, WriteFailureException {
    long timeoutNanos = DatabaseDescriptor.getCasContentionTimeout(NANOSECONDS);
    PrepareCallback summary = null;
    int contentions = 0;
    while (nanoTime() - queryStartNanoTime < timeoutNanos) {
        // We want a timestamp that is guaranteed to be unique for that node (so that the ballot is globally unique), but if we've got a prepare rejected
        // already we also want to make sure we pick a timestamp that has a chance to be promised, i.e. one that is greater than the most recently known
        // in-progress one (#5667). Lastly, we don't want to use a timestamp that is older than the last one assigned by ClientState or operations may appear
        // out of order (#7801).
        long minTimestampMicrosToUse = summary == null ? Long.MIN_VALUE : 1 + UUIDGen.microsTimestamp(summary.mostRecentInProgressCommit.ballot);
        long ballotMicros = nextBallotTimestampMicros(minTimestampMicrosToUse);
        // Note that ballotMicros is not guaranteed to be unique if two proposals are being handled concurrently by the same coordinator. But we still
        // need ballots to be unique for each proposal, so we have to use getRandomTimeUUIDFromMicros.
        UUID ballot = randomBallot(ballotMicros, consistencyForPaxos == SERIAL);
        // prepare
        try {
            Tracing.trace("Preparing {}", ballot);
            Commit toPrepare = Commit.newPrepare(key, metadata, ballot);
            summary = preparePaxos(toPrepare, paxosPlan, queryStartNanoTime);
            if (!summary.promised) {
                Tracing.trace("Some replicas have already promised a higher ballot than ours; aborting");
                contentions++;
                // sleep a random amount to give the other proposer a chance to finish
                Uninterruptibles.sleepUninterruptibly(ThreadLocalRandom.current().nextInt(100), MILLISECONDS);
                continue;
            }
            Commit inProgress = summary.mostRecentInProgressCommit;
            Commit mostRecent = summary.mostRecentCommit;
            // If the replies show a newer in-progress round with a non-empty update, completing it before
            // doing our own proposal is more efficient, so we do so.
            if (!inProgress.update.isEmpty() && inProgress.isAfter(mostRecent)) {
                Tracing.trace("Finishing incomplete paxos round {}", inProgress);
                casMetrics.unfinishedCommit.inc();
                Commit refreshedInProgress = Commit.newProposal(ballot, inProgress.update);
                if (proposePaxos(refreshedInProgress, paxosPlan, false, queryStartNanoTime)) {
                    commitPaxos(refreshedInProgress, consistencyForCommit, false, queryStartNanoTime);
                } else {
                    Tracing.trace("Some replicas have already promised a higher ballot than ours; aborting");
                    // sleep a random amount to give the other proposer a chance to finish
                    contentions++;
                    Uninterruptibles.sleepUninterruptibly(ThreadLocalRandom.current().nextInt(100), MILLISECONDS);
                }
                continue;
            }
            // To be able to propose our value on a new round, we need a quorum of replicas to have learned the previous one. Why is explained at:
            // https://issues.apache.org/jira/browse/CASSANDRA-5062?focusedCommentId=13619810&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13619810
            // Since we waited for quorum nodes, if some of them haven't seen the last commit (which may just be a timing issue, but may also
            // mean we lost messages), we pro-actively "repair" those nodes, and retry.
            int nowInSec = Ints.checkedCast(TimeUnit.MICROSECONDS.toSeconds(ballotMicros));
            Iterable<InetAddressAndPort> missingMRC = summary.replicasMissingMostRecentCommit(metadata, nowInSec);
            if (Iterables.size(missingMRC) > 0) {
                Tracing.trace("Repairing replicas that missed the most recent commit");
                sendCommit(mostRecent, missingMRC);
                // sendCommit does not wait for acknowledgements; if it did, we could pass CL.ALL to the commit above and remove the 'continue'.
                continue;
            }
            return new PaxosBallotAndContention(ballot, contentions);
        } catch (WriteTimeoutException e) {
            // We're still doing preparation for the paxos rounds, so we want to use the CAS write type (see CASSANDRA-8672)
            throw new CasWriteTimeoutException(WriteType.CAS, e.consistency, e.received, e.blockFor, contentions);
        }
    }
    throw new CasWriteTimeoutException(WriteType.CAS, consistencyForPaxos, 0, consistencyForPaxos.blockFor(paxosPlan.replicationStrategy()), contentions);
}
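Stripped of the Paxos specifics, beginAndRepairPaxos is a deadline loop with randomized backoff that threads a contention counter into the exception it finally throws. A self-contained skeleton of that shape (simplified names; the real loop uses Uninterruptibles.sleepUninterruptibly and throws CasWriteTimeoutException):

import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class ContentionRetryLoop {
    /** One prepare/propose attempt; returns true when the round succeeded. */
    interface Attempt { boolean run() throws Exception; }

    static int runUntilDeadline(long timeoutNanos, Attempt attempt) throws Exception {
        long start = System.nanoTime();
        int contentions = 0;
        while (System.nanoTime() - start < timeoutNanos) {
            if (attempt.run())
                return contentions;                 // success: report how contended we were
            contentions++;                          // a higher ballot beat us
            // back off 0-99 ms so the competing proposer gets a chance to finish
            TimeUnit.MILLISECONDS.sleep(ThreadLocalRandom.current().nextInt(100));
        }
        throw new TimeoutException("gave up after " + contentions + " contentions");
    }

    public static void main(String[] args) throws Exception {
        int[] tries = {0};
        // two contended attempts, then success
        int contentions = runUntilDeadline(TimeUnit.SECONDS.toNanos(5), () -> ++tries[0] >= 3);
        System.out.println("succeeded after " + contentions + " contentions"); // prints 2
    }
}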
Use of org.apache.cassandra.exceptions.WriteTimeoutException in project cassandra by apache.
From the class StorageProxy, method preparePaxos.
private static PrepareCallback preparePaxos(Commit toPrepare, ReplicaPlan.ForPaxosWrite replicaPlan, long queryStartNanoTime) throws WriteTimeoutException {
    PrepareCallback callback = new PrepareCallback(toPrepare.update.partitionKey(), toPrepare.update.metadata(), replicaPlan.requiredParticipants(), replicaPlan.consistencyLevel(), queryStartNanoTime);
    Message<Commit> message = Message.out(PAXOS_PREPARE_REQ, toPrepare);
    boolean hasLocalRequest = false;
    for (Replica replica : replicaPlan.contacts()) {
        if (replica.isSelf()) {
            // local fast path: run the prepare on the Paxos stage instead of going through messaging
            hasLocalRequest = true;
            PAXOS_PREPARE_REQ.stage.execute(() -> {
                try {
                    callback.onResponse(message.responseWith(doPrepare(toPrepare)));
                } catch (Exception ex) {
                    logger.error("Failed paxos prepare locally", ex);
                }
            });
        } else {
            MessagingService.instance().sendWithCallback(message, replica.endpoint(), callback);
        }
    }
    if (hasLocalRequest)
        writeMetrics.localRequests.mark();
    else
        writeMetrics.remoteRequests.mark();
    // block until requiredParticipants replicas have responded
    callback.await();
    return callback;
}
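Responses reach the PrepareCallback from two directions: the local fast path executed on the Paxos stage and remote replicas via the messaging callback; await() then blocks until requiredParticipants of them have answered. A minimal latch-based sketch of that counting shape (illustrative; the real PrepareCallback also records promises and throws WriteTimeoutException on timeout):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class QuorumAwait {
    private final CountDownLatch latch;

    QuorumAwait(int requiredParticipants) {
        this.latch = new CountDownLatch(requiredParticipants);
    }

    /** Called once per replica response, local or remote. */
    void onResponse() { latch.countDown(); }

    /** Blocks until enough replicas have answered, or times out. */
    void await(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException {
        if (!latch.await(timeout, unit))
            throw new TimeoutException("quorum not reached");
    }

    public static void main(String[] args) throws Exception {
        QuorumAwait q = new QuorumAwait(2);
        new Thread(q::onResponse).start(); // e.g. the local fast-path response
        new Thread(q::onResponse).start(); // e.g. a remote replica's response
        q.await(1, TimeUnit.SECONDS);
        System.out.println("quorum reached");
    }
}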