Usage example of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project: class CASTest, method testSuccessfulWriteDuringRangeMovementFollowedByConflicting.
/**
 * Successful write during range movement not witnessed by write after range movement.
 *
 * - Range moves from {1, 2, 3} to {2, 3, 4}; witnessed by X (not by !X)
 * - !X: Prepare and Propose to {1, 2}
 * - Range movement witnessed by !X
 * - Any: Prepare and Propose to {3, 4}
 *
 * The second (conflicting) insert must witness the first one, i.e. be rejected with the
 * previously written row returned.
 */
@Ignore
@Test
public void testSuccessfulWriteDuringRangeMovementFollowedByConflicting() throws Throwable {
    try (Cluster cluster = Cluster.create(4, config -> config.set("write_request_timeout", REQUEST_TIMEOUT)
                                                             .set("cas_contention_timeout", CONTENTION_TIMEOUT))) {
        cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};");
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v1 int, v2 int, PRIMARY KEY (pk, ck))");
        // make it so {4} is bootstrapping, and this has not propagated to other nodes yet
        // Fix: apply the ring removal on EVERY node (cluster.get(i)), not on node 1 four times over —
        // otherwise nodes 2..4 would still see node 4 as a normal ring member and the scenario's
        // "not yet propagated" premise would not hold. (Mirrors the cluster.get(i) loop used below
        // to finish the topology change.)
        for (int i = 1; i <= 4; ++i)
            cluster.get(i).acceptsOnInstance(UnsafeGossipHelper::removeFromRing).accept(cluster.get(4));
        cluster.get(4).acceptsOnInstance(UnsafeGossipHelper::addToRingBootstrapping).accept(cluster.get(4));
        int pk = pk(cluster, 1, 2);
        // {1} promises and accepts on !{3} => {1, 2}; commits on !{2, 3} => {1}
        cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(1).to(3).drop();
        cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(1).to(3).drop();
        cluster.filters().verbs(PAXOS_COMMIT_REQ.id).from(1).to(2, 3).drop();
        assertRows(cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v1) VALUES (?, 1, 1) IF NOT EXISTS", ConsistencyLevel.ONE, pk), row(true));
        // finish topology change
        for (int i = 1; i <= 4; ++i)
            cluster.get(i).acceptsOnInstance(UnsafeGossipHelper::addToRingNormal).accept(cluster.get(4));
        // {3} reads from !{2} => {3, 4}
        cluster.filters().verbs(PAXOS_PREPARE_REQ.id, READ_REQ.id).from(3).to(2).drop();
        // Conflicting insert must observe the earlier write: not applied, existing row returned.
        assertRows(cluster.coordinator(3).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v2) VALUES (?, 1, 2) IF NOT EXISTS", ConsistencyLevel.ONE, pk), row(false, pk, 1, 1, null));
        // TODO: repair and verify base table state
    }
}
Usage example of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project: class CASTest, method simpleUpdate.
/**
 * Exercises a basic LWT round trip on a 3-node cluster: a conditional insert, an update
 * whose IF-condition fails (row must be unchanged), and an update whose IF-condition
 * succeeds (row must reflect the new value). Each step is verified with a SERIAL read.
 */
@Test
public void simpleUpdate() throws Throwable {
    try (Cluster cluster = init(Cluster.create(3))) {
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
        ICoordinator node1 = cluster.coordinator(1);
        // Seed the row via a conditional insert.
        node1.execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
        assertRows(node1.execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL), row(1, 1, 1));
        // Condition (v = 2) does not hold, so this update must be a no-op.
        node1.execute("UPDATE " + KEYSPACE + ".tbl SET v = 3 WHERE pk = 1 and ck = 1 IF v = 2", ConsistencyLevel.QUORUM);
        assertRows(node1.execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL), row(1, 1, 1));
        // Condition (v = 1) holds, so this update must be applied.
        node1.execute("UPDATE " + KEYSPACE + ".tbl SET v = 2 WHERE pk = 1 and ck = 1 IF v = 1", ConsistencyLevel.QUORUM);
        assertRows(node1.execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL), row(1, 1, 2));
    }
}
Usage example of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project: class CASTest, method consistencyAfterWriteTimeoutTest.
/**
 * Base test to ensure that if a write times out but with a proposal accepted by some nodes (less than quorum), and
 * a following SERIAL operation does not observe that write (the nodes having accepted it do not participate in that
 * following operation), then that write is never applied, even when the nodes having accepted the original proposal
 * participate.
 *
 * <p>In other words, if an operation times out, it may or may not be applied, but that "fate" is persistently decided
 * by the very first SERIAL operation that "succeeds" (in the sense of not timing out or throwing some other exception).
 *
 * @param postTimeoutOperation1 a SERIAL operation executed after an initial write that inserts the row [0, 0] times
 *                              out. It is executed with a QUORUM of nodes that have _not_ seen the timed-out
 *                              proposal, and so that operation should expect that the [0, 0] write has not taken
 *                              place.
 * @param postTimeoutOperation2 a 2nd SERIAL operation executed _after_ {@code postTimeoutOperation1}, with no
 *                              write executed between the 2 operations. Contrary to the 1st operation, the QUORUM
 *                              for this operation _will_ include the node that got the proposal for the [0, 0]
 *                              insert but didn't participate in {@code postTimeoutOperation1}. That operation
 *                              should also not witness that [0, 0] write (since {@code postTimeoutOperation1}
 *                              didn't).
 * @param loseCommitOfOperation1 if {@code true}, the test will also drop the "commit" messages for
 *                              {@code postTimeoutOperation1}. In general, the test should behave the same with or
 *                              without that flag since a value is decided as soon as it has been "accepted by
 *                              quorum" and the commits should always be properly replayed.
 */
private void consistencyAfterWriteTimeoutTest(BiConsumer<String, ICoordinator> postTimeoutOperation1, BiConsumer<String, ICoordinator> postTimeoutOperation2, boolean loseCommitOfOperation1) throws IOException {
    // NOTE(review): this uses a fixed "4000ms" write timeout instead of the REQUEST_TIMEOUT constant used by the
    // other tests — presumably to give the deliberately-timed-out insert below ample headroom; confirm before
    // unifying with the shared constant.
    try (Cluster cluster = init(Cluster.create(3, config -> config.set("write_request_timeout", "4000ms").set("cas_contention_timeout", CONTENTION_TIMEOUT)))) {
        String table = KEYSPACE + ".t";
        cluster.schemaChange("CREATE TABLE " + table + " (k int PRIMARY KEY, v int)");
        // We do a CAS insertion, but with the PROPOSE message dropped on nodes 1 and 2. The CAS will not get
        // through and should time out. Importantly, node 3 does receive and answer the PROPOSE.
        IMessageFilters.Filter dropProposeFilter = cluster.filters().inbound().verbs(PAXOS_PROPOSE_REQ.id).from(3).to(1, 2).drop();
        try {
            // NOTE: the consistency below is the "commit" one, so it doesn't matter at all here.
            // NOTE 2: we use node 3 as coordinator because message filters don't currently work for locally
            // delivered messages and as we want to drop messages to 1 and 2, we can't use them.
            cluster.coordinator(3).execute("INSERT INTO " + table + "(k, v) VALUES (0, 0) IF NOT EXISTS", ConsistencyLevel.ONE);
            fail("The insertion should have timed-out");
        } catch (Exception e) {
            // Matching by simple class name is crude — presumably because the exception type isn't directly
            // referenceable across the dtest classloader boundary (TODO confirm); this could
            // be improved at the dtest API level.
            if (!e.getClass().getSimpleName().equals("CasWriteTimeoutException"))
                throw e;
        } finally {
            dropProposeFilter.off();
        }
        // Isolates node 3 and executes the SERIAL operation. As neither node 1 or 2 got the initial insert proposal,
        // there is nothing to "replay" and the operation should assert the table is still empty.
        IMessageFilters.Filter ignoreNode3Filter = cluster.filters().verbs(paxosAndReadVerbs()).to(3).drop();
        IMessageFilters.Filter dropCommitFilter = null;
        if (loseCommitOfOperation1) {
            dropCommitFilter = cluster.filters().verbs(PAXOS_COMMIT_REQ.id).to(1, 2).drop();
        }
        try {
            postTimeoutOperation1.accept(table, cluster.coordinator(1));
        } finally {
            // Always restore connectivity so the next phase runs against the intended set of nodes.
            ignoreNode3Filter.off();
            if (dropCommitFilter != null)
                dropCommitFilter.off();
        }
        // Node 3 is now back and we isolate node 2 to ensure the next read hits node 1 and 3.
        // What we want to ensure is that despite node 3 having the initial insert in its paxos state in a position of
        // being replayed, that insert is _not_ replayed (it would contradict serializability since the previous
        // operation asserted nothing was inserted). It is this execution that failed before CASSANDRA-12126.
        IMessageFilters.Filter ignoreNode2Filter = cluster.filters().verbs(paxosAndReadVerbs()).to(2).drop();
        try {
            postTimeoutOperation2.accept(table, cluster.coordinator(1));
        } finally {
            ignoreNode2Filter.off();
        }
    }
}
Usage example of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project: class CASTest, method incompletePropose.
/**
 * An LWT whose PROPOSE messages are dropped to nodes 2 and 3 must time out. A later
 * operation that encounters the orphaned in-progress proposal must complete it, so the
 * follow-up conditional update sees v = 1 and the final SERIAL read observes v = 2.
 */
@Test
public void incompletePropose() throws Throwable {
    try (Cluster cluster = init(Cluster.create(3, config -> config.set("write_request_timeout", REQUEST_TIMEOUT).set("cas_contention_timeout", CONTENTION_TIMEOUT)))) {
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
        ICoordinator node1 = cluster.coordinator(1);
        IMessageFilters.Filter dropPropose = cluster.filters().verbs(PAXOS_PROPOSE_REQ.id).from(1).to(2, 3).drop();
        try {
            // Only node 1 can accept its own proposal, so the CAS cannot reach quorum.
            node1.execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
            Assert.fail();
        } catch (RuntimeException e) {
            Assert.assertEquals("CAS operation timed out - encountered contentions: 0", e.getMessage());
        }
        dropPropose.off();
        // make sure we encounter one of the in-progress proposals so we complete it
        cluster.filters().verbs(PAXOS_PREPARE_REQ.id).from(1).to(2).drop();
        node1.execute("UPDATE " + KEYSPACE + ".tbl SET v = 2 WHERE pk = 1 and ck = 1 IF v = 1", ConsistencyLevel.QUORUM);
        assertRows(node1.execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL), row(1, 1, 2));
    }
}
Usage example of org.apache.cassandra.distributed.Cluster in the Apache Cassandra project: class CASTest, method incompletePrepare.
/**
 * An LWT whose PREPARE messages are dropped to nodes 2 and 3 must time out. Since no
 * proposal was ever accepted anywhere, nothing can be replayed afterwards: the follow-up
 * conditional update finds no row and the final SERIAL read must come back empty.
 */
@Test
public void incompletePrepare() throws Throwable {
    try (Cluster cluster = init(Cluster.create(3, config -> config.set("write_request_timeout", REQUEST_TIMEOUT).set("cas_contention_timeout", CONTENTION_TIMEOUT)))) {
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
        ICoordinator node1 = cluster.coordinator(1);
        IMessageFilters.Filter dropPrepare = cluster.filters().verbs(PAXOS_PREPARE_REQ.id).from(1).to(2, 3).drop();
        try {
            // The prepare round cannot reach quorum, so the CAS must time out.
            node1.execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, 1) IF NOT EXISTS", ConsistencyLevel.QUORUM);
            Assert.fail();
        } catch (RuntimeException e) {
            Assert.assertEquals("CAS operation timed out - encountered contentions: 0", e.getMessage());
        }
        dropPrepare.off();
        node1.execute("UPDATE " + KEYSPACE + ".tbl SET v = 2 WHERE pk = 1 and ck = 1 IF v = 1", ConsistencyLevel.QUORUM);
        assertRows(node1.execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.SERIAL));
    }
}
Aggregations