Example 1 with Property

Use of net.jqwik.api.Property in project kafka by apache, in the class RaftEventSimulationTest, method canRecoverFromSingleNodeCommittedDataLoss.

@Property(tries = 100, afterFailure = AfterFailureMode.SAMPLE_ONLY)
void canRecoverFromSingleNodeCommittedDataLoss(
        @ForAll int seed,
        @ForAll @IntRange(min = 3, max = 5) int numVoters,
        @ForAll @IntRange(min = 0, max = 2) int numObservers) {
    // We run this test without the `MonotonicEpoch` and `MajorityReachedHighWatermark`
    // invariants since the loss of committed data on one node can violate them.
    Random random = new Random(seed);
    Cluster cluster = new Cluster(numVoters, numObservers, random);
    EventScheduler scheduler = new EventScheduler(cluster.random, cluster.time);
    scheduler.addInvariant(new MonotonicHighWatermark(cluster));
    scheduler.addInvariant(new SingleLeader(cluster));
    scheduler.addValidation(new ConsistentCommittedData(cluster));
    MessageRouter router = new MessageRouter(cluster);
    cluster.startAll();
    schedulePolling(scheduler, cluster, 3, 5);
    scheduler.schedule(router::deliverAll, 0, 2, 5);
    scheduler.schedule(new SequentialAppendAction(cluster), 0, 2, 3);
    scheduler.runUntil(() -> cluster.anyReachedHighWatermark(10));
    RaftNode node = cluster.randomRunning().orElseThrow(() -> new AssertionError("Failed to find running node"));
    // Kill a random node and drop all of its persistent state. The Raft
    // protocol guarantees should still ensure we lose no committed data
    // as long as a new leader is elected before the failed node is restarted.
    cluster.killAndDeletePersistentState(node.nodeId);
    scheduler.runUntil(() -> !cluster.hasLeader(node.nodeId) && cluster.hasConsistentLeader());
    // Now restart the failed node and ensure that it recovers.
    long highWatermarkBeforeRestart = cluster.maxHighWatermarkReached();
    cluster.start(node.nodeId);
    scheduler.runUntil(() -> cluster.allReachedHighWatermark(highWatermarkBeforeRestart + 10));
}
Also used: Random (java.util.Random), Property (net.jqwik.api.Property)
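
The pattern to note is that jqwik generates the randomness seed as an ordinary @ForAll parameter: each trial of the simulation is fully deterministic given its inputs, and AfterFailureMode.SAMPLE_ONLY can replay exactly the sample that failed. A minimal, self-contained sketch of the same seed-replay pattern (class and method names here are illustrative, not from the Kafka source):

import java.util.Random;

import net.jqwik.api.AfterFailureMode;
import net.jqwik.api.ForAll;
import net.jqwik.api.Property;
import net.jqwik.api.constraints.IntRange;

import static org.junit.jupiter.api.Assertions.assertEquals;

class SeededPropertyExample {

    @Property(tries = 100, afterFailure = AfterFailureMode.SAMPLE_ONLY)
    void seededRunsAreReproducible(@ForAll int seed, @ForAll @IntRange(min = 3, max = 5) int draws) {
        // Two Randoms built from the same jqwik-generated seed yield the same
        // sequence, which is what makes a failing trial exactly replayable.
        Random first = new Random(seed);
        Random second = new Random(seed);
        for (int i = 0; i < draws; i++) {
            assertEquals(first.nextInt(), second.nextInt());
        }
    }
}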

Example 2 with Property

Use of net.jqwik.api.Property in project kafka by apache, in the class RaftEventSimulationTest, method canMakeProgressAfterBackToBackLeaderFailures.

@Property(tries = 100, afterFailure = AfterFailureMode.SAMPLE_ONLY)
void canMakeProgressAfterBackToBackLeaderFailures(
        @ForAll int seed,
        @ForAll @IntRange(min = 3, max = 5) int numVoters,
        @ForAll @IntRange(min = 0, max = 5) int numObservers) {
    Random random = new Random(seed);
    Cluster cluster = new Cluster(numVoters, numObservers, random);
    MessageRouter router = new MessageRouter(cluster);
    EventScheduler scheduler = schedulerWithDefaultInvariants(cluster);
    // Seed the cluster with some data
    cluster.startAll();
    schedulePolling(scheduler, cluster, 3, 5);
    scheduler.schedule(router::deliverAll, 0, 2, 5);
    scheduler.schedule(new SequentialAppendAction(cluster), 0, 2, 3);
    scheduler.runUntil(cluster::hasConsistentLeader);
    scheduler.runUntil(() -> cluster.anyReachedHighWatermark(10));
    int leaderId = cluster.latestLeader().getAsInt();
    router.filter(leaderId, new DropAllTraffic());
    scheduler.runUntil(() -> cluster.latestLeader().isPresent() && cluster.latestLeader().getAsInt() != leaderId);
    // As soon as we have a new leader, restore traffic to the old leader and partition the new leader
    int newLeaderId = cluster.latestLeader().getAsInt();
    router.filter(leaderId, new PermitAllTraffic());
    router.filter(newLeaderId, new DropAllTraffic());
    // Verify now that we can make progress
    long targetHighWatermark = cluster.maxHighWatermarkReached() + 10;
    scheduler.runUntil(() -> cluster.anyReachedHighWatermark(targetHighWatermark));
}
Also used: Random (java.util.Random), Property (net.jqwik.api.Property)
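
DropAllTraffic and PermitAllTraffic are internal to Kafka's simulation test, but the underlying idea is a per-node delivery filter that the router consults for every message. A hypothetical sketch of such a filter (all names are illustrative, not Kafka's API):

import java.util.function.Predicate;

// Illustrative only: the router asks the filter whether each message may be
// delivered. Dropping all traffic from the current leader forces a new
// election; switching back to permit-all heals the partition.
interface TrafficFilter<M> extends Predicate<M> {

    static <M> TrafficFilter<M> dropAll() {
        return message -> false; // deliver nothing
    }

    static <M> TrafficFilter<M> permitAll() {
        return message -> true;  // deliver everything
    }
}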

Example 3 with Property

Use of net.jqwik.api.Property in project kafka by apache, in the class RaftEventSimulationTest, method canElectInitialLeader.

@Property(tries = 100, afterFailure = AfterFailureMode.SAMPLE_ONLY)
void canElectInitialLeader(
        @ForAll int seed,
        @ForAll @IntRange(min = 1, max = 5) int numVoters,
        @ForAll @IntRange(min = 0, max = 5) int numObservers) {
    Random random = new Random(seed);
    Cluster cluster = new Cluster(numVoters, numObservers, random);
    MessageRouter router = new MessageRouter(cluster);
    EventScheduler scheduler = schedulerWithDefaultInvariants(cluster);
    cluster.startAll();
    schedulePolling(scheduler, cluster, 3, 5);
    scheduler.schedule(router::deliverAll, 0, 2, 1);
    scheduler.schedule(new SequentialAppendAction(cluster), 0, 2, 3);
    scheduler.runUntil(cluster::hasConsistentLeader);
    scheduler.runUntil(() -> cluster.allReachedHighWatermark(10));
}
Also used: Random (java.util.Random), Property (net.jqwik.api.Property)
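
Note that numVoters goes down to 1 here, so the property also covers a cluster in which a single voter must elect itself. When debugging a property like this, it can help to see which cluster shapes jqwik actually generated; its Statistics API collects that per trial. A small sketch (the property body is illustrative):

import net.jqwik.api.ForAll;
import net.jqwik.api.Property;
import net.jqwik.api.constraints.IntRange;
import net.jqwik.api.statistics.Statistics;

class ClusterShapeStatsExample {

    @Property(tries = 100)
    void reportGeneratedClusterShapes(
            @ForAll @IntRange(min = 1, max = 5) int numVoters,
            @ForAll @IntRange(min = 0, max = 5) int numObservers) {
        // Prints a histogram of voter/observer combinations after the run,
        // useful for checking that the whole input space was exercised.
        Statistics.collect(numVoters, numObservers);
    }
}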

Example 4 with Property

Use of net.jqwik.api.Property in project kafka by apache, in the class RaftEventSimulationTest, method canMakeProgressIfMajorityIsReachable.

@Property(tries = 100, afterFailure = AfterFailureMode.SAMPLE_ONLY)
void canMakeProgressIfMajorityIsReachable(
        @ForAll int seed,
        @ForAll @IntRange(min = 0, max = 3) int numObservers) {
    int numVoters = 5;
    Random random = new Random(seed);
    Cluster cluster = new Cluster(numVoters, numObservers, random);
    MessageRouter router = new MessageRouter(cluster);
    EventScheduler scheduler = schedulerWithDefaultInvariants(cluster);
    // Seed the cluster with some data
    cluster.startAll();
    schedulePolling(scheduler, cluster, 3, 5);
    scheduler.schedule(router::deliverAll, 0, 2, 2);
    scheduler.schedule(new SequentialAppendAction(cluster), 0, 2, 3);
    scheduler.runUntil(cluster::hasConsistentLeader);
    scheduler.runUntil(() -> cluster.anyReachedHighWatermark(10));
    // Partition the nodes into two sets. Nodes are reachable within each set,
    // but the two sets cannot communicate with each other. We should be able
    // to make progress even if an election is needed in the larger set.
    router.filter(0, new DropOutboundRequestsFrom(Utils.mkSet(2, 3, 4)));
    router.filter(1, new DropOutboundRequestsFrom(Utils.mkSet(2, 3, 4)));
    router.filter(2, new DropOutboundRequestsFrom(Utils.mkSet(0, 1)));
    router.filter(3, new DropOutboundRequestsFrom(Utils.mkSet(0, 1)));
    router.filter(4, new DropOutboundRequestsFrom(Utils.mkSet(0, 1)));
    long partitionLogEndOffset = cluster.maxLogEndOffset();
    scheduler.runUntil(() -> cluster.anyReachedHighWatermark(2 * partitionLogEndOffset));
    long minorityHighWatermark = cluster.maxHighWatermarkReached(Utils.mkSet(0, 1));
    long majorityHighWatermark = cluster.maxHighWatermarkReached(Utils.mkSet(2, 3, 4));
    assertTrue(majorityHighWatermark > minorityHighWatermark,
        String.format("majorityHighWatermark = %s, minorityHighWatermark = %s",
            majorityHighWatermark, minorityHighWatermark));
    // Now restore the partition and verify everyone catches up
    router.filter(0, new PermitAllTraffic());
    router.filter(1, new PermitAllTraffic());
    router.filter(2, new PermitAllTraffic());
    router.filter(3, new PermitAllTraffic());
    router.filter(4, new PermitAllTraffic());
    long restoredLogEndOffset = cluster.maxLogEndOffset();
    scheduler.runUntil(() -> cluster.allReachedHighWatermark(2 * restoredLogEndOffset));
}
Also used: Random (java.util.Random), Property (net.jqwik.api.Property)
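
The partition splits the five voters into {0, 1} and {2, 3, 4}; only the three-node side holds a strict majority (3 of 5), so only it can elect a leader and advance its high watermark, which is exactly what the assertion checks. The quorum arithmetic, as a tiny illustrative helper (not part of the Kafka test):

class QuorumMath {

    // A group of voters can elect a leader only if it holds a strict
    // majority of the total voter set.
    static boolean hasMajority(int groupSize, int totalVoters) {
        return groupSize > totalVoters / 2;
    }

    public static void main(String[] args) {
        System.out.println(hasMajority(3, 5)); // true: {2, 3, 4} can elect a leader
        System.out.println(hasMajority(2, 5)); // false: {0, 1} cannot make progress
    }
}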

Example 5 with Property

Use of net.jqwik.api.Property in project kafka by apache, in the class RecordsIteratorTest, method testMemoryRecords.

@Property
public void testMemoryRecords(@ForAll CompressionType compressionType, @ForAll long seed) {
    List<TestBatch<String>> batches = createBatches(seed);
    MemoryRecords memRecords = buildRecords(compressionType, batches);
    testIterator(batches, memRecords);
}
Also used: MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Property (net.jqwik.api.Property)
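
@ForAll on an enum parameter needs no custom generator: jqwik derives one from the enum's constants, so every CompressionType is exercised against randomly seeded batches. A self-contained sketch of the same mechanism (the Codec enum is illustrative, standing in for Kafka's CompressionType):

import net.jqwik.api.ForAll;
import net.jqwik.api.Property;

class EnumGenerationExample {

    enum Codec { NONE, GZIP, SNAPPY, LZ4, ZSTD }

    @Property
    boolean everyCodecIsTried(@ForAll Codec codec, @ForAll long seed) {
        // jqwik resolves enum parameters automatically, drawing from all the
        // constants; the long covers edge cases such as 0 and Long.MIN_VALUE.
        return codec != null;
    }
}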

Aggregations

Property (net.jqwik.api.Property): 9
Random (java.util.Random): 7
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2
MemoryRecords (org.apache.kafka.common.record.MemoryRecords): 2
HashSet (java.util.HashSet): 1
FileRecords (org.apache.kafka.common.record.FileRecords): 1