Use of org.apache.kafka.common.record.Records in project kafka by apache.
From class KafkaRaftClientTest, method testFetchResponseIgnoredAfterBecomingCandidate:
@Test
public void testFetchResponseIgnoredAfterBecomingCandidate() throws Exception {
    int localId = 0;
    int otherNodeId = 1;
    int epoch = 5;

    // The other node starts out as the leader
    Set<Integer> voters = Utils.mkSet(localId, otherNodeId);
    RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters)
        .withElectedLeader(epoch, otherNodeId)
        .build();
    context.assertElectedLeader(epoch, otherNodeId);

    // Wait until we have a Fetch inflight to the leader
    context.pollUntilRequest();
    int fetchCorrelationId = context.assertSentFetchRequest(epoch, 0L, 0);

    // Now await the fetch timeout and become a candidate
    context.time.sleep(context.fetchTimeoutMs);
    context.client.poll();
    context.assertVotedCandidate(epoch + 1, localId);

    // The fetch response from the old leader returns, but it should be ignored
    Records records = context.buildBatch(0L, 3, Arrays.asList("a", "b"));
    context.deliverResponse(fetchCorrelationId, otherNodeId,
        context.fetchResponse(epoch, otherNodeId, records, 0L, Errors.NONE));
    context.client.poll();
    assertEquals(0, context.log.endOffset().offset);
    context.assertVotedCandidate(epoch + 1, localId);
}
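In this test, context.buildBatch is a RaftClientTestContext helper that produces the Records payload for the simulated fetch response. Outside the test harness, an equivalent in-memory payload can be built directly with MemoryRecords. A minimal sketch (the exact withRecords overload varies slightly across Kafka versions; recent releases take a Compression argument instead of CompressionType):

    import java.nio.charset.StandardCharsets;
    import org.apache.kafka.common.record.CompressionType;
    import org.apache.kafka.common.record.MemoryRecords;
    import org.apache.kafka.common.record.SimpleRecord;

    // Build an in-memory Records payload of two records starting at offset 0.
    // MemoryRecords implements Records, so it can be passed wherever the
    // interface is expected.
    MemoryRecords records = MemoryRecords.withRecords(0L, CompressionType.NONE,
        new SimpleRecord("a".getBytes(StandardCharsets.UTF_8)),
        new SimpleRecord("b".getBytes(StandardCharsets.UTF_8)));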
Use of org.apache.kafka.common.record.Records in project kafka by apache.
From class KafkaRaftClientTest, method testFetchResponseIgnoredAfterBecomingFollowerOfDifferentLeader:
@Test
public void testFetchResponseIgnoredAfterBecomingFollowerOfDifferentLeader() throws Exception {
    int localId = 0;
    int voter1 = localId;
    int voter2 = localId + 1;
    int voter3 = localId + 2;
    int epoch = 5;

    // Start out with `voter2` as the leader
    Set<Integer> voters = Utils.mkSet(voter1, voter2, voter3);
    RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters)
        .withElectedLeader(epoch, voter2)
        .build();
    context.assertElectedLeader(epoch, voter2);

    // Wait until we have a Fetch inflight to the leader
    context.pollUntilRequest();
    int fetchCorrelationId = context.assertSentFetchRequest(epoch, 0L, 0);

    // Now receive a BeginEpoch from `voter3`
    context.deliverRequest(context.beginEpochRequest(epoch + 1, voter3));
    context.client.poll();
    context.assertElectedLeader(epoch + 1, voter3);

    // The fetch response from the old leader returns, but it should be ignored
    Records records = context.buildBatch(0L, 3, Arrays.asList("a", "b"));
    FetchResponseData response = context.fetchResponse(epoch, voter2, records, 0L, Errors.NONE);
    context.deliverResponse(fetchCorrelationId, voter2, response);
    context.client.poll();
    assertEquals(0, context.log.endOffset().offset);
    context.assertElectedLeader(epoch + 1, voter3);
}
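Both tests above exercise the same guard: a pending fetch response is applied only if the client is still a follower of the sender in the epoch the request was sent; otherwise the records are dropped and the log end offset stays put. A hypothetical sketch of such a check (illustrative names only, not the actual KafkaRaftClient internals):

    import java.util.OptionalInt;

    // Hypothetical guard: apply a fetch response only when the sender is still
    // the current leader and the epoch has not advanced since the request.
    static boolean shouldApplyFetchResponse(boolean isFollower, int requestEpoch, int senderId,
                                            int currentEpoch, OptionalInt currentLeaderId) {
        return isFollower
            && requestEpoch == currentEpoch
            && currentLeaderId.isPresent()
            && currentLeaderId.getAsInt() == senderId;
    }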
Use of org.apache.kafka.common.record.Records in project kafka by apache.
From class KafkaRaftClientTest, method testInitializeAsCandidateAndBecomeLeader:
@Test
public void testInitializeAsCandidateAndBecomeLeader() throws Exception {
    int localId = 0;
    final int otherNodeId = 1;
    Set<Integer> voters = Utils.mkSet(localId, otherNodeId);
    RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters).build();
    context.assertUnknownLeader(0);

    context.time.sleep(2 * context.electionTimeoutMs());
    context.pollUntilRequest();
    context.assertVotedCandidate(1, localId);

    int correlationId = context.assertSentVoteRequest(1, 0, 0L, 1);
    context.deliverResponse(correlationId, otherNodeId, context.voteResponse(true, Optional.empty(), 1));

    // Become leader after receiving the vote
    context.pollUntil(() -> context.log.endOffset().offset == 1L);
    context.assertElectedLeader(1, localId);
    long electionTimestamp = context.time.milliseconds();

    // Leader change record appended
    assertEquals(1L, context.log.endOffset().offset);
    assertEquals(1L, context.log.lastFlushedOffset());

    // Send BeginQuorumEpoch to voters
    context.client.poll();
    context.assertSentBeginQuorumEpochRequest(1, 1);

    Records records = context.log.read(0, Isolation.UNCOMMITTED).records;
    RecordBatch batch = records.batches().iterator().next();
    assertTrue(batch.isControlBatch());

    Record record = batch.iterator().next();
    assertEquals(electionTimestamp, record.timestamp());
    RaftClientTestContext.verifyLeaderChangeMessage(localId,
        Arrays.asList(localId, otherNodeId),
        Arrays.asList(otherNodeId, localId),
        record.key(), record.value());
}
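The batch read back at the end of this test is a control batch carrying the leader change record. For reference, a small sketch of scanning an arbitrary Records payload for control records, assuming the standard ControlRecordType.parse helper, which decodes the type from the record key:

    import org.apache.kafka.common.record.ControlRecordType;
    import org.apache.kafka.common.record.Record;
    import org.apache.kafka.common.record.RecordBatch;
    import org.apache.kafka.common.record.Records;

    // Sketch: report the type of every control record in the payload
    // (e.g. LEADER_CHANGE for the record appended in the test above).
    static void printControlRecordTypes(Records records) {
        for (RecordBatch batch : records.batches()) {
            if (!batch.isControlBatch())
                continue;
            for (Record record : batch) {
                // The control record type is encoded in the record key.
                ControlRecordType type = ControlRecordType.parse(record.key());
                System.out.println("control record: " + type);
            }
        }
    }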
Use of org.apache.kafka.common.record.Records in project kafka by apache.
From class FetcherTest, method testFetcherConcurrency:
@Test
public void testFetcherConcurrency() throws Exception {
    int numPartitions = 20;
    Set<TopicPartition> topicPartitions = new HashSet<>();
    for (int i = 0; i < numPartitions; i++)
        topicPartitions.add(new TopicPartition(topicName, i));

    LogContext logContext = new LogContext();
    buildDependencies(new MetricConfig(), Long.MAX_VALUE,
        new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), logContext);

    fetcher = new Fetcher<byte[], byte[]>(new LogContext(), consumerClient, minBytes, maxBytes,
            maxWaitMs, fetchSize, 2 * numPartitions, true, "", new ByteArrayDeserializer(),
            new ByteArrayDeserializer(), metadata, subscriptions, metrics, metricsRegistry, time,
            retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, apiVersions) {

        @Override
        protected FetchSessionHandler sessionHandler(int id) {
            final FetchSessionHandler handler = super.sessionHandler(id);
            if (handler == null) {
                return null;
            } else {
                return new FetchSessionHandler(new LogContext(), id) {
                    @Override
                    public Builder newBuilder() {
                        verifySessionPartitions();
                        return handler.newBuilder();
                    }

                    @Override
                    public boolean handleResponse(FetchResponse response, short version) {
                        verifySessionPartitions();
                        return handler.handleResponse(response, version);
                    }

                    @Override
                    public void handleError(Throwable t) {
                        verifySessionPartitions();
                        handler.handleError(t);
                    }

                    // Verify that session partitions can be traversed safely.
                    private void verifySessionPartitions() {
                        try {
                            Field field = FetchSessionHandler.class.getDeclaredField("sessionPartitions");
                            field.setAccessible(true);
                            LinkedHashMap<?, ?> sessionPartitions =
                                (LinkedHashMap<?, ?>) field.get(handler);
                            for (Map.Entry<?, ?> entry : sessionPartitions.entrySet()) {
                                // If `sessionPartitions` is modified on another thread, Thread.yield
                                // increases the likelihood of a ConcurrentModificationException when
                                // appropriate synchronization is not used.
                                Thread.yield();
                            }
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                };
            }
        }
    };

    MetadataResponse initialMetadataResponse = RequestTestUtils.metadataUpdateWithIds(
        1, singletonMap(topicName, numPartitions), tp -> validLeaderEpoch, topicIds);
    client.updateMetadata(initialMetadataResponse);
    fetchSize = 10000;

    assignFromUser(topicPartitions);
    topicPartitions.forEach(tp -> subscriptions.seek(tp, 0L));

    AtomicInteger fetchesRemaining = new AtomicInteger(1000);
    executorService = Executors.newSingleThreadExecutor();
    Future<?> future = executorService.submit(() -> {
        while (fetchesRemaining.get() > 0) {
            synchronized (consumerClient) {
                if (!client.requests().isEmpty()) {
                    ClientRequest request = client.requests().peek();
                    FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
                    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap =
                        new LinkedHashMap<>();
                    for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry :
                            fetchRequest.fetchData(topicNames).entrySet()) {
                        TopicIdPartition tp = entry.getKey();
                        long offset = entry.getValue().fetchOffset;
                        responseMap.put(tp, new FetchResponseData.PartitionData()
                            .setPartitionIndex(tp.topicPartition().partition())
                            .setHighWatermark(offset + 2)
                            .setLastStableOffset(offset + 2)
                            .setLogStartOffset(0)
                            .setRecords(buildRecords(offset, 2, offset)));
                    }
                    client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
                    consumerClient.poll(time.timer(0));
                }
            }
        }
        return fetchesRemaining.get();
    });

    Map<TopicPartition, Long> nextFetchOffsets = topicPartitions.stream()
        .collect(Collectors.toMap(Function.identity(), t -> 0L));
    while (fetchesRemaining.get() > 0 && !future.isDone()) {
        if (fetcher.sendFetches() == 1) {
            synchronized (consumerClient) {
                consumerClient.poll(time.timer(0));
            }
        }
        if (fetcher.hasCompletedFetches()) {
            Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
            if (!fetchedRecords.isEmpty()) {
                fetchesRemaining.decrementAndGet();
                fetchedRecords.forEach((tp, records) -> {
                    assertEquals(2, records.size());
                    long nextOffset = nextFetchOffsets.get(tp);
                    assertEquals(nextOffset, records.get(0).offset());
                    assertEquals(nextOffset + 1, records.get(1).offset());
                    nextFetchOffsets.put(tp, nextOffset + 2);
                });
            }
        }
    }
    assertEquals(0, future.get());
}
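The reflective verifySessionPartitions hook above relies on the fail-fast behavior of LinkedHashMap iterators: traversing the map while another thread mutates it throws ConcurrentModificationException on a best-effort basis. A standalone illustration of that failure mode (plain JDK, not part of the Kafka test suite):

    import java.util.ConcurrentModificationException;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class UnsafeTraversalDemo {
        public static void main(String[] args) throws InterruptedException {
            Map<Integer, Integer> map = new LinkedHashMap<>();
            for (int i = 0; i < 100_000; i++)
                map.put(i, i);

            // Mutate the map concurrently with the traversal below.
            Thread mutator = new Thread(() -> {
                for (int i = 0; i < 100_000; i++)
                    map.remove(i);
            });
            mutator.start();

            try {
                for (Map.Entry<Integer, Integer> entry : map.entrySet()) {
                    // Yield to widen the race window, as verifySessionPartitions does.
                    Thread.yield();
                }
                System.out.println("traversal finished without detection (the check is best-effort)");
            } catch (ConcurrentModificationException e) {
                System.out.println("unsynchronized traversal detected: " + e);
            }
            mutator.join();
        }
    }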