Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
From class SenderTest, the method testDownConversionForMismatchedMagicValues:
@Test
public void testDownConversionForMismatchedMagicValues() throws Exception {
    // It can happen that we construct a record set with mismatching magic values (perhaps
    // because the partition leader changed after the record set was initially constructed).
    // In this case, we down-convert record sets with newer magic values to match the oldest
    // created record set.
    long offset = 0;

    // start off supporting produce request v3
    apiVersions.update("0", NodeApiVersions.create());
    Future<RecordMetadata> future1 = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(),
            null, null, MAX_BLOCK_TIMEOUT).future;

    // now the partition leader supports only v2
    apiVersions.update("0", NodeApiVersions.create(Collections.singleton(
            new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2))));
    Future<RecordMetadata> future2 = accumulator.append(tp1, 0L, "key".getBytes(), "value".getBytes(),
            null, null, MAX_BLOCK_TIMEOUT).future;

    // the partition leader supports produce request v3 again, but the accumulated
    // batches now have mixed magic values
    apiVersions.update("0", NodeApiVersions.create());

    ProduceResponse.PartitionResponse resp = new ProduceResponse.PartitionResponse(Errors.NONE, offset,
            RecordBatch.NO_TIMESTAMP, 100);
    Map<TopicPartition, ProduceResponse.PartitionResponse> partResp = new HashMap<>();
    partResp.put(tp0, resp);
    partResp.put(tp1, resp);
    ProduceResponse produceResponse = new ProduceResponse(partResp, 0);

    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            ProduceRequest request = (ProduceRequest) body;
            if (request.version() != 2)
                return false;

            Map<TopicPartition, MemoryRecords> recordsMap = request.partitionRecordsOrFail();
            if (recordsMap.size() != 2)
                return false;

            for (MemoryRecords records : recordsMap.values()) {
                if (records == null || records.sizeInBytes() == 0
                        || !records.hasMatchingMagic(RecordBatch.MAGIC_VALUE_V1))
                    return false;
            }
            return true;
        }
    }, produceResponse);

    // connect
    sender.run(time.milliseconds());
    // send produce request
    sender.run(time.milliseconds());

    assertTrue("Request should be completed", future1.isDone());
    assertTrue("Request should be completed", future2.isDone());
}
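For context, produce request v2 carries message format (magic) v1, which is why the matcher above pins the version to 2 and checks hasMatchingMagic(RecordBatch.MAGIC_VALUE_V1). Below is a minimal standalone sketch of the same magic-value check, using only the record APIs already shown in this section; the buffer size and key/value contents are arbitrary:

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class MagicValueSketch {
    public static void main(String[] args) {
        // build a record set explicitly at magic v1, the format that produce request v2 carries
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
                RecordBatch.MAGIC_VALUE_V1, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        builder.append(System.currentTimeMillis(), "key".getBytes(), "value".getBytes());
        MemoryRecords v1Records = builder.build();

        // every batch in the set carries magic v1, so the matcher in the test would accept it
        System.out.println(v1Records.hasMatchingMagic(RecordBatch.MAGIC_VALUE_V1)); // true
        System.out.println(v1Records.hasMatchingMagic(RecordBatch.MAGIC_VALUE_V2)); // false
    }
}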
Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
From class FetcherTest, the method testReturnCommittedTransactions:
@Test
public void testReturnCommittedTransactions() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(), new ByteArrayDeserializer(),
            new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);

    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));
    currentOffset += commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();

    List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);

    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            FetchRequest request = (FetchRequest) body;
            assertEquals(IsolationLevel.READ_COMMITTED, request.isolationLevel());
            return true;
        }
    }, fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));

    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetcher.fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(2, fetchedRecords.get(tp0).size());
}
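The test drives the Fetcher directly, but the behavior it verifies is what an application sees when it sets the consumer's isolation.level to read_committed. A hedged sketch of that application-level configuration follows; the broker address and group id are placeholders, not values from the test:

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class ReadCommittedConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group
        // return only non-transactional records and records from committed transactions
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // subscribe and poll as usual; aborted transactional records are filtered
            // client-side, which is exactly what the test verifies at the Fetcher level
        }
    }
}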
Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
From class FetcherTest, the method testMultipleAbortMarkers:
@Test
public void testMultipleAbortMarkers() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(), new ByteArrayDeserializer(),
            new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);

    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // duplicate abort -- should be ignored
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // now commit a transaction
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "commit1-1".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "commit1-2".getBytes(), "value".getBytes()));
    commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();

    List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
    abortedTransactions.add(new FetchResponse.AbortedTransaction(1, 0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);

    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetcher.fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(2, fetchedRecords.get(tp0).size());

    List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
    Set<String> committedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2"));
    Set<String> actuallyCommittedKeys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
        actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
    }
    assertEquals(committedKeys, actuallyCommittedKeys);
}
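Conceptually, the fetcher drops a transactional batch when the fetch response's aborted-transaction list contains an entry for the same producer ID whose firstOffset is at or before the batch's base offset, and the aborted range ends at the corresponding ABORT control marker. The following is an illustrative simplification of that rule, not the Fetcher's actual implementation; AbortedTxn is a hypothetical stand-in for the producerId/firstOffset pair the test supplies via FetchResponse.AbortedTransaction:

import java.util.Arrays;
import java.util.List;

public class AbortedFilterSketch {
    // hypothetical stand-in for the fields carried by FetchResponse.AbortedTransaction
    static class AbortedTxn {
        final long producerId;
        final long firstOffset;
        AbortedTxn(long producerId, long firstOffset) {
            this.producerId = producerId;
            this.firstOffset = firstOffset;
        }
    }

    // a batch is dropped if some aborted-transaction entry for the same producer
    // starts at or before the batch's base offset
    static boolean isAborted(long batchProducerId, long batchBaseOffset, List<AbortedTxn> aborted) {
        for (AbortedTxn txn : aborted) {
            if (txn.producerId == batchProducerId && txn.firstOffset <= batchBaseOffset)
                return true;
        }
        return false;
    }

    public static void main(String[] args) {
        // mirrors the single entry the test adds: producerId 1, firstOffset 0
        List<AbortedTxn> aborted = Arrays.asList(new AbortedTxn(1L, 0L));
        System.out.println(isAborted(1L, 0L, aborted)); // true: the "abort1-*" batch is dropped
        // the real fetcher also consumes the ABORT control marker, which closes the aborted
        // range, so the later "commit1-*" batch from the same producer is returned
    }
}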
Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
From class FetcherTest, the method testFetchResponseMetricsWithOnePartitionAtTheWrongOffset:
@Test
public void testFetchResponseMetricsWithOnePartitionAtTheWrongOffset() {
    subscriptions.assignFromUser(Utils.mkSet(tp0, tp1));
    subscriptions.seek(tp0, 0);
    subscriptions.seek(tp1, 0);

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));

    // send the fetch and then seek to a new offset
    assertEquals(1, fetcher.sendFetches());
    subscriptions.seek(tp1, 5);

    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();

    Map<TopicPartition, FetchResponse.PartitionData> partitions = new HashMap<>();
    partitions.put(tp0, new FetchResponse.PartitionData(Errors.NONE, 100,
            FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    partitions.put(tp1, new FetchResponse.PartitionData(Errors.NONE, 100,
            FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null,
            MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("val".getBytes()))));

    client.prepareResponse(new FetchResponse(Errors.NONE, new LinkedHashMap<>(partitions), 0, INVALID_SESSION_ID));
    consumerClient.poll(0);
    fetcher.fetchedRecords();

    // we should have ignored the record at the wrong offset
    int expectedBytes = 0;
    for (Record record : records.records())
        expectedBytes += record.sizeInBytes();

    assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON);
    assertEquals(3, recordsCountAverage.value(), EPSILON);
}
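Note that the expected-bytes loop sums Record.sizeInBytes() over the returned records rather than calling MemoryRecords.sizeInBytes(), which also counts the batch header overhead. A small sketch contrasting the two measures, reusing the builder calls from the test (buffer size and values are arbitrary):

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class RecordSizeSketch {
    public static void main(String[] args) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
                CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++)
            builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();

        int summed = 0;
        for (Record record : records.records())
            summed += record.sizeInBytes(); // per-record sizes only

        // sizeInBytes() also counts the batch header, so it is strictly larger here
        System.out.println(summed + " <= " + records.sizeInBytes());
    }
}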
Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
From class FetcherTest, the method testFetchNonContinuousRecords:
@Test
public void testFetchNonContinuousRecords() {
    // If we are fetching from a compacted topic, there may be gaps in the returned records.
    // This test verifies that the fetcher updates the current fetched/consumed positions
    // correctly for this case.
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    builder.appendWithOffset(15L, 0L, "key".getBytes(), "value-1".getBytes());
    builder.appendWithOffset(20L, 0L, "key".getBytes(), "value-2".getBytes());
    builder.appendWithOffset(30L, 0L, "key".getBytes(), "value-3".getBytes());
    MemoryRecords records = builder.build();

    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tp0, records, Errors.NONE, 100L, 0));
    consumerClient.poll(0);

    List<ConsumerRecord<byte[], byte[]>> consumerRecords = fetcher.fetchedRecords().get(tp0);
    assertEquals(3, consumerRecords.size());
    // the next fetch position is one past the last returned offset, not the record count
    assertEquals(31L, subscriptions.position(tp0).longValue());
    assertEquals(15L, consumerRecords.get(0).offset());
    assertEquals(20L, consumerRecords.get(1).offset());
    assertEquals(30L, consumerRecords.get(2).offset());
}
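The asserted position of 31 follows a simple rule: after a fetch that returns a batch with gaps (as log compaction produces), the consumer's next position is one past the last offset actually returned, not the count of records consumed. A minimal sketch of that rule over the same offsets, for illustration only:

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.TimestampType;

public class NextPositionSketch {
    public static void main(String[] args) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
                CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        builder.appendWithOffset(15L, 0L, "key".getBytes(), "value-1".getBytes());
        builder.appendWithOffset(20L, 0L, "key".getBytes(), "value-2".getBytes());
        builder.appendWithOffset(30L, 0L, "key".getBytes(), "value-3".getBytes());
        MemoryRecords records = builder.build();

        long nextPosition = 0L;
        for (Record record : records.records())
            nextPosition = record.offset() + 1; // advance past the last offset actually returned

        System.out.println(nextPosition); // 31, matching the test's asserted position
    }
}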