Use of io.streamnative.pulsar.handlers.kop.KafkaCommandDecoder.KafkaHeaderAndRequest in project starlight-for-kafka by DataStax.
Class KafkaRequestHandlerTest, method testListOffsetsForNotExistedTopic.
@Test(timeOut = 10000)
public void testListOffsetsForNotExistedTopic() throws Exception {
    final TopicPartition topicPartition = new TopicPartition("testListOffsetsForNotExistedTopic", 0);
    final CompletableFuture<AbstractResponse> responseFuture = new CompletableFuture<>();
    final RequestHeader header = new RequestHeader(
            ApiKeys.LIST_OFFSETS, ApiKeys.LIST_OFFSETS.latestVersion(), "client", 0);
    final ListOffsetRequest request = ListOffsetRequest.Builder
            .forConsumer(true, IsolationLevel.READ_UNCOMMITTED)
            .setTargetTimes(KafkaCommonTestUtils.newListOffsetTargetTimes(
                    topicPartition, ListOffsetRequest.EARLIEST_TIMESTAMP))
            .build(ApiKeys.LIST_OFFSETS.latestVersion());
    // Dispatch directly to the request handler and block on the response future.
    handler.handleListOffsetRequest(
            new KafkaHeaderAndRequest(header, request, PulsarByteBufAllocator.DEFAULT.heapBuffer(), null),
            responseFuture);
    final ListOffsetResponse response = (ListOffsetResponse) responseFuture.get();
    // A topic that was never created should report UNKNOWN_TOPIC_OR_PARTITION.
    assertTrue(response.responseData().containsKey(topicPartition));
    assertEquals(response.responseData().get(topicPartition).error, Errors.UNKNOWN_TOPIC_OR_PARTITION);
}
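The helper KafkaCommonTestUtils.newListOffsetTargetTimes is not reproduced on this page. A minimal sketch of what it plausibly builds, assuming the pre-KIP-516 ListOffsetRequest API in which target times are a map keyed by TopicPartition (the PartitionData constructor taking an optional leader epoch is an assumption; imports elided as elsewhere on this page):

// Hypothetical reconstruction, not taken from the project source.
static Map<TopicPartition, ListOffsetRequest.PartitionData> newListOffsetTargetTimes(
        TopicPartition topicPartition, long timestamp) {
    // Optional.empty(): the tests supply no current leader epoch.
    return Collections.singletonMap(topicPartition,
            new ListOffsetRequest.PartitionData(timestamp, Optional.empty()));
}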
Use of io.streamnative.pulsar.handlers.kop.KafkaCommandDecoder.KafkaHeaderAndRequest in project starlight-for-kafka by DataStax.
Class KafkaApisTest, method buildRequest.
static KafkaHeaderAndRequest buildRequest(AbstractRequest.Builder builder, SocketAddress serviceAddress) {
    AbstractRequest request = builder.build();
    // Serialize the request behind a synthetic header, then parse it back so the
    // resulting KafkaHeaderAndRequest matches what KafkaCommandDecoder would produce.
    ByteBuffer serializedRequest = request.serialize(
            new RequestHeader(builder.apiKey(), request.version(), "fake_client_id", 0));
    ByteBuf byteBuf = Unpooled.copiedBuffer(serializedRequest);
    RequestHeader header = RequestHeader.parse(serializedRequest);
    ApiKeys apiKey = header.apiKey();
    short apiVersion = header.apiVersion();
    Struct struct = apiKey.parseRequest(apiVersion, serializedRequest);
    AbstractRequest body = AbstractRequest.parseRequest(apiKey, apiVersion, struct);
    return new KafkaHeaderAndRequest(header, body, byteBuf, serviceAddress);
}
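The snippets below call buildRequest with a single argument, so the test class presumably also carries a convenience overload along these lines (a sketch; the serviceAddress field is an assumed test fixture, not shown on this page):

KafkaHeaderAndRequest buildRequest(AbstractRequest.Builder builder) {
    // serviceAddress: assumed instance field holding the broker's advertised address.
    return buildRequest(builder, serviceAddress);
}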
Use of io.streamnative.pulsar.handlers.kop.KafkaCommandDecoder.KafkaHeaderAndRequest in project starlight-for-kafka by DataStax.
Class KafkaApisTest, method testGetOffsetsForUnknownTopic.
// Disabled, see https://github.com/streamnative/kop/issues/51
@Test(timeOut = 20000, enabled = false)
public void testGetOffsetsForUnknownTopic() throws Exception {
    String topicName = "kopTestGetOffsetsForUnknownTopic";
    TopicPartition tp = new TopicPartition(topicName, 0);
    ListOffsetRequest.Builder builder = ListOffsetRequest.Builder
            .forConsumer(false, IsolationLevel.READ_UNCOMMITTED)
            .setTargetTimes(KafkaCommonTestUtils.newListOffsetTargetTimes(tp, ListOffsetRequest.LATEST_TIMESTAMP));
    KafkaHeaderAndRequest request = buildRequest(builder);
    CompletableFuture<AbstractResponse> responseFuture = new CompletableFuture<>();
    kafkaRequestHandler.handleListOffsetRequest(request, responseFuture);
    ListOffsetResponse listOffsetResponse = (ListOffsetResponse) responseFuture.get();
    assertEquals(listOffsetResponse.responseData().get(tp).error, Errors.UNKNOWN_TOPIC_OR_PARTITION);
}
Use of io.streamnative.pulsar.handlers.kop.KafkaCommandDecoder.KafkaHeaderAndRequest in project starlight-for-kafka by DataStax.
Class KafkaApisTest, method listOffset.
private ListOffsetResponse listOffset(long timestamp, TopicPartition tp) throws Exception {
    ListOffsetRequest.Builder builder = ListOffsetRequest.Builder
            .forConsumer(true, IsolationLevel.READ_UNCOMMITTED)
            .setTargetTimes(KafkaCommonTestUtils.newListOffsetTargetTimes(tp, timestamp));
    KafkaHeaderAndRequest request = buildRequest(builder);
    CompletableFuture<AbstractResponse> responseFuture = new CompletableFuture<>();
    kafkaRequestHandler.handleListOffsetRequest(request, responseFuture);
    return (ListOffsetResponse) responseFuture.get();
}
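An assumed usage of the helper above (not shown on this page): query both sentinel timestamps and verify the partition resolves without error.

// EARLIEST_TIMESTAMP / LATEST_TIMESTAMP are the sentinel values defined on ListOffsetRequest.
ListOffsetResponse earliest = listOffset(ListOffsetRequest.EARLIEST_TIMESTAMP, tp);
ListOffsetResponse latest = listOffset(ListOffsetRequest.LATEST_TIMESTAMP, tp);
assertEquals(earliest.responseData().get(tp).error, Errors.NONE);
assertEquals(latest.responseData().get(tp).error, Errors.NONE);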
Use of io.streamnative.pulsar.handlers.kop.KafkaCommandDecoder.KafkaHeaderAndRequest in project starlight-for-kafka by DataStax.
Class KafkaApisTest, method testBrokerRespectsPartitionsOrderAndSizeLimits.
@Ignore
@Test(timeOut = 20000)
public void testBrokerRespectsPartitionsOrderAndSizeLimits() throws Exception {
    String topicName = "kopBrokerRespectsPartitionsOrderAndSizeLimits";
    int numberTopics = 8;
    int numberPartitions = 6;
    int messagesPerPartition = 9;
    int maxResponseBytes = 800;
    int maxPartitionBytes = 900;
    List<TopicPartition> topicPartitions = createTopics(topicName, numberTopics, numberPartitions);
    // The last two partitions receive the oversized messages.
    List<TopicPartition> partitionsWithLargeMessages =
            topicPartitions.subList(topicPartitions.size() - 2, topicPartitions.size());
    TopicPartition partitionWithLargeMessage1 = partitionsWithLargeMessages.get(0);
    TopicPartition partitionWithLargeMessage2 = partitionsWithLargeMessages.get(1);
    List<TopicPartition> partitionsWithoutLargeMessages =
            topicPartitions.subList(0, topicPartitions.size() - 2);
    @Cleanup
    KafkaProducer<String, String> kProducer = createKafkaProducer();
    produceData(kProducer, topicPartitions, messagesPerPartition);
    // One message exceeds the per-partition byte limit, the other the whole-response limit.
    kProducer.send(new ProducerRecord<>(
            partitionWithLargeMessage1.topic(),
            partitionWithLargeMessage1.partition(),
            "larger than partition limit",
            new String(new byte[maxPartitionBytes + 1]))).get();
    kProducer.send(new ProducerRecord<>(
            partitionWithLargeMessage2.topic(),
            partitionWithLargeMessage2.partition(),
            "larger than response limit",
            new String(new byte[maxResponseBytes + 1]))).get();
    // 1. Partitions with large messages at the end.
    Collections.shuffle(partitionsWithoutLargeMessages);
    List<TopicPartition> shuffledTopicPartitions1 = Lists.newArrayListWithExpectedSize(topicPartitions.size());
    shuffledTopicPartitions1.addAll(partitionsWithoutLargeMessages);
    shuffledTopicPartitions1.addAll(partitionsWithLargeMessages);
    KafkaHeaderAndRequest fetchRequest1 = createFetchRequest(
            maxResponseBytes, maxPartitionBytes, shuffledTopicPartitions1, Collections.emptyMap());
    CompletableFuture<AbstractResponse> responseFuture1 = new CompletableFuture<>();
    kafkaRequestHandler.handleFetchRequest(fetchRequest1, responseFuture1);
    FetchResponse<MemoryRecords> fetchResponse1 = (FetchResponse<MemoryRecords>) responseFuture1.get();
    checkFetchResponse(shuffledTopicPartitions1, fetchResponse1,
            maxPartitionBytes, maxResponseBytes, messagesPerPartition);
    // 2. Same as 1, but shuffled again.
    Collections.shuffle(partitionsWithoutLargeMessages);
    List<TopicPartition> shuffledTopicPartitions2 = Lists.newArrayListWithExpectedSize(topicPartitions.size());
    shuffledTopicPartitions2.addAll(partitionsWithoutLargeMessages);
    shuffledTopicPartitions2.addAll(partitionsWithLargeMessages);
    KafkaHeaderAndRequest fetchRequest2 = createFetchRequest(
            maxResponseBytes, maxPartitionBytes, shuffledTopicPartitions2, Collections.emptyMap());
    CompletableFuture<AbstractResponse> responseFuture2 = new CompletableFuture<>();
    kafkaRequestHandler.handleFetchRequest(fetchRequest2, responseFuture2);
    FetchResponse<MemoryRecords> fetchResponse2 = (FetchResponse<MemoryRecords>) responseFuture2.get();
    checkFetchResponse(shuffledTopicPartitions2, fetchResponse2,
            maxPartitionBytes, maxResponseBytes, messagesPerPartition);
    // 3. Partition with a message larger than the partition limit at the start of the list;
    //    its fetch offset is advanced past the produced data.
    Collections.shuffle(partitionsWithoutLargeMessages);
    List<TopicPartition> shuffledTopicPartitions3 = Lists.newArrayListWithExpectedSize(topicPartitions.size());
    shuffledTopicPartitions3.addAll(partitionsWithLargeMessages);
    shuffledTopicPartitions3.addAll(partitionsWithoutLargeMessages);
    Map<TopicPartition, Long> offsetMaps = Maps.newHashMap();
    offsetMaps.put(partitionWithLargeMessage1, Long.valueOf(messagesPerPartition));
    KafkaHeaderAndRequest fetchRequest3 = createFetchRequest(
            maxResponseBytes, maxPartitionBytes, shuffledTopicPartitions3, offsetMaps);
    CompletableFuture<AbstractResponse> responseFuture3 = new CompletableFuture<>();
    kafkaRequestHandler.handleFetchRequest(fetchRequest3, responseFuture3);
    FetchResponse<MemoryRecords> fetchResponse3 = (FetchResponse<MemoryRecords>) responseFuture3.get();
    checkFetchResponse(shuffledTopicPartitions3, fetchResponse3,
            maxPartitionBytes, maxResponseBytes, messagesPerPartition);
}
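createFetchRequest and checkFetchResponse are not reproduced on this page. A minimal sketch of what createFetchRequest plausibly does, assuming the pre-KIP-516 FetchRequest.Builder API and the single-argument buildRequest overload sketched earlier:

// Hypothetical reconstruction, not taken from the project source.
private KafkaHeaderAndRequest createFetchRequest(int maxResponseBytes,
                                                 int maxPartitionBytes,
                                                 List<TopicPartition> topicPartitions,
                                                 Map<TopicPartition, Long> offsetMap) {
    // A LinkedHashMap preserves the shuffled partition order, which is exactly
    // what the test above exercises.
    LinkedHashMap<TopicPartition, FetchRequest.PartitionData> fetchData = new LinkedHashMap<>();
    for (TopicPartition tp : topicPartitions) {
        long fetchOffset = offsetMap.getOrDefault(tp, 0L);
        fetchData.put(tp, new FetchRequest.PartitionData(
                fetchOffset, 0L, maxPartitionBytes, Optional.empty()));
    }
    return buildRequest(FetchRequest.Builder
            .forConsumer(Integer.MAX_VALUE, 0, fetchData)
            .setMaxBytes(maxResponseBytes));
}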