Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class KafkaBasedLogTest, method testSendAndReadToEnd.
@Test
public void testSendAndReadToEnd() throws Exception {
    expectStart();
    TestFuture<RecordMetadata> tp0Future = new TestFuture<>();
    ProducerRecord<String, String> tp0Record = new ProducerRecord<>(TOPIC, TP0_KEY, TP0_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback0 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp0Record), EasyMock.capture(callback0))).andReturn(tp0Future);
    TestFuture<RecordMetadata> tp1Future = new TestFuture<>();
    ProducerRecord<String, String> tp1Record = new ProducerRecord<>(TOPIC, TP1_KEY, TP1_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback1 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp1Record), EasyMock.capture(callback1))).andReturn(tp1Future);
    // Producer flushes when read to log end is called
    producer.flush();
    PowerMock.expectLastCall();
    expectStop();
    PowerMock.replayAll();
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 0L);
    endOffsets.put(TP1, 0L);
    consumer.updateEndOffsets(endOffsets);
    store.start();
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(0L, consumer.position(TP0));
    assertEquals(0L, consumer.position(TP1));
    // Set some keys
    final AtomicInteger invoked = new AtomicInteger(0);
    org.apache.kafka.clients.producer.Callback producerCallback = (metadata, exception) -> invoked.incrementAndGet();
    store.send(TP0_KEY, TP0_VALUE, producerCallback);
    store.send(TP1_KEY, TP1_VALUE, producerCallback);
    assertEquals(0, invoked.get());
    // Output not used, so it is safe not to return a real value for testing
    tp1Future.resolve((RecordMetadata) null);
    callback1.getValue().onCompletion(null, null);
    assertEquals(1, invoked.get());
    tp0Future.resolve((RecordMetadata) null);
    callback0.getValue().onCompletion(null, null);
    assertEquals(2, invoked.get());
    // Now we should have to wait for the records to be read back when we call readToEnd()
    final AtomicBoolean getInvoked = new AtomicBoolean(false);
    final FutureCallback<Void> readEndFutureCallback = new FutureCallback<>((error, result) -> getInvoked.set(true));
    consumer.schedulePollTask(() -> {
        // Once we're synchronized in a poll, start the read to end and schedule the exact set of poll events
        // that should follow. This readToEnd call will immediately wake up this consumer.poll() call without
        // returning any data.
        Map<TopicPartition, Long> newEndOffsets = new HashMap<>();
        newEndOffsets.put(TP0, 2L);
        newEndOffsets.put(TP1, 2L);
        consumer.updateEndOffsets(newEndOffsets);
        store.readToEnd(readEndFutureCallback);
        // Should keep polling until it reaches the current log end offset for all partitions
        consumer.scheduleNopPollTask();
        consumer.scheduleNopPollTask();
        consumer.scheduleNopPollTask();
        consumer.schedulePollTask(() -> {
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE, new RecordHeaders(), Optional.empty()));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE_NEW, new RecordHeaders(), Optional.empty()));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP1_KEY, TP1_VALUE, new RecordHeaders(), Optional.empty()));
        });
        consumer.schedulePollTask(() -> consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TP1_KEY, TP1_VALUE_NEW, new RecordHeaders(), Optional.empty())));
        // Already have a FutureCallback that should be invoked/awaited, so no need for a follow-up finishedLatch
    });
    readEndFutureCallback.get(10000, TimeUnit.MILLISECONDS);
    assertTrue(getInvoked.get());
    assertEquals(2, consumedRecords.size());
    assertEquals(2, consumedRecords.get(TP0).size());
    assertEquals(TP0_VALUE, consumedRecords.get(TP0).get(0).value());
    assertEquals(TP0_VALUE_NEW, consumedRecords.get(TP0).get(1).value());
    assertEquals(2, consumedRecords.get(TP1).size());
    assertEquals(TP1_VALUE, consumedRecords.get(TP1).get(0).value());
    assertEquals(TP1_VALUE_NEW, consumedRecords.get(TP1).get(1).value());
    // Cleanup
    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
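The scheduled poll tasks above drive KafkaBasedLog's read loop through Kafka's MockConsumer. The following is a minimal, standalone sketch of that mechanism (the topic name "log-topic" and the class name MockConsumerPollSketch are illustrative, not taken from the test): a task registered with schedulePollTask runs at the start of the next poll() and injects a record with addRecord, so that same poll returns it.

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;

public class MockConsumerPollSketch {
    public static void main(String[] args) {
        MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        TopicPartition tp = new TopicPartition("log-topic", 0);
        consumer.assign(Collections.singletonList(tp));

        // MockConsumer needs a beginning offset so the first poll can establish a valid position.
        Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
        beginningOffsets.put(tp, 0L);
        consumer.updateBeginningOffsets(beginningOffsets);

        // The task runs at the start of the next poll() and injects one record with empty headers.
        consumer.schedulePollTask(() -> consumer.addRecord(new ConsumerRecord<>(
                "log-topic", 0, 0L, 0L, TimestampType.CREATE_TIME, 0, 0,
                "key", "value", new RecordHeaders(), Optional.empty())));

        // This poll executes the scheduled task and should return the injected record.
        System.out.println(consumer.poll(Duration.ofMillis(1)).count());
    }
}

Chaining such tasks, as the test does, lets each poll return a predetermined batch while the log catches up to the end offsets set via updateEndOffsets.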
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class KafkaBasedLogTest, method testReloadOnStart.
@Test
public void testReloadOnStart() throws Exception {
    expectStart();
    expectStop();
    PowerMock.replayAll();
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 1L);
    endOffsets.put(TP1, 1L);
    consumer.updateEndOffsets(endOffsets);
    final CountDownLatch finishedLatch = new CountDownLatch(1);
    consumer.schedulePollTask(() -> {
        // Use the first poll task to set up the sequence of remaining responses to polls.
        // Should keep polling until it reaches the current log end offset for all partitions,
        // handling as many empty polls as needed.
        consumer.scheduleNopPollTask();
        consumer.scheduleNopPollTask();
        consumer.schedulePollTask(() -> consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE, new RecordHeaders(), Optional.empty())));
        consumer.scheduleNopPollTask();
        consumer.scheduleNopPollTask();
        consumer.schedulePollTask(() -> consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP1_KEY, TP1_VALUE, new RecordHeaders(), Optional.empty())));
        consumer.schedulePollTask(finishedLatch::countDown);
    });
    store.start();
    assertTrue(finishedLatch.await(10000, TimeUnit.MILLISECONDS));
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(2, consumedRecords.size());
    assertEquals(TP0_VALUE, consumedRecords.get(TP0).get(0).value());
    assertEquals(TP1_VALUE, consumedRecords.get(TP1).get(0).value());
    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
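Each injected record above carries an empty RecordHeaders instance because the ConsumerRecord constructor used here requires a Headers argument. For reference, here is a short standalone sketch of the RecordHeaders API itself (the header keys, topic name, and class name are illustrative): build headers, attach them to a ProducerRecord, and read them back.

import java.nio.charset.StandardCharsets;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class RecordHeadersSketch {
    public static void main(String[] args) {
        // Build a mutable header collection; entries can be added by key/value or as Header objects.
        Headers headers = new RecordHeaders();
        headers.add("trace-id", "abc-123".getBytes(StandardCharsets.UTF_8));
        headers.add(new RecordHeader("origin", "test".getBytes(StandardCharsets.UTF_8)));

        // Attach the headers to a ProducerRecord (topic, partition, key, value, headers).
        ProducerRecord<String, String> record =
                new ProducerRecord<>("example-topic", null, "key", "value", headers);

        // lastHeader returns the most recently added entry for a key; value() is the raw bytes.
        Header traceId = record.headers().lastHeader("trace-id");
        System.out.println(new String(traceId.value(), StandardCharsets.UTF_8));
    }
}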
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class KafkaBasedLogTest, method testPollConsumerError.
@Test
public void testPollConsumerError() throws Exception {
    expectStart();
    expectStop();
    PowerMock.replayAll();
    final CountDownLatch finishedLatch = new CountDownLatch(1);
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 1L);
    endOffsets.put(TP1, 1L);
    consumer.updateEndOffsets(endOffsets);
    consumer.schedulePollTask(() -> {
        // Trigger an exception on a subsequent poll
        consumer.schedulePollTask(() -> consumer.setPollException(Errors.COORDINATOR_NOT_AVAILABLE.exception()));
        // Should keep polling until it reaches the current log end offset for all partitions
        consumer.scheduleNopPollTask();
        consumer.scheduleNopPollTask();
        consumer.schedulePollTask(() -> {
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE_NEW, new RecordHeaders(), Optional.empty()));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE_NEW, new RecordHeaders(), Optional.empty()));
        });
        consumer.schedulePollTask(finishedLatch::countDown);
    });
    store.start();
    assertTrue(finishedLatch.await(10000, TimeUnit.MILLISECONDS));
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(1L, consumer.position(TP0));
    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
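This test leans on MockConsumer#setPollException: the queued exception surfaces from a later poll() and, judging by the test resuming successfully afterwards, is then cleared so subsequent polls (and KafkaBasedLog's read loop) continue. A standalone sketch of that behavior follows; the topic and class names are illustrative, and the "cleared after one poll" point is an assumption based on the test above.

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.Errors;

public class PollExceptionSketch {
    public static void main(String[] args) {
        MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        TopicPartition tp = new TopicPartition("example-topic", 0);
        consumer.assign(Collections.singletonList(tp));
        Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
        beginningOffsets.put(tp, 0L);
        consumer.updateBeginningOffsets(beginningOffsets);

        // Queue an error for the next poll, as the scheduled task in the test does.
        consumer.setPollException(Errors.COORDINATOR_NOT_AVAILABLE.exception());
        try {
            consumer.poll(Duration.ofMillis(1));
        } catch (KafkaException e) {
            // The queued exception surfaces on this poll.
            System.out.println("poll failed: " + e.getMessage());
        }
        // The error has been consumed, so a later poll returns normally (empty here).
        System.out.println(consumer.poll(Duration.ofMillis(1)).isEmpty());
    }
}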
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class RecordCollectorTest, method shouldThrowInformativeStreamsExceptionOnValueClassCastException.
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void shouldThrowInformativeStreamsExceptionOnValueClassCastException() {
    final StreamsException expected = assertThrows(
        StreamsException.class,
        () -> this.collector.send(
            "topic",
            "key",
            "value",
            new RecordHeaders(),
            0,
            0L,
            new StringSerializer(),
            // need to add cast to trigger `ClassCastException`
            (Serializer) new LongSerializer()));
    assertThat(expected.getCause(), instanceOf(ClassCastException.class));
    assertThat(expected.getMessage(), equalTo(
        "ClassCastException while producing data to topic topic. " +
        "A serializer (key: org.apache.kafka.common.serialization.StringSerializer / value: org.apache.kafka.common.serialization.LongSerializer) " +
        "is not compatible to the actual key or value type (key type: java.lang.String / value type: java.lang.String). " +
        "Change the default Serdes in StreamConfig or provide correct Serdes via method parameters " +
        "(for example if using the DSL, `#to(String topic, Produced<K, V> produced)` with `Produced.keySerde(WindowedSerdes.timeWindowedSerdeFrom(String.class))`)."));
}
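The raw (Serializer) cast above is what lets a LongSerializer be handed a String value, so the failure only shows up at runtime as a ClassCastException inside send(). A minimal sketch of that failure mode in isolation (class name and topic string are illustrative):

import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.Serializer;

public class ValueCastSketch {
    @SuppressWarnings({"unchecked", "rawtypes"})
    public static void main(String[] args) {
        // A raw Serializer reference lets a LongSerializer accept any Object at compile time.
        Serializer serializer = new LongSerializer();
        Object value = "value"; // a String, not a Long
        try {
            serializer.serialize("topic", value);
        } catch (ClassCastException e) {
            // The erased bridge method casts the String to Long and fails at runtime.
            System.out.println("caught: " + e);
        }
    }
}

RecordCollector catches this ClassCastException and rethrows it as the informative StreamsException the test asserts on.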
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class RecordCollectorTest, method shouldSendWithNoPartition.
@Test
public void shouldSendWithNoPartition() {
    final Headers headers = new RecordHeaders(new Header[] {new RecordHeader("key", "value".getBytes())});
    collector.send(topic, "3", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "9", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "27", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "81", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "243", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "28", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "82", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "244", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "245", "0", headers, null, null, stringSerializer, stringSerializer);
    final Map<TopicPartition, Long> offsets = collector.offsets();
    // With the mock producer and no explicit partition, the default producer partitioner (murmur2 hash of the key) picks the partition
    assertEquals(3L, (long) offsets.get(new TopicPartition(topic, 0)));
    assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 1)));
    assertEquals(1L, (long) offsets.get(new TopicPartition(topic, 2)));
    assertEquals(9, mockProducer.history().size());
}
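The comment in the test refers to the producer's key-based default partitioning: when no partition is given, the serialized key is hashed with murmur2 and taken modulo the partition count. Below is a standalone sketch of that computation, assuming the classic murmur2-based default partitioner and the three partitions the assertions above imply; the class name is illustrative, and Utils is an internal Kafka helper used here only to show the hash.

import java.nio.charset.StandardCharsets;

import org.apache.kafka.common.utils.Utils;

public class DefaultPartitionSketch {
    public static void main(String[] args) {
        int numPartitions = 3; // the three partitions the assertions above rely on
        for (String key : new String[] {"3", "9", "27", "81", "243", "28", "82", "244", "245"}) {
            byte[] keyBytes = key.getBytes(StandardCharsets.UTF_8); // StringSerializer output is UTF-8 bytes
            int partition = Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
            System.out.println(key + " -> partition " + partition);
        }
    }
}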