Use of org.apache.kafka.common.serialization.IntegerSerializer in project sda-dropwizard-commons by SDA-SE.
From the class ProducerRegistrationTest, method serializerShouldBeSetCorrectly:
@Test
public void serializerShouldBeSetCorrectly() {
    ProducerRegistration<Long, Integer> producerRegistration =
            ProducerRegistration.builder()
                    .forTopic("TOPIC")
                    .withDefaultProducer()
                    .withKeySerializer(new LongSerializer())
                    .withValueSerializer(new IntegerSerializer())
                    .build();
    assertThat(producerRegistration).isNotNull();
    assertThat(producerRegistration.getTopic().getTopicName()).isEqualTo("TOPIC");
    assertThat(producerRegistration.getKeySerializer()).isInstanceOf(LongSerializer.class);
    assertThat(producerRegistration.getValueSerializer()).isInstanceOf(IntegerSerializer.class);
}
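For reference, IntegerSerializer is stateless: it encodes an Integer as four big-endian bytes and maps null to null, which is why instances can simply be created inline as above. A minimal round-trip sketch (the topic name is arbitrary, since the serializer ignores it):

IntegerSerializer serializer = new IntegerSerializer();
IntegerDeserializer deserializer = new IntegerDeserializer();
// 4 bytes, big-endian: 0x00 0x00 0x00 0x2A
byte[] bytes = serializer.serialize("any-topic", 42);
assert bytes.length == 4;
assert deserializer.deserialize("any-topic", bytes) == 42;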
Use of org.apache.kafka.common.serialization.IntegerSerializer in project kafka-junit by charithe.
From the class EphemeralKafkaClusterTest, method testStartAndStop:
@Test
public void testStartAndStop() throws Exception {
    try (KafkaConsumer<Integer, String> consumer =
                 new KafkaConsumer<>(cluster.consumerConfig(false), new IntegerDeserializer(), new StringDeserializer());
         KafkaProducer<Integer, String> producer =
                 new KafkaProducer<>(cluster.producerConfig(), new IntegerSerializer(), new StringSerializer())) {
        cluster.createTopics(TEST_TOPIC);
        producer.send(new ProducerRecord<>(TEST_TOPIC, "value"));
        producer.flush();
        consumer.subscribe(Collections.singleton(TEST_TOPIC));
        ConsumerRecords<Integer, String> poll = consumer.poll(TEN_SECONDS);
        assertThat(poll.count()).isEqualTo(1);
        assertThat(poll.iterator().next().value()).isEqualTo("value");
    }
}
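The test above wires the serializers in programmatically through the KafkaProducer constructor. The equivalent configuration-based wiring registers the classes under the standard producer config keys instead; a sketch (the broker address and topic name are placeholders):

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
try (KafkaProducer<Integer, String> producer = new KafkaProducer<>(props)) {
    producer.send(new ProducerRecord<>("some-topic", 1, "value")); // illustrative topic
}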
Use of org.apache.kafka.common.serialization.IntegerSerializer in project kafka by apache.
From the class StoreQueryIntegrationTest, method shouldQuerySpecificStalePartitionStoresMultiStreamThreadsNamedTopology:
@Test
public void shouldQuerySpecificStalePartitionStoresMultiStreamThreadsNamedTopology() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final int numStreamThreads = 2;
    final Properties streamsConfiguration1 = streamsConfiguration();
    streamsConfiguration1.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numStreamThreads);
    final Properties streamsConfiguration2 = streamsConfiguration();
    streamsConfiguration2.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numStreamThreads);
    final String topologyA = "topology-A";
    final KafkaStreamsNamedTopologyWrapper kafkaStreams1 = createNamedTopologyKafkaStreams(streamsConfiguration1);
    final KafkaStreamsNamedTopologyWrapper kafkaStreams2 = createNamedTopologyKafkaStreams(streamsConfiguration2);
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    final NamedTopologyBuilder builder1A = kafkaStreams1.newNamedTopologyBuilder(topologyA, streamsConfiguration1);
    getStreamsBuilderWithTopology(builder1A, semaphore);
    final NamedTopologyBuilder builder2A = kafkaStreams2.newNamedTopologyBuilder(topologyA, streamsConfiguration2);
    getStreamsBuilderWithTopology(builder2A, semaphore);
    kafkaStreams1.start(builder1A.build());
    kafkaStreams2.start(builder2A.build());
    waitForApplicationState(kafkaStreamsList, State.RUNNING, Duration.ofSeconds(60));
    assertTrue(kafkaStreams1.metadataForLocalThreads().size() > 1);
    assertTrue(kafkaStreams2.metadataForLocalThreads().size() > 1);
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    final KeyQueryMetadata keyQueryMetadata =
            kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, new IntegerSerializer(), topologyA);
    // the partition the key belongs to
    final int keyPartition = keyQueryMetadata.partition();
    // a partition the key does not belong to
    final int keyDontBelongPartition = (keyPartition == 0) ? 1 : 0;
    final QueryableStoreType<ReadOnlyKeyValueStore<Integer, Integer>> queryableStoreType = keyValueStore();
    // Assert that both active and standby are able to query for a key
    final NamedTopologyStoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> param =
            NamedTopologyStoreQueryParameters
                    .fromNamedTopologyAndStoreNameAndType(topologyA, TABLE_NAME, queryableStoreType)
                    .enableStaleStores()
                    .withPartition(keyPartition);
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store1 = getStore(kafkaStreams1, param);
        return store1.get(key) != null;
    }, "store1 cannot find results for key");
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store2 = getStore(kafkaStreams2, param);
        return store2.get(key) != null;
    }, "store2 cannot find results for key");
    final NamedTopologyStoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> otherParam =
            NamedTopologyStoreQueryParameters
                    .fromNamedTopologyAndStoreNameAndType(topologyA, TABLE_NAME, queryableStoreType)
                    .enableStaleStores()
                    .withPartition(keyDontBelongPartition);
    final ReadOnlyKeyValueStore<Integer, Integer> store3 = getStore(kafkaStreams1, otherParam);
    final ReadOnlyKeyValueStore<Integer, Integer> store4 = getStore(kafkaStreams2, otherParam);
    // Assert that the key is not found in stores for the partition it does not belong to
    assertThat(store3.get(key), is(nullValue()));
    assertThat(store4.get(key), is(nullValue()));
}
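queryMetadataForKey takes the key's serializer so it can determine which partition owns the key. With default partitioning, that boils down to a murmur2 hash of the serialized key bytes modulo the partition count; a sketch of the computation, assuming two partitions as implied by the test setup:

// The topic argument is unused by IntegerSerializer.
byte[] keyBytes = new IntegerSerializer().serialize("ignored", 1);
int numPartitions = 2; // assumption matching the two-partition setup above
// Utils is org.apache.kafka.common.utils.Utils
int partition = Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;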
Use of org.apache.kafka.common.serialization.IntegerSerializer in project kafka by apache.
From the class RestoreIntegrationTest, method createStateForRestoration:
private void createStateForRestoration(final String changelogTopic, final int startingOffset) {
    final Properties producerConfig = new Properties();
    producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    try (final KafkaProducer<Integer, Integer> producer =
                 new KafkaProducer<>(producerConfig, new IntegerSerializer(), new IntegerSerializer())) {
        for (int i = 0; i < numberOfKeys; i++) {
            final int offset = startingOffset + i;
            producer.send(new ProducerRecord<>(changelogTopic, offset, offset));
        }
    }
}
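The helper encodes both key and value with IntegerSerializer, so reading the data back requires IntegerDeserializer on both sides. A hypothetical verification sketch reusing the same CLUSTER and changelogTopic from the test class (the group id is made up):

final Properties consumerConfig = new Properties();
consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "restore-check"); // hypothetical group id
consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
try (final KafkaConsumer<Integer, Integer> consumer =
             new KafkaConsumer<>(consumerConfig, new IntegerDeserializer(), new IntegerDeserializer())) {
    consumer.subscribe(Collections.singleton(changelogTopic));
    final ConsumerRecords<Integer, Integer> records = consumer.poll(Duration.ofSeconds(10));
    // each record's key equals its value, as produced above
}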
Use of org.apache.kafka.common.serialization.IntegerSerializer in project kafka by apache.
From the class KStreamSplitTest, method withDriver:
private void withDriver(final Consumer<TopologyTestDriver> test) {
    final int[] expectedKeys = new int[] {-1, 0, 1, 2, 3, 4, 5, 6, 7};
    final Topology topology = builder.build();
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
        final TestInputTopic<Integer, String> inputTopic =
                driver.createInputTopic(topicName, new IntegerSerializer(), new StringSerializer());
        for (final int expectedKey : expectedKeys) {
            inputTopic.pipeInput(expectedKey, "V" + expectedKey);
        }
        test.accept(driver);
    }
}
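On the read side of a TopologyTestDriver the deserializer counterparts are used. A sketch, assuming the topology under test writes to an output topic named "output-topic" (a hypothetical name):

final TestOutputTopic<Integer, String> outputTopic =
        driver.createOutputTopic("output-topic", new IntegerDeserializer(), new StringDeserializer());
while (!outputTopic.isEmpty()) {
    final KeyValue<Integer, String> record = outputTopic.readKeyValue();
    // record.key was decoded with IntegerDeserializer, record.value with StringDeserializer
}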