Use of org.apache.samza.test.util.TestKafkaEvent in project samza by apache.
The class TestZkLocalApplicationRunner, method testRollingUpgradeOfStreamApplicationsShouldGenerateSameJobModel.
@Test
public void testRollingUpgradeOfStreamApplicationsShouldGenerateSameJobModel() throws Exception {
  // Set up kafka topics.
  publishKafkaEvents(inputKafkaTopic, 0, NUM_KAFKA_EVENTS, PROCESSOR_IDS[0]);

  Map<String, String> configMap = buildStreamApplicationConfigMap(testStreamAppName, testStreamAppId, false, Optional.empty());
  configMap.put(JobConfig.PROCESSOR_ID, PROCESSOR_IDS[0]);
  Config applicationConfig1 = new MapConfig(configMap);
  configMap.put(JobConfig.PROCESSOR_ID, PROCESSOR_IDS[1]);
  Config applicationConfig2 = new MapConfig(configMap);

  List<TestKafkaEvent> messagesProcessed = new ArrayList<>();
  TestStreamApplication.StreamApplicationCallback streamApplicationCallback = messagesProcessed::add;

  // Create StreamApplication from configuration.
  CountDownLatch kafkaEventsConsumedLatch = new CountDownLatch(NUM_KAFKA_EVENTS);
  CountDownLatch processedMessagesLatch1 = new CountDownLatch(1);
  CountDownLatch processedMessagesLatch2 = new CountDownLatch(1);

  ApplicationRunner appRunner1 = ApplicationRunners.getApplicationRunner(
      TestStreamApplication.getInstance(TEST_SYSTEM, ImmutableList.of(inputKafkaTopic), outputKafkaTopic,
          processedMessagesLatch1, null, kafkaEventsConsumedLatch, applicationConfig1),
      applicationConfig1);
  ApplicationRunner appRunner2 = ApplicationRunners.getApplicationRunner(
      TestStreamApplication.getInstance(TEST_SYSTEM, ImmutableList.of(inputKafkaTopic), outputKafkaTopic,
          processedMessagesLatch2, null, kafkaEventsConsumedLatch, applicationConfig2),
      applicationConfig2);

  // Run stream application.
  executeRun(appRunner1, applicationConfig1);
  executeRun(appRunner2, applicationConfig2);

  processedMessagesLatch1.await();
  processedMessagesLatch2.await();

  // Read job model before rolling upgrade.
  String jobModelVersion = zkUtils.getJobModelVersion();
  JobModel jobModel = JobModelUtil.readJobModel(jobModelVersion, zkMetadataStore);

  // Kill the first processor and wait for it to shut down cleanly.
  appRunner1.kill();
  appRunner1.waitForFinish();

  int lastProcessedMessageId = -1;
  for (TestKafkaEvent message : messagesProcessed) {
    lastProcessedMessageId = Math.max(lastProcessedMessageId, Integer.parseInt(message.getEventData()));
  }
  messagesProcessed.clear();

  assertEquals(ApplicationStatus.SuccessfulFinish, appRunner1.status());

  // Restart the first processor with the same configuration to simulate a rolling upgrade.
  processedMessagesLatch1 = new CountDownLatch(1);
  publishKafkaEvents(inputKafkaTopic, NUM_KAFKA_EVENTS, 2 * NUM_KAFKA_EVENTS, PROCESSOR_IDS[0]);
  ApplicationRunner appRunner3 = ApplicationRunners.getApplicationRunner(
      TestStreamApplication.getInstance(TEST_SYSTEM, ImmutableList.of(inputKafkaTopic), outputKafkaTopic,
          processedMessagesLatch1, null, kafkaEventsConsumedLatch, applicationConfig1),
      applicationConfig1);
  executeRun(appRunner3, applicationConfig1);
  processedMessagesLatch1.await();

  // Read new job model after rolling upgrade.
  String newJobModelVersion = zkUtils.getJobModelVersion();
  JobModel newJobModel = JobModelUtil.readJobModel(newJobModelVersion, zkMetadataStore);

  // The job model version should advance by one, while the container assignments stay the same.
  assertEquals(Integer.parseInt(jobModelVersion) + 1, Integer.parseInt(newJobModelVersion));
  assertEquals(jobModel.getContainers(), newJobModel.getContainers());

  appRunner2.kill();
  appRunner2.waitForFinish();
  assertEquals(ApplicationStatus.SuccessfulFinish, appRunner2.status());

  appRunner3.kill();
  appRunner3.waitForFinish();
  assertEquals(ApplicationStatus.SuccessfulFinish, appRunner3.status());
}
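Both the assertion loop above and the publisher below lean on the string contract of TestKafkaEvent: the constructor takes a processor id and an event payload, toString() produces the bytes written to Kafka, and getEventData() returns the numeric event index as a string. The following is a minimal illustrative stand-in for a class with that contract; the field names, the '|' delimiter, and the fromString helper are assumptions for illustration, not the actual Samza test utility.

import java.io.Serializable;

// Illustrative stand-in for org.apache.samza.test.util.TestKafkaEvent (field names and delimiter are assumed).
public class TestKafkaEventSketch implements Serializable {
  private final String eventSourceId; // e.g. the stream processor id
  private final String eventData;     // e.g. the numeric event index as a string

  public TestKafkaEventSketch(String eventSourceId, String eventData) {
    this.eventSourceId = eventSourceId;
    this.eventData = eventData;
  }

  public String getEventData() {
    return eventData;
  }

  // Wire format written by publishKafkaEvents via toString().getBytes().
  @Override
  public String toString() {
    return eventSourceId + "|" + eventData;
  }

  // Inverse of toString(); lets a consumer-side check rebuild the event.
  public static TestKafkaEventSketch fromString(String raw) {
    int separator = raw.indexOf('|');
    return new TestKafkaEventSketch(raw.substring(0, separator), raw.substring(separator + 1));
  }
}

With such a contract, Integer.parseInt(message.getEventData()) in the loop above recovers the original publish index.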
Use of org.apache.samza.test.util.TestKafkaEvent in project samza by apache.
The class TestZkLocalApplicationRunner, method publishKafkaEvents.
private void publishKafkaEvents(String topic, int startIndex, int endIndex, String streamProcessorId) {
  int partitionCount = topicToPartitionCount.getOrDefault(topic, 1);
  for (int eventIndex = startIndex; eventIndex < endIndex; eventIndex++) {
    try {
      LOGGER.info("Publish kafka event with index: {} for stream processor: {}.", eventIndex, streamProcessorId);
      // Round-robin events across the topic's partitions; the payload is the TestKafkaEvent string form.
      producer.send(new ProducerRecord(topic, eventIndex % partitionCount, null,
          new TestKafkaEvent(streamProcessorId, String.valueOf(eventIndex)).toString().getBytes()));
    } catch (Exception e) {
      LOGGER.error("Publishing to kafka topic: {} resulted in exception.", topic, e);
      throw new SamzaException(e);
    }
  }
}
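To sanity-check what publishKafkaEvents wrote, the round trip can be verified with a plain Kafka consumer. This is a minimal sketch, not part of the test class: it assumes a broker reachable at localhost:9092, a recent kafka-clients version (poll(Duration)), a hypothetical topic name "test-input-topic", and the TestKafkaEventSketch helper shown earlier.

import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class PublishedEventChecker {
  // Reads raw bytes back from the topic and parses them into sketch events.
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "published-event-checker");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

    try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
      consumer.subscribe(Collections.singletonList("test-input-topic")); // assumed topic name
      ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(5));
      for (ConsumerRecord<byte[], byte[]> record : records) {
        String raw = new String(record.value(), StandardCharsets.UTF_8);
        TestKafkaEventSketch event = TestKafkaEventSketch.fromString(raw);
        System.out.printf("partition=%d eventData=%s%n", record.partition(), event.getEventData());
      }
    }
  }
}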