Use of org.apache.samza.test.util.TestKafkaEvent in project samza by apache.
In class TestStartpoint, method testStartpointOldest.
@Test
public void testStartpointOldest() throws InterruptedException {
  publishKafkaEventsWithDelayPerEvent(inputKafkaTopic3, 0, NUM_KAFKA_EVENTS, PROCESSOR_IDS[2], Duration.ofMillis(2));
  ConcurrentHashMap<String, IncomingMessageEnvelope> recvEventsInputStartpointOldest = new ConcurrentHashMap<>();
  CoordinatorStreamStore coordinatorStreamStore = createCoordinatorStreamStore(applicationConfig1);
  coordinatorStreamStore.init();
  StartpointManager startpointManager = new StartpointManager(coordinatorStreamStore);
  startpointManager.start();
  StartpointOldest startpointOldest = new StartpointOldest();
  writeStartpoints(startpointManager, inputKafkaTopic3, ZK_TEST_PARTITION_COUNT, startpointOldest);
  startpointManager.stop();
  coordinatorStreamStore.close();
  TestTaskApplication.TaskApplicationProcessCallback processedCallback = (IncomingMessageEnvelope ime, TaskCallback callback) -> {
    try {
      String streamName = ime.getSystemStreamPartition().getStream();
      TestKafkaEvent testKafkaEvent = TestKafkaEvent.fromString((String) ime.getMessage());
      String eventIndex = testKafkaEvent.getEventData();
      if (inputKafkaTopic3.equals(streamName)) {
        recvEventsInputStartpointOldest.put(eventIndex, ime);
      } else {
        throw new RuntimeException("Unexpected input stream: " + streamName);
      }
      callback.complete();
    } catch (Exception ex) {
      callback.failure(ex);
    }
  };
  // Fetch all since consuming from oldest
  CountDownLatch processedMessagesLatchStartpointOldest = new CountDownLatch(NUM_KAFKA_EVENTS);
  CountDownLatch shutdownLatchStartpointOldest = new CountDownLatch(1);
  TestTaskApplication testTaskApplicationStartpointOldest = new TestTaskApplication(TEST_SYSTEM, inputKafkaTopic3, outputKafkaTopic, processedMessagesLatchStartpointOldest, shutdownLatchStartpointOldest, Optional.of(processedCallback));
  ApplicationRunner appRunner = ApplicationRunners.getApplicationRunner(testTaskApplicationStartpointOldest, applicationConfig3);
  executeRun(appRunner, applicationConfig3);
  assertTrue(processedMessagesLatchStartpointOldest.await(1, TimeUnit.MINUTES));
  appRunner.kill();
  appRunner.waitForFinish();
  assertTrue(shutdownLatchStartpointOldest.await(1, TimeUnit.MINUTES));
  assertEquals("Expecting to have processed all the events", NUM_KAFKA_EVENTS, recvEventsInputStartpointOldest.size());
}
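Each process callback above turns the raw string payload back into a TestKafkaEvent with fromString and keys the received-events map by getEventData. The helper class itself is not part of this snippet; the sketch below is only inferred from that usage (a constructor taking a processor id and event data, a toString/fromString round trip, and a getEventData accessor), and the real class in org.apache.samza.test.util may serialize differently.

// Hypothetical sketch of TestKafkaEvent, inferred from its usage in these tests;
// the real class in org.apache.samza.test.util may use a different wire format.
public class TestKafkaEvent {
  private static final String DELIMITER = "|";
  private final String processorId;
  private final String eventData;

  public TestKafkaEvent(String processorId, String eventData) {
    this.processorId = processorId;
    this.eventData = eventData;
  }

  public String getEventData() {
    return eventData;
  }

  @Override
  public String toString() {
    // Serialize as "processorId|eventData" so the event can travel as a plain string payload.
    return processorId + DELIMITER + eventData;
  }

  public static TestKafkaEvent fromString(String message) {
    String[] parts = message.split("\\" + DELIMITER, 2);
    return new TestKafkaEvent(parts[0], parts[1]);
  }
}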
Use of org.apache.samza.test.util.TestKafkaEvent in project samza by apache.
In class TestStartpoint, method testStartpointUpcoming.
@Test
public void testStartpointUpcoming() throws InterruptedException {
  publishKafkaEventsWithDelayPerEvent(inputKafkaTopic4, 0, NUM_KAFKA_EVENTS, PROCESSOR_IDS[3], Duration.ofMillis(2));
  ConcurrentHashMap<String, IncomingMessageEnvelope> recvEventsInputStartpointUpcoming = new ConcurrentHashMap<>();
  CoordinatorStreamStore coordinatorStreamStore = createCoordinatorStreamStore(applicationConfig1);
  coordinatorStreamStore.init();
  StartpointManager startpointManager = new StartpointManager(coordinatorStreamStore);
  startpointManager.start();
  StartpointUpcoming startpointUpcoming = new StartpointUpcoming();
  writeStartpoints(startpointManager, inputKafkaTopic4, ZK_TEST_PARTITION_COUNT, startpointUpcoming);
  startpointManager.stop();
  coordinatorStreamStore.close();
  TestTaskApplication.TaskApplicationProcessCallback processedCallback = (IncomingMessageEnvelope ime, TaskCallback callback) -> {
    try {
      String streamName = ime.getSystemStreamPartition().getStream();
      TestKafkaEvent testKafkaEvent = TestKafkaEvent.fromString((String) ime.getMessage());
      String eventIndex = testKafkaEvent.getEventData();
      if (inputKafkaTopic4.equals(streamName)) {
        recvEventsInputStartpointUpcoming.put(eventIndex, ime);
      } else {
        throw new RuntimeException("Unexpected input stream: " + streamName);
      }
      callback.complete();
    } catch (Exception ex) {
      callback.failure(ex);
    }
  };
  // Expecting none, so just attempt a small number of fetches.
  CountDownLatch processedMessagesLatchStartpointUpcoming = new CountDownLatch(5);
  CountDownLatch shutdownLatchStartpointUpcoming = new CountDownLatch(1);
  TestTaskApplication testTaskApplicationStartpointUpcoming = new TestTaskApplication(TEST_SYSTEM, inputKafkaTopic4, outputKafkaTopic, processedMessagesLatchStartpointUpcoming, shutdownLatchStartpointUpcoming, Optional.of(processedCallback));
  // Startpoint upcoming
  ApplicationRunner appRunner = ApplicationRunners.getApplicationRunner(testTaskApplicationStartpointUpcoming, applicationConfig4);
  executeRun(appRunner, applicationConfig4);
  assertFalse("Expecting to timeout and not process any old messages.", processedMessagesLatchStartpointUpcoming.await(15, TimeUnit.SECONDS));
  assertEquals("Expecting not to process any old messages.", 0, recvEventsInputStartpointUpcoming.size());
  appRunner.kill();
  appRunner.waitForFinish();
  assertTrue(shutdownLatchStartpointUpcoming.await(1, TimeUnit.MINUTES));
}
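Before starting the application, each test writes the same Startpoint to every partition of its input topic through writeStartpoints. That helper is outside this snippet; a minimal sketch is shown below, assuming it simply loops over the partitions and uses StartpointManager.writeStartpoint with the TEST_SYSTEM constant seen elsewhere in the class.

// Assumed sketch of the writeStartpoints helper used by the tests; the real method in TestStartpoint may differ.
private static void writeStartpoints(StartpointManager startpointManager, String topic, int partitionCount, Startpoint startpoint) {
  for (int p = 0; p < partitionCount; p++) {
    // Write the same startpoint for every partition of the given topic on TEST_SYSTEM.
    startpointManager.writeStartpoint(new SystemStreamPartition(TEST_SYSTEM, topic, new Partition(p)), startpoint);
  }
}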
Use of org.apache.samza.test.util.TestKafkaEvent in project samza by apache.
In class TestStartpoint, method testStartpointSpecific.
@Test
public void testStartpointSpecific() throws InterruptedException {
  Map<Integer, RecordMetadata> sentEvents1 = publishKafkaEventsWithDelayPerEvent(inputKafkaTopic1, 0, NUM_KAFKA_EVENTS, PROCESSOR_IDS[0], Duration.ofMillis(2));
  ConcurrentHashMap<String, IncomingMessageEnvelope> recvEventsInputStartpointSpecific = new ConcurrentHashMap<>();
  CoordinatorStreamStore coordinatorStreamStore = createCoordinatorStreamStore(applicationConfig1);
  coordinatorStreamStore.init();
  StartpointManager startpointManager = new StartpointManager(coordinatorStreamStore);
  startpointManager.start();
  StartpointSpecific startpointSpecific = new StartpointSpecific(String.valueOf(sentEvents1.get(100).offset()));
  writeStartpoints(startpointManager, inputKafkaTopic1, ZK_TEST_PARTITION_COUNT, startpointSpecific);
  startpointManager.stop();
  coordinatorStreamStore.close();
  TestTaskApplication.TaskApplicationProcessCallback processedCallback = (IncomingMessageEnvelope ime, TaskCallback callback) -> {
    try {
      String streamName = ime.getSystemStreamPartition().getStream();
      TestKafkaEvent testKafkaEvent = TestKafkaEvent.fromString((String) ime.getMessage());
      String eventIndex = testKafkaEvent.getEventData();
      if (inputKafkaTopic1.equals(streamName)) {
        recvEventsInputStartpointSpecific.put(eventIndex, ime);
      } else {
        throw new RuntimeException("Unexpected input stream: " + streamName);
      }
      callback.complete();
    } catch (Exception ex) {
      callback.failure(ex);
    }
  };
  // Just fetch a few messages
  CountDownLatch processedMessagesLatchStartpointSpecific = new CountDownLatch(100);
  CountDownLatch shutdownLatchStartpointSpecific = new CountDownLatch(1);
  TestTaskApplication testTaskApplicationStartpointSpecific = new TestTaskApplication(TEST_SYSTEM, inputKafkaTopic1, outputKafkaTopic, processedMessagesLatchStartpointSpecific, shutdownLatchStartpointSpecific, Optional.of(processedCallback));
  ApplicationRunner appRunner = ApplicationRunners.getApplicationRunner(testTaskApplicationStartpointSpecific, applicationConfig1);
  executeRun(appRunner, applicationConfig1);
  assertTrue(processedMessagesLatchStartpointSpecific.await(1, TimeUnit.MINUTES));
  appRunner.kill();
  appRunner.waitForFinish();
  assertTrue(shutdownLatchStartpointSpecific.await(1, TimeUnit.MINUTES));
  Integer startpointSpecificOffset = Integer.valueOf(startpointSpecific.getSpecificOffset());
  for (IncomingMessageEnvelope ime : recvEventsInputStartpointSpecific.values()) {
    Integer eventOffset = Integer.valueOf(ime.getOffset());
    String assertMsg = String.format("Expecting message offset: %d >= Startpoint specific offset: %d", eventOffset, startpointSpecificOffset);
    assertTrue(assertMsg, eventOffset >= startpointSpecificOffset);
  }
}
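All of these tests obtain the metadata store the same way, via createCoordinatorStreamStore(applicationConfig1), before wrapping it in a StartpointManager. That helper is also not shown here; under the assumption that it only wraps the CoordinatorStreamStore constructor with a fresh metrics registry, it could look like the sketch below.

// Assumed sketch of createCoordinatorStreamStore; the real helper in TestStartpoint may do more.
private static CoordinatorStreamStore createCoordinatorStreamStore(Config applicationConfig) {
  return new CoordinatorStreamStore(applicationConfig, new MetricsRegistryMap());
}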
Use of org.apache.samza.test.util.TestKafkaEvent in project samza by apache.
In class TestStartpoint, method publishKafkaEventsWithDelayPerEvent.
// Sends each event synchronously and returns a map from event index to the metadata of the sent record.
// Sleeps for the specified delay before each send.
private Map<Integer, RecordMetadata> publishKafkaEventsWithDelayPerEvent(String topic, int startIndex, int endIndex, String streamProcessorId, Duration delay) {
  HashMap<Integer, RecordMetadata> eventsMetadata = new HashMap<>();
  for (int eventIndex = startIndex; eventIndex < endIndex; eventIndex++) {
    try {
      LOGGER.info("Publish kafka event with index: {} for stream processor: {}.", eventIndex, streamProcessorId);
      TestKafkaEvent testKafkaEvent = new TestKafkaEvent(streamProcessorId, String.valueOf(eventIndex));
      Thread.sleep(delay.toMillis());
      Future<RecordMetadata> send = producer.send(new ProducerRecord(topic, testKafkaEvent.toString().getBytes()));
      eventsMetadata.put(eventIndex, send.get(5, TimeUnit.SECONDS));
    } catch (Exception e) {
      LOGGER.error("Publishing to kafka topic: {} resulted in exception: {}.", new Object[] { topic, e });
      throw new SamzaException(e);
    }
  }
  producer.flush();
  return eventsMetadata;
}
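The producer field used above is not defined in this snippet. Given that the record is built with a byte[] value and no key, it is presumably a KafkaProducer<byte[], byte[]>; the configuration below is an assumed setup that matches that usage, with a placeholder broker address.

// Assumed producer setup matching how the helper above uses it (byte[] value, no key);
// the actual field in the test harness may be configured differently.
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker address
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props);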
Use of org.apache.samza.test.util.TestKafkaEvent in project samza by apache.
In class TestStartpoint, method testStartpointTimestamp.
@Test
public void testStartpointTimestamp() throws InterruptedException {
  Map<Integer, RecordMetadata> sentEvents2 = publishKafkaEventsWithDelayPerEvent(inputKafkaTopic2, 0, NUM_KAFKA_EVENTS, PROCESSOR_IDS[1], Duration.ofMillis(2));
  ConcurrentHashMap<String, IncomingMessageEnvelope> recvEventsInputStartpointTimestamp = new ConcurrentHashMap<>();
  CoordinatorStreamStore coordinatorStreamStore = createCoordinatorStreamStore(applicationConfig1);
  coordinatorStreamStore.init();
  StartpointManager startpointManager = new StartpointManager(coordinatorStreamStore);
  startpointManager.start();
  StartpointTimestamp startpointTimestamp = new StartpointTimestamp(sentEvents2.get(150).timestamp());
  writeStartpoints(startpointManager, inputKafkaTopic2, ZK_TEST_PARTITION_COUNT, startpointTimestamp);
  startpointManager.stop();
  coordinatorStreamStore.close();
  TestTaskApplication.TaskApplicationProcessCallback processedCallback = (IncomingMessageEnvelope ime, TaskCallback callback) -> {
    try {
      String streamName = ime.getSystemStreamPartition().getStream();
      TestKafkaEvent testKafkaEvent = TestKafkaEvent.fromString((String) ime.getMessage());
      String eventIndex = testKafkaEvent.getEventData();
      if (inputKafkaTopic2.equals(streamName)) {
        recvEventsInputStartpointTimestamp.put(eventIndex, ime);
      } else {
        throw new RuntimeException("Unexpected input stream: " + streamName);
      }
      callback.complete();
    } catch (Exception ex) {
      callback.failure(ex);
    }
  };
  // Just fetch a few messages
  CountDownLatch processedMessagesLatchStartpointTimestamp = new CountDownLatch(100);
  CountDownLatch shutdownLatchStartpointTimestamp = new CountDownLatch(1);
  TestTaskApplication testTaskApplicationStartpointTimestamp = new TestTaskApplication(TEST_SYSTEM, inputKafkaTopic2, outputKafkaTopic, processedMessagesLatchStartpointTimestamp, shutdownLatchStartpointTimestamp, Optional.of(processedCallback));
  ApplicationRunner appRunner = ApplicationRunners.getApplicationRunner(testTaskApplicationStartpointTimestamp, applicationConfig2);
  executeRun(appRunner, applicationConfig2);
  assertTrue(processedMessagesLatchStartpointTimestamp.await(1, TimeUnit.MINUTES));
  appRunner.kill();
  appRunner.waitForFinish();
  assertTrue(shutdownLatchStartpointTimestamp.await(1, TimeUnit.MINUTES));
  for (IncomingMessageEnvelope ime : recvEventsInputStartpointTimestamp.values()) {
    Integer eventOffset = Integer.valueOf(ime.getOffset());
    // sanity check
    assertNotEquals(0, ime.getEventTime());
    String assertMsg = String.format("Expecting message timestamp: %d >= Startpoint timestamp: %d", ime.getEventTime(), startpointTimestamp.getTimestampOffset());
    assertTrue(assertMsg, ime.getEventTime() >= startpointTimestamp.getTimestampOffset());
  }
}
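The final loop only checks that each received record's event time is at or after the startpoint timestamp. That is consistent with Kafka's timestamp-to-offset resolution, where each partition is positioned at the earliest offset whose record timestamp is greater than or equal to the requested timestamp. The snippet below is not part of the test; it only illustrates that lookup with the plain Kafka consumer API, assuming an already-constructed consumer.

// Illustration only: resolving a timestamp to an offset with the Kafka consumer API.
// The Samza Kafka system consumer presumably performs an equivalent lookup for StartpointTimestamp.
// consumer: an already-constructed KafkaConsumer<byte[], byte[]> (assumed).
TopicPartition tp = new TopicPartition(inputKafkaTopic2, 0);
Map<TopicPartition, OffsetAndTimestamp> resolved =
    consumer.offsetsForTimes(Collections.singletonMap(tp, startpointTimestamp.getTimestampOffset()));
OffsetAndTimestamp oat = resolved.get(tp);
if (oat != null) {
  consumer.seek(tp, oat.offset()); // first offset with timestamp >= the requested timestamp
}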