Use of com.hazelcast.jet.core.test.TestProcessorContext in project hazelcast by hazelcast.
Example from the class StreamKafkaPTest, method when_noAssignedPartitionAndAddedLater_then_resumesFromIdle:
@Test
public void when_noAssignedPartitionAndAddedLater_then_resumesFromIdle() throws Exception {
    // we ask to create the 5th out of 5 processors, but we have only 4 partitions and 1 topic
    // --> our processor will have nothing assigned
    StreamKafkaP processor = createProcessor(properties(), 1, r -> entry(r.key(), r.value()), 10_000);
    TestOutbox outbox = new TestOutbox(new int[]{10}, 10);
    processor.init(outbox, new TestProcessorContext()
            .setTotalParallelism(INITIAL_PARTITION_COUNT + 1)
            .setGlobalProcessorIndex(INITIAL_PARTITION_COUNT));
    assertTrue(processor.currentAssignment.isEmpty());
    assertEquals(IDLE_MESSAGE, consumeEventually(processor, outbox));

    // add a partition and produce an event to it
    kafkaTestSupport.setPartitionCount(topic1Name, INITIAL_PARTITION_COUNT + 1);
    Entry<Integer, String> value = produceEventToNewPartition(INITIAL_PARTITION_COUNT);

    Object actualEvent;
    do {
        actualEvent = consumeEventually(processor, outbox);
    } while (actualEvent instanceof Watermark);
    assertEquals(value, actualEvent);
}
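The consumeEventually helper used above is not reproduced on this page. Below is a minimal sketch of what such a helper could look like, assuming only the public TestOutbox API (queue(0) and poll()) and the fact that a streaming source's complete() must be called repeatedly before it emits; the timeout and polling strategy are illustrative, not the project's actual implementation.

    import static java.util.concurrent.TimeUnit.SECONDS;

    import com.hazelcast.jet.core.Processor;
    import com.hazelcast.jet.core.test.TestOutbox;

    // Illustrative helper (not the project's actual method): drive the processor and
    // poll the first outbox bucket until an item appears or the timeout elapses.
    private static Object consumeEventually(Processor processor, TestOutbox outbox) throws InterruptedException {
        long deadline = System.nanoTime() + SECONDS.toNanos(10);
        while (System.nanoTime() < deadline) {
            processor.complete(); // a streaming source keeps returning false here
            Object item = outbox.queue(0).poll();
            if (item != null) {
                return item;
            }
            Thread.sleep(10);
        }
        throw new AssertionError("no item emitted within the timeout");
    }

In the test above, the first item obtained this way is the IDLE_MESSAGE watermark, because the processor has no assigned partition yet; once a partition is added and an event is produced to it, the loop skips further watermarks and the produced entry arrives.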
Use of com.hazelcast.jet.core.test.TestProcessorContext in project hazelcast by hazelcast.
Example from the class StreamKafkaPTest, method when_noAssignedPartitions_thenEmitIdleMsgImmediately:
@Test
public void when_noAssignedPartitions_thenEmitIdleMsgImmediately() throws Exception {
    StreamKafkaP processor = createProcessor(properties(), 2, r -> entry(r.key(), r.value()), 100_000);
    TestOutbox outbox = new TestOutbox(new int[]{10}, 10);
    TestProcessorContext context = new TestProcessorContext()
            .setTotalParallelism(INITIAL_PARTITION_COUNT * 2 + 1)
            .setGlobalProcessorIndex(INITIAL_PARTITION_COUNT * 2);
    processor.init(outbox, context);
    processor.complete();
    assertEquals(IDLE_MESSAGE, outbox.queue(0).poll());
}
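The reason this processor emits IDLE_MESSAGE on the very first complete() call is that its globalProcessorIndex is the last one of the total parallelism, which is larger than the number of available topic-partitions, so nothing gets assigned to it. A simplified, assumed model of such a round-robin distribution is sketched below; the real StreamKafkaP assignment logic may differ in detail.

    import java.util.ArrayList;
    import java.util.List;

    // Assumed round-robin model: partition p goes to the processor whose index equals
    // p modulo totalParallelism. With fewer partitions than processors, the
    // highest-index processors receive nothing and report themselves as idle.
    static List<Integer> assignedPartitions(int processorIndex, int totalParallelism, int totalPartitions) {
        List<Integer> assigned = new ArrayList<>();
        for (int partition = 0; partition < totalPartitions; partition++) {
            if (partition % totalParallelism == processorIndex) {
                assigned.add(partition);
            }
        }
        return assigned;
    }

Under this model, a processor with index INITIAL_PARTITION_COUNT * 2 out of INITIAL_PARTITION_COUNT * 2 + 1 processors gets an empty list whenever the subscribed topics have at most INITIAL_PARTITION_COUNT * 2 partitions in total, which matches the empty currentAssignment and the immediate idle message in the test.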
Use of com.hazelcast.jet.core.test.TestProcessorContext in project hazelcast by hazelcast.
Example from the class StreamKafkaPTest, method when_partitionAdded_then_consumedFromBeginning:
@Test
public void when_partitionAdded_then_consumedFromBeginning() throws Exception {
    Properties properties = properties();
    properties.setProperty("metadata.max.age.ms", "100");
    StreamKafkaP processor = createProcessor(properties, 2, r -> entry(r.key(), r.value()), 10_000);
    TestOutbox outbox = new TestOutbox(new int[]{10}, 10);
    processor.init(outbox, new TestProcessorContext());

    kafkaTestSupport.produce(topic1Name, 0, "0");
    assertEquals(entry(0, "0"), consumeEventually(processor, outbox));

    kafkaTestSupport.setPartitionCount(topic1Name, INITIAL_PARTITION_COUNT + 2);
    // this allows production to the added partition
    kafkaTestSupport.resetProducer();

    boolean somethingInPartition1 = false;
    for (int i = 1; i < 11; i++) {
        Future<RecordMetadata> future = kafkaTestSupport.produce(topic1Name, i, Integer.toString(i));
        RecordMetadata recordMetadata = future.get();
        System.out.println("Entry " + i + " produced to partition " + recordMetadata.partition());
        somethingInPartition1 |= recordMetadata.partition() == 1;
    }
    assertTrue("nothing was produced to partition-1", somethingInPartition1);

    Set<Object> receivedEvents = new HashSet<>();
    for (int i = 1; i < 11; i++) {
        try {
            receivedEvents.add(consumeEventually(processor, outbox));
        } catch (AssertionError e) {
            throw new AssertionError("Unable to receive 10 items, events so far: " + receivedEvents);
        }
    }
    assertEquals(range(1, 11).mapToObj(i -> entry(i, Integer.toString(i))).collect(toSet()), receivedEvents);
}
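The properties() helper returns the consumer configuration shared by these tests but is not shown on this page. Below is a plausible sketch using standard Kafka consumer keys; the broker address and the group id are placeholders, and the project's actual helper may set additional or different properties.

    import java.util.Properties;
    import java.util.UUID;

    import org.apache.kafka.common.serialization.IntegerDeserializer;
    import org.apache.kafka.common.serialization.StringDeserializer;

    // Plausible sketch of the consumer configuration; "localhost:9092" and the random
    // group id are placeholders, not values taken from the actual test.
    private static Properties properties() {
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "localhost:9092");
        properties.setProperty("group.id", UUID.randomUUID().toString());
        properties.setProperty("key.deserializer", IntegerDeserializer.class.getCanonicalName());
        properties.setProperty("value.deserializer", StringDeserializer.class.getCanonicalName());
        properties.setProperty("auto.offset.reset", "earliest");
        return properties;
    }

Reading from the earliest offset matches the test name (consumed from the beginning), and the test itself lowers metadata.max.age.ms to 100 ms so the consumer refreshes topic metadata quickly and discovers the newly added partitions within the assertion timeout.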