Example usage of org.apache.kafka.streams.processor.api.MockProcessorContext in the Apache Kafka project:
the shouldLogAndMeterSkippedRecordsDueToNullLeftKey method of the KTableKTableInnerJoinTest class.
@Test
public void shouldLogAndMeterSkippedRecordsDueToNullLeftKey() {
    final StreamsBuilder builder = new StreamsBuilder();
    // Build the two source KTables the inner join operates on.
    @SuppressWarnings("unchecked")
    final KTableImpl<String, String, String> leftTable =
        (KTableImpl<String, String, String>) builder.table("left", Consumed.with(Serdes.String(), Serdes.String()));
    @SuppressWarnings("unchecked")
    final KTableImpl<String, String, String> rightTable =
        (KTableImpl<String, String, String>) builder.table("right", Consumed.with(Serdes.String(), Serdes.String()));
    final Processor<String, Change<String>, String, Change<Object>> join =
        new KTableKTableInnerJoin<>(leftTable, rightTable, null).get();

    final MockProcessorContext<String, Change<Object>> context = new MockProcessorContext<>(props);
    context.setRecordMetadata("left", -1, -2);
    join.init(context);

    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KTableKTableInnerJoin.class)) {
        // A record with a null key must be skipped and logged, not joined.
        join.process(new Record<>(null, new Change<>("new", "old"), 0));
        assertThat(
            appender.getMessages(),
            hasItem("Skipping record due to null key. topic=[left] partition=[-1] offset=[-2]"));
    }
}
Example usage of org.apache.kafka.streams.processor.api.MockProcessorContext in the Apache Kafka project:
the shouldLogAndMeterSkippedRecordsDueToNullLeftKey method of the KTableKTableOuterJoinTest class.
@Test
public void shouldLogAndMeterSkippedRecordsDueToNullLeftKey() {
    final StreamsBuilder builder = new StreamsBuilder();
    // Build the two source KTables the outer join operates on.
    @SuppressWarnings("unchecked")
    final KTableImpl<String, String, String> leftTable =
        (KTableImpl<String, String, String>) builder.table("left", Consumed.with(Serdes.String(), Serdes.String()));
    @SuppressWarnings("unchecked")
    final KTableImpl<String, String, String> rightTable =
        (KTableImpl<String, String, String>) builder.table("right", Consumed.with(Serdes.String(), Serdes.String()));
    final Processor<String, Change<String>, String, Change<Object>> join =
        new KTableKTableOuterJoin<>(leftTable, rightTable, null).get();

    final MockProcessorContext<String, Change<Object>> context = new MockProcessorContext<>(props);
    context.setRecordMetadata("left", -1, -2);
    join.init(context);

    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KTableKTableOuterJoin.class)) {
        // A record with a null key must be skipped and logged, not joined.
        join.process(new Record<>(null, new Change<>("new", "old"), 0));
        assertThat(
            appender.getMessages(),
            hasItem("Skipping record due to null key. topic=[left] partition=[-1] offset=[-2]"));
    }
}
Example usage of org.apache.kafka.streams.processor.api.MockProcessorContext in the Apache Kafka project:
the shouldWorkWithPersistentStore method of the WindowedWordCountProcessorTest class.
@Test
public void shouldWorkWithPersistentStore() throws IOException {
    final File stateDir = TestUtils.tempDirectory();
    try {
        final MockProcessorContext<String, String> context =
            new MockProcessorContext<>(new Properties(), new TaskId(0, 0), stateDir);

        // Create, initialize, and register the persistent window store.
        final WindowStore<String, Integer> store = Stores.windowStoreBuilder(
                Stores.persistentWindowStore("WindowedCounts", Duration.ofDays(24), Duration.ofMillis(100), false),
                Serdes.String(),
                Serdes.Integer())
            .withLoggingDisabled()   // a changelog topic is irrelevant for this unit test
            .withCachingDisabled()   // caching would defer writes and break the assertions below
            .build();
        store.init(context.getStateStoreContext(), store);
        context.getStateStoreContext().register(store, null);

        // BUG FIX: close the store in a finally block. Previously, a failing
        // assertion skipped store.close(), leaking the persistent store handle
        // and potentially preventing Utils.delete(stateDir) from succeeding.
        try {
            // Create and initialize the processor under test.
            final Processor<String, String, String, String> processor = new WindowedWordCountProcessorSupplier().get();
            processor.init(context);

            // Send a record to the processor.
            processor.process(new Record<>("key", "alpha beta gamma alpha", 101L));
            // Send a record to the processor in a new window.
            processor.process(new Record<>("key", "gamma delta", 221L));

            // Note that the processor does not forward during process().
            assertThat(context.forwarded().isEmpty(), is(true));

            // Trigger the punctuator, which iterates over the state store and forwards the contents.
            context.scheduledPunctuators().get(0).getPunctuator().punctuate(1_000L);

            // Finally, verify the output.
            final List<CapturedForward<? extends String, ? extends String>> capturedForwards = context.forwarded();
            final List<CapturedForward<? extends String, ? extends String>> expected = asList(
                new CapturedForward<>(new Record<>("[alpha@100/200]", "2", 1_000L)),
                new CapturedForward<>(new Record<>("[beta@100/200]", "1", 1_000L)),
                new CapturedForward<>(new Record<>("[delta@200/300]", "1", 1_000L)),
                new CapturedForward<>(new Record<>("[gamma@100/200]", "1", 1_000L)),
                new CapturedForward<>(new Record<>("[gamma@200/300]", "1", 1_000L)));
            assertThat(capturedForwards, is(expected));
        } finally {
            store.close();
        }
    } finally {
        Utils.delete(stateDir);
    }
}
Example usage of org.apache.kafka.streams.processor.api.MockProcessorContext in the Apache Kafka project:
the shouldLogAndMeterSkippedRecordsDueToNullLeftKey method of the KTableKTableLeftJoinTest class.
@Test
public void shouldLogAndMeterSkippedRecordsDueToNullLeftKey() {
    final StreamsBuilder builder = new StreamsBuilder();
    // Build the two source KTables the left join operates on.
    @SuppressWarnings("unchecked")
    final KTableImpl<String, String, String> leftTable =
        (KTableImpl<String, String, String>) builder.table("left", Consumed.with(Serdes.String(), Serdes.String()));
    @SuppressWarnings("unchecked")
    final KTableImpl<String, String, String> rightTable =
        (KTableImpl<String, String, String>) builder.table("right", Consumed.with(Serdes.String(), Serdes.String()));
    final Processor<String, Change<String>, String, Change<Object>> join =
        new KTableKTableLeftJoin<>(leftTable, rightTable, null).get();

    final MockProcessorContext<String, Change<Object>> context = new MockProcessorContext<>(props);
    context.setRecordMetadata("left", -1, -2);
    join.init(context);

    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KTableKTableLeftJoin.class)) {
        // A record with a null key must be skipped and logged, not joined.
        join.process(new Record<>(null, new Change<>("new", "old"), 0));
        assertThat(
            appender.getMessages(),
            hasItem("Skipping record due to null key. topic=[left] partition=[-1] offset=[-2]"));
    }
}
Example usage of org.apache.kafka.streams.processor.api.MockProcessorContext in the Apache Kafka project:
the shouldLogAndMeterSkippedRecordsDueToNullLeftKeyWithBuiltInMetricsVersionLatest method of the KTableKTableRightJoinTest class.
@Test
public void shouldLogAndMeterSkippedRecordsDueToNullLeftKeyWithBuiltInMetricsVersionLatest() {
    final StreamsBuilder builder = new StreamsBuilder();
    // Build the two source KTables the right join operates on.
    @SuppressWarnings("unchecked")
    final KTableImpl<String, String, String> leftTable =
        (KTableImpl<String, String, String>) builder.table("left", Consumed.with(Serdes.String(), Serdes.String()));
    @SuppressWarnings("unchecked")
    final KTableImpl<String, String, String> rightTable =
        (KTableImpl<String, String, String>) builder.table("right", Consumed.with(Serdes.String(), Serdes.String()));
    final Processor<String, Change<String>, String, Change<Object>> join =
        new KTableKTableRightJoin<>(leftTable, rightTable, null).get();

    // Use the latest built-in metrics version for this test.
    props.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, StreamsConfig.METRICS_LATEST);
    final MockProcessorContext<String, Change<Object>> context = new MockProcessorContext<>(props);
    context.setRecordMetadata("left", -1, -2);
    join.init(context);

    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KTableKTableRightJoin.class)) {
        // A record with a null key must be skipped and logged at WARN, not joined.
        join.process(new Record<>(null, new Change<>("new", "old"), 0));
        final List<String> warnMessages = appender.getEvents().stream()
            .filter(event -> event.getLevel().equals("WARN"))
            .map(Event::getMessage)
            .collect(Collectors.toList());
        assertThat(
            warnMessages,
            hasItem("Skipping record due to null key. topic=[left] partition=[-1] offset=[-2]"));
    }
}
Aggregations: further usages of MockProcessorContext are listed below.