Use of org.apache.kafka.common.serialization.Serdes.String in the Apache Kafka project.
From the class KTableSuppressProcessorTest, method finalResultsWithZeroGraceShouldStillBufferUntilTheWindowEnd.
/**
 * Testing a special case of final results: that even with a grace period of 0,
 * it will still buffer events and emit only after the end of the window.
 * As opposed to emitting immediately the way regular suppression would with a time limit of 0.
 */
@Test
public void finalResultsWithZeroGraceShouldStillBufferUntilTheWindowEnd() {
    final Harness<Windowed<String>, Long> harness =
        new Harness<>(finalResults(ofMillis(0L)), timeWindowedSerdeFrom(String.class, 100L), Long());
    final MockInternalNewProcessorContext<Windowed<String>, Change<Long>> context = harness.context;

    final long recordTime = 5L;
    final long windowEnd = 100L;

    // The record itself is in the past, but its window closes in the future, so the
    // processor must buffer it even though the grace period is zero.
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(recordTime);
    final Windowed<String> windowedKey = new Windowed<>("hey", new TimeWindow(0, windowEnd));
    final Change<Long> change = ARBITRARY_CHANGE;
    harness.processor.process(new Record<>(windowedKey, change, recordTime));
    assertThat(context.forwarded(), hasSize(0));

    // Advancing stream time to the window end should flush the buffered result.
    context.setRecordMetadata("", 0, 1L);
    context.setTimestamp(windowEnd);
    harness.processor.process(new Record<>(
        new Windowed<>("dummyKey", new TimeWindow(windowEnd, windowEnd + 100L)),
        ARBITRARY_CHANGE,
        windowEnd));
    assertThat(context.forwarded(), hasSize(1));

    final MockProcessorContext.CapturedForward forwarded = context.forwarded().get(0);
    assertThat(forwarded.record(), is(new Record<>(windowedKey, change, recordTime)));
}
Use of org.apache.kafka.common.serialization.Serdes.String in the Apache Kafka project.
From the class KTableSuppressProcessorTest, method suppressShouldShutDownWhenOverRecordCapacity.
/**
 * When the suppression buffer is configured with {@code shutDownWhenFull()} and its record-count
 * limit is exceeded, processing the overflowing record should throw a {@link StreamsException}
 * (with a "buffer exceeded its max capacity" message) rather than emit early or drop data.
 */
@Test
public void suppressShouldShutDownWhenOverRecordCapacity() {
    final Harness<String, Long> harness =
        new Harness<>(untilTimeLimit(Duration.ofDays(100), maxRecords(1).shutDownWhenFull()), String(), Long());
    final MockInternalNewProcessorContext<String, Change<Long>> context = harness.context;

    final long timestamp = 100L;
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(timestamp);
    // Diamond operator instead of a raw ProcessorNode — avoids a raw-type/unchecked warning.
    context.setCurrentNode(new ProcessorNode<>("testNode"));

    // The first record fills the buffer to its configured maximum of one record.
    final String key = "hey";
    final Change<Long> value = new Change<>(null, ARBITRARY_LONG);
    harness.processor.process(new Record<>(key, value, timestamp));

    // The second record overflows the buffer, which should trigger the shutdown behavior.
    context.setRecordMetadata("", 0, 1L);
    context.setTimestamp(timestamp);
    try {
        harness.processor.process(new Record<>("dummyKey", value, timestamp));
        fail("expected an exception");
    } catch (final StreamsException e) {
        assertThat(e.getMessage(), containsString("buffer exceeded its max capacity"));
    }
}
Use of org.apache.kafka.common.serialization.Serdes.String in the Apache Kafka project.
From the class KTableSuppressProcessorTest, method finalResultsShouldDropTombstonesForTimeWindows.
/**
 * It's desirable to drop tombstones for final-results windowed streams, since (as described in the
 * {@link SuppressedInternal} javadoc), they are unnecessary to emit.
 */
@Test
public void finalResultsShouldDropTombstonesForTimeWindows() {
    final Harness<Windowed<String>, Long> harness =
        new Harness<>(finalResults(ofMillis(0L)), timeWindowedSerdeFrom(String.class, 100L), Long());
    final MockInternalNewProcessorContext<Windowed<String>, Change<Long>> context = harness.context;

    final long recordTime = 100L;
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(recordTime);

    // A tombstone change: the new value is null (see the test name; prior value is arbitrary).
    final Change<Long> tombstone = new Change<>(null, ARBITRARY_LONG);
    final Windowed<String> windowedKey = new Windowed<>("hey", new TimeWindow(0, 100L));
    harness.processor.process(new Record<>(windowedKey, tombstone, recordTime));

    // Final-results suppression swallows the tombstone entirely: nothing is forwarded.
    assertThat(context.forwarded(), hasSize(0));
}
Use of org.apache.kafka.common.serialization.Serdes.String in the Apache Kafka project.
From the class KTableSuppressProcessorTest, method suppressShouldNotDropTombstonesForSessionWindows.
/**
 * It's NOT OK to drop tombstones for non-final-results windowed streams, since we may have emitted some results for
 * the window before getting the tombstone (see the {@link SuppressedInternal} javadoc).
 */
@Test
public void suppressShouldNotDropTombstonesForSessionWindows() {
    final Harness<Windowed<String>, Long> harness =
        new Harness<>(untilTimeLimit(ofMillis(0), maxRecords(0)), sessionWindowedSerdeFrom(String.class), Long());
    final MockInternalNewProcessorContext<Windowed<String>, Change<Long>> context = harness.context;

    final long recordTime = 100L;
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(recordTime);

    // A tombstone change (null new value) for a zero-length session window.
    final Windowed<String> windowedKey = new Windowed<>("hey", new SessionWindow(0L, 0L));
    final Change<Long> tombstone = new Change<>(null, ARBITRARY_LONG);
    harness.processor.process(new Record<>(windowedKey, tombstone, recordTime));

    // With a zero time limit and zero record capacity, suppression must forward the
    // tombstone immediately rather than drop it.
    assertThat(context.forwarded(), hasSize(1));
    final MockProcessorContext.CapturedForward forwarded = context.forwarded().get(0);
    assertThat(forwarded.record(), is(new Record<>(windowedKey, tombstone, recordTime)));
}
Use of org.apache.kafka.common.serialization.Serdes.String in the Apache Kafka project.
From the class KTableSuppressProcessorTest, method suppressShouldNotDropTombstonesForTimeWindows.
/**
 * It's NOT OK to drop tombstones for non-final-results windowed streams, since we may have emitted some results for
 * the window before getting the tombstone (see the {@link SuppressedInternal} javadoc).
 */
@Test
public void suppressShouldNotDropTombstonesForTimeWindows() {
    final Harness<Windowed<String>, Long> harness =
        new Harness<>(untilTimeLimit(ofMillis(0), maxRecords(0)), timeWindowedSerdeFrom(String.class, 100L), Long());
    final MockInternalNewProcessorContext<Windowed<String>, Change<Long>> context = harness.context;

    final long recordTime = 100L;
    final Headers recordHeaders = new RecordHeaders().add("k", "v".getBytes(StandardCharsets.UTF_8));
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(recordTime);
    context.setHeaders(recordHeaders);

    // A tombstone change (null new value) for a time window.
    final Windowed<String> windowedKey = new Windowed<>("hey", new TimeWindow(0L, 100L));
    final Change<Long> tombstone = new Change<>(null, ARBITRARY_LONG);
    harness.processor.process(new Record<>(windowedKey, tombstone, recordTime));

    // With a zero time limit and zero record capacity, suppression must forward the
    // tombstone immediately — headers included — rather than drop it.
    assertThat(context.forwarded(), hasSize(1));
    final MockProcessorContext.CapturedForward forwarded = context.forwarded().get(0);
    assertThat(forwarded.record(), is(new Record<>(windowedKey, tombstone, recordTime, recordHeaders)));
}
Aggregations