Use of org.apache.kafka.test.MockRecordCollector in project kafka by apache.
The class SegmentIteratorTest, method before().
@SuppressWarnings("rawtypes")
@Before
public void before() {
    final InternalMockProcessorContext context = new InternalMockProcessorContext<>(
        TestUtils.tempDirectory(),
        Serdes.String(),
        Serdes.String(),
        new MockRecordCollector(),
        new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())));
    segmentOne.init((StateStoreContext) context, segmentOne);
    segmentTwo.init((StateStoreContext) context, segmentTwo);
    segmentOne.put(Bytes.wrap("a".getBytes()), "1".getBytes());
    segmentOne.put(Bytes.wrap("b".getBytes()), "2".getBytes());
    segmentTwo.put(Bytes.wrap("c".getBytes()), "3".getBytes());
    segmentTwo.put(Bytes.wrap("d".getBytes()), "4".getBytes());
}
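A minimal usage sketch (not part of the original test class; the test name is hypothetical): once before() has run, each segment can be read back directly through the same KeyValueSegment API the setup uses.
@Test
public void shouldReadBackSegmentContents() {
    // reads hit the segment that before() populated
    assertEquals("1", new String(segmentOne.get(Bytes.wrap("a".getBytes()))));
    assertEquals("3", new String(segmentTwo.get(Bytes.wrap("c".getBytes()))));
}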
Use of org.apache.kafka.test.MockRecordCollector in project kafka by apache.
The class AbstractSessionBytesStoreTest, method setUp().
@Before
public void setUp() {
    sessionStore = buildSessionStore(RETENTION_PERIOD, Serdes.String(), Serdes.Long());
    recordCollector = new MockRecordCollector();
    context = new InternalMockProcessorContext<>(
        TestUtils.tempDirectory(),
        Serdes.String(),
        Serdes.Long(),
        recordCollector,
        new ThreadCache(new LogContext("testCache"), 0, new MockStreamsMetrics(new Metrics())));
    context.setTime(1L);
    sessionStore.init((StateStoreContext) context, sessionStore);
}
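A hedged sketch of a round trip against the store built above (the test name is hypothetical; Windowed and SessionWindow are the standard Kafka Streams types): put() stores a value under a session window, and fetch(key) iterates that key's sessions.
@Test
public void shouldPutAndFetchASession() {
    final Windowed<String> session = new Windowed<>("a", new SessionWindow(0L, 100L));
    sessionStore.put(session, 2L);
    // fetch(key) returns every session for that key, oldest first
    try (final KeyValueIterator<Windowed<String>, Long> iterator = sessionStore.fetch("a")) {
        assertEquals(session, iterator.next().key);
    }
}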
Use of org.apache.kafka.test.MockRecordCollector in project kafka by apache.
The class TimeOrderedKeyValueBufferTest, method shouldFlush().
@Test
public void shouldFlush() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);
    putRecord(buffer, context, 2L, 0L, "asdf", "2093j");
    putRecord(buffer, context, 1L, 1L, "zxcv", "3gon4i");
    putRecord(buffer, context, 0L, 2L, "deleteme", "deadbeef");
    // replace "deleteme" with a tombstone
    buffer.evictWhile(() -> buffer.minTimestamp() < 1, kv -> { });
    // flush everything to the changelog
    buffer.flush();
    // the buffer should serialize the buffer time and the value as byte[],
    // which we can't compare for equality using ProducerRecord.
    // As a workaround, I'm deserializing them and shoving them in a KeyValue, just for ease of testing.
    final List<ProducerRecord<String, KeyValue<Long, BufferValue>>> collected =
        ((MockRecordCollector) context.recordCollector())
            .collected()
            .stream()
            .map(pr -> {
                final KeyValue<Long, BufferValue> niceValue;
                if (pr.value() == null) {
                    niceValue = null;
                } else {
                    final byte[] serializedValue = (byte[]) pr.value();
                    final ByteBuffer valueBuffer = ByteBuffer.wrap(serializedValue);
                    final BufferValue contextualRecord = BufferValue.deserialize(valueBuffer);
                    final long timestamp = valueBuffer.getLong();
                    niceValue = new KeyValue<>(timestamp, contextualRecord);
                }
                return new ProducerRecord<>(pr.topic(),
                                            pr.partition(),
                                            pr.timestamp(),
                                            pr.key().toString(),
                                            niceValue,
                                            pr.headers());
            })
            .collect(Collectors.toList());
    assertThat(collected, is(asList(
        new ProducerRecord<>(APP_ID + "-" + testName + "-changelog",
                             0, // Producer will assign
                             null,
                             "deleteme",
                             null,
                             new RecordHeaders()),
        new ProducerRecord<>(APP_ID + "-" + testName + "-changelog",
                             0,
                             null,
                             "zxcv",
                             new KeyValue<>(1L, getBufferValue("3gon4i", 1)),
                             CHANGELOG_HEADERS),
        new ProducerRecord<>(APP_ID + "-" + testName + "-changelog",
                             0,
                             null,
                             "asdf",
                             new KeyValue<>(2L, getBufferValue("2093j", 0)),
                             CHANGELOG_HEADERS))));
    cleanup(context, buffer);
}
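The deserialization above is only needed because the changelog values arrive as raw bytes; the underlying MockRecordCollector pattern is simpler. A minimal sketch (hypothetical fragment, not from the test): the collector accumulates every ProducerRecord a store sends, so a test can inspect them after flush().
// hypothetical fragment: list what the store wrote to its changelog
final MockRecordCollector collector = (MockRecordCollector) context.recordCollector();
for (final ProducerRecord<Object, Object> record : collector.collected()) {
    System.out.println(record.topic() + "/" + record.partition() + " key=" + record.key());
}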
Use of org.apache.kafka.test.MockRecordCollector in project kafka by apache.
The class AbstractRocksDBSegmentedBytesStoreTest, method before().
@Before
public void before() {
    if (schema instanceof SessionKeySchema) {
        windows[0] = new SessionWindow(10L, 10L);
        windows[1] = new SessionWindow(500L, 1000L);
        windows[2] = new SessionWindow(1_000L, 1_500L);
        windows[3] = new SessionWindow(30_000L, 60_000L);
        // All four of the previous windows will go into segment 1.
        // The nextSegmentWindow is computed to be at a high enough time that, when it gets
        // written to the segment store, it will advance stream time past the first
        // segment's retention time and expire it.
        nextSegmentWindow = new SessionWindow(segmentInterval + retention, segmentInterval + retention);
    }
    if (schema instanceof WindowKeySchema) {
        windows[0] = timeWindowForSize(10L, windowSizeForTimeWindow);
        windows[1] = timeWindowForSize(500L, windowSizeForTimeWindow);
        windows[2] = timeWindowForSize(1_000L, windowSizeForTimeWindow);
        windows[3] = timeWindowForSize(60_000L, windowSizeForTimeWindow);
        // All four of the previous windows will go into segment 1.
        // The nextSegmentWindow is computed to be at a high enough time that, when it gets
        // written to the segment store, it will advance stream time past the first
        // segment's retention time and expire it.
        nextSegmentWindow = timeWindowForSize(segmentInterval + retention, windowSizeForTimeWindow);
    }
    bytesStore = getBytesStore();
    stateDir = TestUtils.tempDirectory();
    context = new InternalMockProcessorContext<>(
        stateDir,
        Serdes.String(),
        Serdes.Long(),
        new MockRecordCollector(),
        new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())));
    bytesStore.init((StateStoreContext) context, bytesStore);
}
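To exercise the expiry behavior the comments describe, a test only needs one write in nextSegmentWindow. A hedged sketch, assuming serializeKey()/serializeValue() helpers like the ones these test classes define for encoding windowed keys and values through the schema under test (hypothetical names here):
// writes into windows[0] land in segment 1 (serializeKey/serializeValue are the
// hypothetical encoding helpers noted above)
bytesStore.put(serializeKey(new Windowed<>("a", windows[0])), serializeValue(10L));
// a write this far in the future advances stream time and expires segment 1
bytesStore.put(serializeKey(new Windowed<>("a", nextSegmentWindow)), serializeValue(20L));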
Use of org.apache.kafka.test.MockRecordCollector in project kafka by apache.
The class AbstractWindowBytesStoreTest, method setup().
@Before
public void setup() {
    windowStore = buildWindowStore(RETENTION_PERIOD, WINDOW_SIZE, false, Serdes.Integer(), Serdes.String());
    recordCollector = new MockRecordCollector();
    context = new InternalMockProcessorContext<>(
        baseDir,
        Serdes.String(),
        Serdes.Integer(),
        recordCollector,
        new ThreadCache(new LogContext("testCache"), 0, new MockStreamsMetrics(new Metrics())));
    context.setTime(1L);
    windowStore.init((StateStoreContext) context, windowStore);
}
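As a usage note, a minimal sketch against the store built in setup() (the test name is hypothetical): put() takes an explicit window start timestamp, and fetch() scans a key's windows by start-time range.
@Test
public void shouldPutAndFetchFromWindow() {
    windowStore.put(1, "one", 0L);
    // values for key 1 whose windows start in [0, WINDOW_SIZE]
    try (final WindowStoreIterator<String> iterator = windowStore.fetch(1, 0L, WINDOW_SIZE)) {
        assertEquals("one", iterator.next().value);
    }
}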