Usage of io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaFlushExFW in the zilla project by aklivity.
From class KafkaFunctionsTest, method shouldGenerateFetchFlushExtensionWithLatestOffset:
@Test
public void shouldGenerateFetchFlushExtensionWithLatestOffset() {
    // Encode a fetch FLUSH extension carrying partition id, offset, and latest offset.
    final byte[] encoded = KafkaFunctions.flushEx()
        .typeId(0x01)
        .fetch()
        .partition(0, 1L, 1L)
        .build()
        .build();

    // Wrap the encoded bytes and decode the flyweight back.
    final DirectBuffer wrapBuffer = new UnsafeBuffer(encoded);
    final KafkaFlushExFW decoded = new KafkaFlushExFW().wrap(wrapBuffer, 0, wrapBuffer.capacity());

    // Verify every encoded field round-trips, including the latest offset.
    assertEquals(0x01, decoded.typeId());
    final KafkaOffsetFW offset = decoded.fetch().partition();
    assertEquals(0, offset.partitionId());
    assertEquals(1L, offset.partitionOffset());
    assertEquals(1L, offset.latestOffset());
}
Usage of io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaFlushExFW in the zilla project by aklivity.
From class KafkaFunctionsTest, method shouldGenerateFetchFlushExtension:
@Test
public void shouldGenerateFetchFlushExtension() {
    // Encode a fetch FLUSH extension with partition id and partition offset only.
    final byte[] encoded = KafkaFunctions.flushEx()
        .typeId(0x01)
        .fetch()
        .partition(0, 1L)
        .build()
        .build();

    // Decode the flyweight from the encoded bytes.
    final DirectBuffer wrapBuffer = new UnsafeBuffer(encoded);
    final KafkaFlushExFW decoded = new KafkaFlushExFW().wrap(wrapBuffer, 0, wrapBuffer.capacity());

    // Verify the type id and the partition fields round-trip.
    assertEquals(0x01, decoded.typeId());
    final KafkaOffsetFW offset = decoded.fetch().partition();
    assertEquals(0, offset.partitionId());
    assertEquals(1L, offset.partitionOffset());
}
Usage of io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaFlushExFW in the zilla project by aklivity.
From class KafkaFunctionsTest, method shouldGenerateMergedFlushExtension:
@Test
public void shouldGenerateMergedFlushExtension() {
    // Encode a merged FLUSH extension with one progress entry and two filters
    // (a key filter and a header filter).
    final byte[] encoded = KafkaFunctions.flushEx()
        .typeId(0x01)
        .merged()
        .progress(0, 1L)
        .filter().key("match").build()
        .filter().header("name", "value").build()
        .build()
        .build();

    // Decode the flyweight from the encoded bytes.
    final DirectBuffer wrapBuffer = new UnsafeBuffer(encoded);
    final KafkaFlushExFW decoded = new KafkaFlushExFW().wrap(wrapBuffer, 0, wrapBuffer.capacity());
    assertEquals(0x01, decoded.typeId());

    final KafkaMergedFlushExFW merged = decoded.merged();

    // Exactly one progress entry, matching the encoded partition/offset pair.
    final MutableInteger progressCount = new MutableInteger();
    merged.progress().forEach(p -> progressCount.value++);
    assertEquals(1, progressCount.value);
    assertNotNull(merged.progress()
        .matchFirst(p -> p.partitionId() == 0 && p.partitionOffset() == 1L));

    // Exactly two filters were encoded.
    final MutableInteger filterTally = new MutableInteger();
    merged.filters().forEach(f -> filterTally.value++);
    assertEquals(2, filterTally.value);

    // One filter holds a KEY condition whose key decodes to "match".
    assertNotNull(merged.filters().matchFirst(f ->
        f.conditions().matchFirst(c ->
            c.kind() == KEY.value() &&
            "match".equals(c.key().value()
                .get((buf, off, max) -> buf.getStringWithoutLengthUtf8(off, max - off)))) != null));

    // The other filter holds a HEADER condition with name "name" and value "value".
    assertNotNull(merged.filters().matchFirst(f ->
        f.conditions().matchFirst(c ->
            c.kind() == HEADER.value() &&
            "name".equals(c.header().name()
                .get((buf, off, max) -> buf.getStringWithoutLengthUtf8(off, max - off))) &&
            "value".equals(c.header().value()
                .get((buf, off, max) -> buf.getStringWithoutLengthUtf8(off, max - off)))) != null));
}
Aggregations