Use of org.agrona.collections.MutableInteger in project zilla by aklivity.
The class KafkaFunctionsTest, method shouldGenerateMergedBeginExtension.
@Test
public void shouldGenerateMergedBeginExtension() {
    byte[] build = KafkaFunctions.beginEx()
        .typeId(0x01)
        .merged()
            .topic("topic")
            .partition(0, 1L)
            .filter()
                .key("match")
                .build()
            .filter()
                .header("name", "value")
                .build()
            .build()
        .build();

    DirectBuffer buffer = new UnsafeBuffer(build);
    KafkaBeginExFW beginEx = new KafkaBeginExFW().wrap(buffer, 0, buffer.capacity());
    assertEquals(0x01, beginEx.typeId());
    assertEquals(KafkaApi.MERGED.value(), beginEx.kind());

    final KafkaMergedBeginExFW mergedBeginEx = beginEx.merged();
    assertEquals("topic", mergedBeginEx.topic().asString());
    assertNotNull(mergedBeginEx.partitions()
        .matchFirst(p -> p.partitionId() == 0 && p.partitionOffset() == 1L));

    final MutableInteger filterCount = new MutableInteger();
    mergedBeginEx.filters().forEach(f -> filterCount.value++);
    assertEquals(2, filterCount.value);

    assertNotNull(mergedBeginEx.filters()
        .matchFirst(f -> f.conditions()
            .matchFirst(c -> c.kind() == KEY.value() &&
                "match".equals(c.key().value()
                    .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)))) != null));

    assertNotNull(mergedBeginEx.filters()
        .matchFirst(f -> f.conditions()
            .matchFirst(c -> c.kind() == HEADER.value() &&
                "name".equals(c.header().name()
                    .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o))) &&
                "value".equals(c.header().value()
                    .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)))) != null));
}
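Each of these tests counts list entries with the same idiom: a lambda passed to forEach can only capture effectively final locals, so the running count lives in a MutableInteger holder whose public value field is mutated in place. A minimal standalone sketch of that idiom (not taken from the zilla codebase; the list and its contents are invented for illustration, only MutableInteger comes from Agrona):

import java.util.List;

import org.agrona.collections.MutableInteger;

public class CountWithMutableInteger {
    public static void main(String[] args) {
        List<String> items = List.of("a", "b", "c");

        // a local int could not be reassigned from inside the lambda below,
        // so the running count lives in a MutableInteger holder instead
        final MutableInteger count = new MutableInteger();
        items.forEach(item -> count.value++);

        System.out.println(count.value); // prints 3
    }
}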
Use of org.agrona.collections.MutableInteger in project zilla by aklivity.
The class KafkaFunctionsTest, method shouldGenerateMergedDataExtensionWithNullKeyAndNullByteArrayHeaderValue.
@Test
public void shouldGenerateMergedDataExtensionWithNullKeyAndNullByteArrayHeaderValue() {
    byte[] build = KafkaFunctions.dataEx()
        .typeId(0x01)
        .merged()
            .timestamp(12345678L)
            .partition(0, 0L)
            .progress(0, 1L)
            .key(null)
            .headerBytes("name", null)
            .build()
        .build();

    DirectBuffer buffer = new UnsafeBuffer(build);
    KafkaDataExFW dataEx = new KafkaDataExFW().wrap(buffer, 0, buffer.capacity());
    assertEquals(0x01, dataEx.typeId());
    assertEquals(KafkaApi.MERGED.value(), dataEx.kind());

    final KafkaMergedDataExFW mergedDataEx = dataEx.merged();
    assertEquals(12345678L, mergedDataEx.timestamp());

    final KafkaOffsetFW partition = mergedDataEx.partition();
    assertEquals(0, partition.partitionId());
    assertEquals(0L, partition.partitionOffset());

    final MutableInteger progressCount = new MutableInteger();
    mergedDataEx.progress().forEach(f -> progressCount.value++);
    assertEquals(1, progressCount.value);
    assertNotNull(mergedDataEx.progress()
        .matchFirst(p -> p.partitionId() == 0 && p.partitionOffset() == 1L));

    assertNull(mergedDataEx.key().value());

    final MutableInteger headersCount = new MutableInteger();
    mergedDataEx.headers().forEach(f -> headersCount.value++);
    assertEquals(1, headersCount.value);
    assertNotNull(mergedDataEx.headers()
        .matchFirst(h -> "name".equals(h.name()
            .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o))) &&
            Objects.isNull(h.value())));
}
Use of org.agrona.collections.MutableInteger in project zilla by aklivity.
The class KafkaFunctionsTest, method shouldGenerateMergedDataExtensionWithShortValue.
@Test
public void shouldGenerateMergedDataExtensionWithShortValue() {
    byte[] build = KafkaFunctions.dataEx()
        .typeId(0x01)
        .merged()
            .timestamp(12345678L)
            .partition(0, 0L)
            .progress(0, 1L)
            .key("match")
            .headerShort("name", (short) 1)
            .build()
        .build();

    DirectBuffer buffer = new UnsafeBuffer(build);
    KafkaDataExFW dataEx = new KafkaDataExFW().wrap(buffer, 0, buffer.capacity());
    assertEquals(0x01, dataEx.typeId());
    assertEquals(KafkaApi.MERGED.value(), dataEx.kind());

    final KafkaMergedDataExFW mergedDataEx = dataEx.merged();
    assertEquals(12345678L, mergedDataEx.timestamp());

    final KafkaOffsetFW partition = mergedDataEx.partition();
    assertEquals(0, partition.partitionId());
    assertEquals(0L, partition.partitionOffset());

    final MutableInteger progressCount = new MutableInteger();
    mergedDataEx.progress().forEach(f -> progressCount.value++);
    assertEquals(1, progressCount.value);
    assertNotNull(mergedDataEx.progress()
        .matchFirst(p -> p.partitionId() == 0 && p.partitionOffset() == 1L));

    assertEquals("match", mergedDataEx.key().value()
        .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)));

    final MutableInteger headersCount = new MutableInteger();
    mergedDataEx.headers().forEach(f -> headersCount.value++);
    assertEquals(1, headersCount.value);
    assertNotNull(mergedDataEx.headers()
        .matchFirst(h -> "name".equals(h.name()
            .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o))) &&
            h.value().get((b, o, m) -> b.getShort(o)) == (short) 1));
}
Use of org.agrona.collections.MutableInteger in project zilla by aklivity.
The class KafkaFunctionsTest, method shouldGenerateMergedDataExtensionWithNullValue.
@Test
public void shouldGenerateMergedDataExtensionWithNullValue() {
    byte[] build = KafkaFunctions.dataEx()
        .typeId(0x01)
        .merged()
            .timestamp(12345678L)
            .partition(0, 0L)
            .progress(0, 1L)
            .key("match")
            .headerNull("name")
            .build()
        .build();

    DirectBuffer buffer = new UnsafeBuffer(build);
    KafkaDataExFW dataEx = new KafkaDataExFW().wrap(buffer, 0, buffer.capacity());
    assertEquals(0x01, dataEx.typeId());
    assertEquals(KafkaApi.MERGED.value(), dataEx.kind());

    final KafkaMergedDataExFW mergedDataEx = dataEx.merged();
    assertEquals(12345678L, mergedDataEx.timestamp());

    final KafkaOffsetFW partition = mergedDataEx.partition();
    assertEquals(0, partition.partitionId());
    assertEquals(0L, partition.partitionOffset());

    final MutableInteger progressCount = new MutableInteger();
    mergedDataEx.progress().forEach(f -> progressCount.value++);
    assertEquals(1, progressCount.value);
    assertNotNull(mergedDataEx.progress()
        .matchFirst(p -> p.partitionId() == 0 && p.partitionOffset() == 1L));

    assertEquals("match", mergedDataEx.key().value()
        .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)));

    final MutableInteger headersCount = new MutableInteger();
    mergedDataEx.headers().forEach(f -> headersCount.value++);
    assertEquals(1, headersCount.value);
    assertNotNull(mergedDataEx.headers()
        .matchFirst(h -> "name".equals(h.name()
            .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o))) &&
            Objects.isNull(h.value())));
}
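The (b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o) lambda that recurs in these assertions is a flyweight visitor: it receives the backing DirectBuffer, the field's offset, and its limit, and decodes the bytes in between as UTF-8. The decoding call itself is plain Agrona; a minimal sketch of what the lambda body does outside of any flyweight (the buffer contents and offsets here are invented for illustration):

import java.nio.charset.StandardCharsets;

import org.agrona.DirectBuffer;
import org.agrona.concurrent.UnsafeBuffer;

public class DecodeUtf8Region {
    public static void main(String[] args) {
        DirectBuffer buffer = new UnsafeBuffer("name=value".getBytes(StandardCharsets.UTF_8));

        // decode the region [offset, limit) without any length prefix, exactly
        // as the (b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o) visitor does
        int offset = 5; // start of "value"
        int limit = buffer.capacity();
        String decoded = buffer.getStringWithoutLengthUtf8(offset, limit - offset);

        System.out.println(decoded); // prints "value"
    }
}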
Use of org.agrona.collections.MutableInteger in project zeebe by camunda.
The class JobBatchCollector, method collectJobs.
/**
 * Collects jobs to be added to the given {@code record}. The jobs and their keys are added
 * directly to the given record.
 *
 * <p>This method will fail only if it could not activate anything because the batch would be too
 * large, but there was at least one job to activate. On failure, it will return that job and its
 * key. On success, it will return the number of jobs activated.
 *
 * @param record the batch activate command; jobs and their keys will be added directly into it
 * @return the number of activated jobs on success, or a job which was too large to activate
 */
Either<TooLargeJob, Integer> collectJobs(final TypedRecord<JobBatchRecord> record) {
    final JobBatchRecord value = record.getValue();
    final ValueArray<JobRecord> jobIterator = value.jobs();
    final ValueArray<LongValue> jobKeyIterator = value.jobKeys();
    final Collection<DirectBuffer> requestedVariables = collectVariableNames(value);
    final var maxActivatedCount = value.getMaxJobsToActivate();
    final var activatedCount = new MutableInteger(0);
    final var jobCopyBuffer = new ExpandableArrayBuffer();
    final var unwritableJob = new MutableReference<TooLargeJob>();

    jobState.forEachActivatableJobs(value.getTypeBuffer(), (key, jobRecord) -> {
        // fill in the job record properties first in order to accurately estimate its size
        // before adding it to the batch
        final var deadline = record.getTimestamp() + value.getTimeout();
        jobRecord.setDeadline(deadline).setWorker(value.getWorkerBuffer());
        setJobVariables(requestedVariables, jobRecord, jobRecord.getElementInstanceKey());

        // the expected length is based on the current record's length plus the length of the job
        // record we would add to the batch, the number of bytes taken by the additional job key,
        // as well as one byte required per job key for its type header. if we ever add more, this
        // should be updated accordingly.
        final var jobRecordLength = jobRecord.getLength();
        final var expectedEventLength = record.getLength() + jobRecordLength + Long.BYTES + 1;

        if (activatedCount.value <= maxActivatedCount
            && canWriteEventOfLength.test(expectedEventLength)) {
            appendJobToBatch(jobIterator, jobKeyIterator, jobCopyBuffer, key, jobRecord);
            activatedCount.increment();
        } else {
            // if we could not activate anything, then the current job is simply too large,
            // and we cannot activate it
            if (activatedCount.value == 0) {
                unwritableJob.set(new TooLargeJob(key, jobRecord));
            }

            value.setTruncated(true);
            return false;
        }

        return activatedCount.value < maxActivatedCount;
    });

    if (unwritableJob.ref != null) {
        return Either.left(unwritableJob.ref);
    }

    return Either.right(activatedCount.value);
}
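collectJobs combines two Agrona holders to move state out of its callback: a MutableInteger for the running count and a MutableReference to carry the first unwritable item out of the lambda. A stripped-down sketch of that pattern, assuming a plain loop in place of jobState.forEachActivatableJobs and a made-up fits predicate standing in for canWriteEventOfLength:

import java.util.List;
import java.util.function.Predicate;

import org.agrona.collections.MutableInteger;
import org.agrona.collections.MutableReference;

public class CallbackStateHolders {
    public static void main(String[] args) {
        List<Integer> sizes = List.of(10, 20, 9_999);
        Predicate<Integer> fits = size -> size < 100; // stand-in for canWriteEventOfLength

        final MutableInteger accepted = new MutableInteger(0);
        final MutableReference<Integer> tooLarge = new MutableReference<>();

        for (Integer size : sizes) {
            if (fits.test(size)) {
                accepted.increment();
            } else {
                // remember the first item we could not accept, as collectJobs
                // does with its unwritableJob reference
                if (accepted.value == 0) {
                    tooLarge.set(size);
                }
                break;
            }
        }

        System.out.println(accepted.value); // prints 2
        System.out.println(tooLarge.ref);   // prints null: something was accepted first
    }
}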