Use of io.camunda.zeebe.protocol.impl.record.value.job.JobBatchRecord in project zeebe-process-test by camunda.
The class GrpcToLogStreamGateway, method activateJobs.
@Override
public void activateJobs(
    final ActivateJobsRequest request,
    final StreamObserver<ActivateJobsResponse> responseObserver) {
  executor.submit(
      () -> {
        final Long requestId = registerNewRequest(responseObserver);

        // prepareRecordMetadata() resets and returns the gateway's shared recordMetadata
        // field, so the chained calls below configure the same object that is later passed
        // to writeCommandWithoutKey
        prepareRecordMetadata()
            .requestId(requestId)
            .valueType(ValueType.JOB_BATCH)
            .intent(JobBatchIntent.ACTIVATE);

        final JobBatchRecord jobBatchRecord = new JobBatchRecord();
        jobBatchRecord.setType(request.getType());
        jobBatchRecord.setWorker(request.getWorker());
        jobBatchRecord.setTimeout(request.getTimeout());
        jobBatchRecord.setMaxJobsToActivate(request.getMaxJobsToActivate());

        writeCommandWithoutKey(recordMetadata, jobBatchRecord);
      });
}
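For context, a caller reaches this gateway method through the generated gRPC stub. Below is a minimal sketch of building the request this method consumes, using the builder generated from the Zeebe gateway protocol; the concrete field values are illustrative, not taken from the source.

import io.camunda.zeebe.gateway.protocol.GatewayOuterClass.ActivateJobsRequest;

// Build the command payload that activateJobs() above translates into a
// JobBatchRecord; each field maps to one of the setters used in the gateway method.
final ActivateJobsRequest request =
    ActivateJobsRequest.newBuilder()
        .setType("payment-service")   // job type, becomes jobBatchRecord.setType(...)
        .setWorker("worker-1")        // worker name
        .setTimeout(30_000L)          // activation timeout in milliseconds
        .setMaxJobsToActivate(32)     // upper bound for the batch size
        .build();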
Use of io.camunda.zeebe.protocol.impl.record.value.job.JobBatchRecord in project zeebe by camunda.
The class JobBatchActivateProcessor, method rejectCommand.
private void rejectCommand(final TypedRecord<JobBatchRecord> record) {
  final RejectionType rejectionType;
  final String rejectionReason;
  final JobBatchRecord value = record.getValue();
  final var format = "Expected to activate job batch with %s to be %s, but it was %s";

  if (value.getMaxJobsToActivate() < 1) {
    rejectionType = RejectionType.INVALID_ARGUMENT;
    rejectionReason =
        String.format(
            format,
            "max jobs to activate",
            "greater than zero",
            String.format("'%d'", value.getMaxJobsToActivate()));
  } else if (value.getTimeout() < 1) {
    rejectionType = RejectionType.INVALID_ARGUMENT;
    rejectionReason =
        String.format(
            format, "timeout", "greater than zero", String.format("'%d'", value.getTimeout()));
  } else if (value.getTypeBuffer().capacity() < 1) {
    rejectionType = RejectionType.INVALID_ARGUMENT;
    rejectionReason = String.format(format, "type", "present", "blank");
  } else {
    throw new IllegalStateException(
        "Expected to reject an invalid activate job batch command, but it appears to be valid");
  }

  rejectionWriter.appendRejection(record, rejectionType, rejectionReason);
  responseWriter.writeRejectionOnCommand(record, rejectionType, rejectionReason);
}
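To make the reason template concrete: for a command with maxJobsToActivate set to 0, the first branch produces the message shown below. This is a standalone illustration of the String.format calls above, not additional processor code.

// Standalone illustration of the rejection-reason template used in rejectCommand.
final var format = "Expected to activate job batch with %s to be %s, but it was %s";
final String reason =
    String.format(
        format, "max jobs to activate", "greater than zero", String.format("'%d'", 0));
// reason == "Expected to activate job batch with max jobs to activate to be
//            greater than zero, but it was '0'"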
Use of io.camunda.zeebe.protocol.impl.record.value.job.JobBatchRecord in project zeebe by camunda.
The class JobBatchCollector, method collectJobs.
/**
 * Collects jobs to be added to the given {@code record}. The jobs and their keys are added
 * directly to the given record.
 *
 * <p>This method fails only if it could not activate anything because the batch would be too
 * large, but there was at least one job to activate. On failure, it returns that job and its
 * key. On success, it returns the number of activated jobs.
 *
 * @param record the batch activate command; jobs and their keys will be added directly into it
 * @return the number of activated jobs on success, or a job which was too large to activate
 */
Either<TooLargeJob, Integer> collectJobs(final TypedRecord<JobBatchRecord> record) {
  final JobBatchRecord value = record.getValue();
  final ValueArray<JobRecord> jobIterator = value.jobs();
  final ValueArray<LongValue> jobKeyIterator = value.jobKeys();
  final Collection<DirectBuffer> requestedVariables = collectVariableNames(value);
  final var maxActivatedCount = value.getMaxJobsToActivate();
  final var activatedCount = new MutableInteger(0);
  final var jobCopyBuffer = new ExpandableArrayBuffer();
  final var unwritableJob = new MutableReference<TooLargeJob>();

  jobState.forEachActivatableJobs(
      value.getTypeBuffer(),
      (key, jobRecord) -> {
        // fill in the job record properties first in order to accurately estimate its size
        // before adding it to the batch
        final var deadline = record.getTimestamp() + value.getTimeout();
        jobRecord.setDeadline(deadline).setWorker(value.getWorkerBuffer());
        setJobVariables(requestedVariables, jobRecord, jobRecord.getElementInstanceKey());

        // the expected length is based on the current record's length plus the length of the
        // job record we would add to the batch, the number of bytes taken by the additional
        // job key, as well as one byte required per job key for its type header. if we ever
        // add more, this should be updated accordingly.
        final var jobRecordLength = jobRecord.getLength();
        final var expectedEventLength = record.getLength() + jobRecordLength + Long.BYTES + 1;
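        // worked example (illustrative numbers, not from the source): with a current record
        // of 1024 bytes and a job record of 512 bytes, the expected event length is
        // 1024 + 512 + 8 (Long.BYTES) + 1 (type header) = 1545 bytes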
        if (activatedCount.value <= maxActivatedCount
            && canWriteEventOfLength.test(expectedEventLength)) {
          appendJobToBatch(jobIterator, jobKeyIterator, jobCopyBuffer, key, jobRecord);
          activatedCount.increment();
        } else {
          // if no jobs were activated yet, then the current job alone is too large, and we
          // cannot activate it
          if (activatedCount.value == 0) {
            unwritableJob.set(new TooLargeJob(key, jobRecord));
          }

          value.setTruncated(true);
          return false;
        }

        return activatedCount.value < maxActivatedCount;
      });

  if (unwritableJob.ref != null) {
    return Either.left(unwritableJob.ref);
  }

  return Either.right(activatedCount.value);
}
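A caller can branch on the Either result to distinguish the two outcomes. The sketch below assumes Either's isRight()/get()/getLeft() accessors; the two handler methods are hypothetical placeholders, not the real processor code (the actual handling lives in JobBatchActivateProcessor).

// Minimal caller sketch with hypothetical handler names.
final Either<TooLargeJob, Integer> result = collector.collectJobs(record);
if (result.isRight()) {
  final int activatedJobs = result.get();        // number of jobs appended to the batch
  writeActivatedResponse(record, activatedJobs); // hypothetical helper
} else {
  final TooLargeJob tooLargeJob = result.getLeft();
  raiseIncidentForTooLargeJob(tooLargeJob);      // hypothetical helper
}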
Use of io.camunda.zeebe.protocol.impl.record.value.job.JobBatchRecord in project zeebe by camunda.
The class JobBatchCollectorTest, method shouldAppendJobKeyToBatchRecord.
@Test
void shouldAppendJobKeyToBatchRecord() {
  // given - two jobs in the same scope, so the batch should end up containing both keys
  final TypedRecord<JobBatchRecord> record = createRecord();
  final long scopeKey = state.getKeyGenerator().nextKey();
  final List<Job> jobs = Arrays.asList(createJob(scopeKey), createJob(scopeKey));

  // when
  collector.collectJobs(record);

  // then
  final JobBatchRecord batchRecord = record.getValue();
  JobBatchRecordValueAssert.assertThat(batchRecord)
      .hasJobKeys(jobs.get(0).key, jobs.get(1).key);
}
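The createRecord and createJob helpers are defined elsewhere in JobBatchCollectorTest and are not shown on this page. Purely as an illustration of their likely shape, here is a rough sketch; every name and signature below is assumed rather than taken from the source.

// Hypothetical sketch only; the real helper presumably writes the command through the
// engine's test state rather than constructing the value in isolation.
private TypedRecord<JobBatchRecord> createRecord() {
  final JobBatchRecord value =
      new JobBatchRecord()
          .setType("test")            // must match the type queried by collectJobs
          .setTimeout(10_000L)
          .setMaxJobsToActivate(10);
  // MockTypedRecord is an assumed test utility wrapping the value with key and metadata
  return new MockTypedRecord<>(state.getKeyGenerator().nextKey(), new RecordMetadata(), value);
}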
Use of io.camunda.zeebe.protocol.impl.record.value.job.JobBatchRecord in project zeebe by camunda.
The class JobBatchCollectorTest, method shouldActivateUpToMaxJobs.
@Test
void shouldActivateUpToMaxJobs() {
  // given
  final TypedRecord<JobBatchRecord> record = createRecord();
  final long scopeKey = state.getKeyGenerator().nextKey();
  final List<Job> jobs = Arrays.asList(createJob(scopeKey), createJob(scopeKey));
  record.getValue().setMaxJobsToActivate(1);

  // when
  final Either<TooLargeJob, Integer> result = collector.collectJobs(record);

  // then
  final JobBatchRecord batchRecord = record.getValue();
  EitherAssert.assertThat(result).as("should collect only the first job").right().isEqualTo(1);
  JobBatchRecordValueAssert.assertThat(batchRecord)
      .hasJobKeys(jobs.get(0).key)
      .isNotTruncated();
}
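Note the isNotTruncated() assertion: stopping because maxJobsToActivate was reached is a normal stop, while setTruncated(true) is only set in collectJobs when a job does not fit the batch. A complementary test for the truncation path might look like the sketch below; it assumes the collector's canWriteEventOfLength predicate can be stubbed to reject, and that EitherAssert offers isLeft() and the generated JobBatchRecordValueAssert offers isTruncated(), mirroring the assertions used above.

// Hypothetical complementary test: force the size check to fail so the batch is truncated.
@Test
void shouldTruncateBatchWhenJobDoesNotFit() {
  // given - a collector whose canWriteEventOfLength predicate always rejects (assumed setup)
  final TypedRecord<JobBatchRecord> record = createRecord();
  final long scopeKey = state.getKeyGenerator().nextKey();
  createJob(scopeKey);

  // when
  final Either<TooLargeJob, Integer> result = collector.collectJobs(record);

  // then - the single job is reported as too large and the batch is marked truncated
  EitherAssert.assertThat(result).isLeft();
  JobBatchRecordValueAssert.assertThat(record.getValue()).isTruncated();
}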