Example usage of io.camunda.zeebe.protocol.record.Record in the zeebe project by camunda:
class MappingIncidentTest, method shouldResolveIncidentAfterPreviousResolvingFailed.
/**
 * An incident whose first resolution attempt fails (because the fix was incomplete) raises a new
 * incident, which can then be resolved once the required variables are actually provided.
 */
@Test
public void shouldResolveIncidentAfterPreviousResolvingFailed() {
  // given: a process whose input mapping fails because the variable 'foo' is missing
  ENGINE.deployment().withXmlResource(PROCESS_INPUT_MAPPING).deploy();
  final long processInstanceKey = ENGINE.processInstance().ofBpmnProcessId("process").create();

  // the failing task starts activating, which raises the first IO-mapping incident
  // (parameterized Record<?> instead of the raw type; only getKey() is used here)
  final Record<?> failureEvent =
      RecordingExporter.processInstanceRecords()
          .withElementId("failingTask")
          .withIntent(ProcessInstanceIntent.ELEMENT_ACTIVATING)
          .withProcessInstanceKey(processInstanceKey)
          .getFirst();
  final Record<IncidentRecordValue> firstIncident =
      RecordingExporter.incidentRecords()
          .withProcessInstanceKey(processInstanceKey)
          .withIntent(IncidentIntent.CREATED)
          .getFirst();

  // resolving with an empty document does not fix the mapping, so a second incident is created
  ENGINE.variables().ofScope(failureEvent.getKey()).withDocument(new HashMap<>()).update();
  ENGINE.incident().ofInstance(processInstanceKey).withKey(firstIncident.getKey()).resolve();
  final Record<IncidentRecordValue> secondIncident =
      RecordingExporter.incidentRecords()
          .onlyEvents()
          .withProcessInstanceKey(processInstanceKey)
          .skipUntil(e -> e.getIntent() == RESOLVED)
          .withIntent(IncidentIntent.CREATED)
          .getFirst();

  // when: the required variables are supplied and the second incident is resolved
  ENGINE.variables().ofScope(failureEvent.getKey()).withDocument(VARIABLES).update();
  final Record<IncidentRecordValue> secondResolvedIncident =
      ENGINE.incident().ofInstance(processInstanceKey).withKey(secondIncident.getKey()).resolve();

  // then: a newer incident was resolved, carrying the same failure details as the first one
  assertThat(secondResolvedIncident.getKey()).isGreaterThan(firstIncident.getKey());
  Assertions.assertThat(secondResolvedIncident.getValue())
      .hasErrorType(ErrorType.IO_MAPPING_ERROR)
      .hasBpmnProcessId("process")
      .hasProcessInstanceKey(processInstanceKey)
      .hasElementId("failingTask")
      .hasElementInstanceKey(failureEvent.getKey())
      .hasVariableScopeKey(failureEvent.getKey());
  assertThat(secondResolvedIncident.getValue().getErrorMessage())
      .contains("no variable found for name 'foo'");
}
Example usage of io.camunda.zeebe.protocol.record.Record in the zeebe project by camunda:
class MultiInstanceIncidentTest, method shouldCreateIncidentWhenInputCollectionModifiedConcurrently.
/**
 * This test is a bit more complex than shouldResolveIncidentDueToInputCollection, because it
 * tests a parallel multi-instance body that is about to activate, but while it's activating (and
 * before its children activate) the input collection is modified. This should result in
 * incidents on each of the children's activations, which can be resolved individually.
 */
@Test
public void shouldCreateIncidentWhenInputCollectionModifiedConcurrently() {
// given
// a process with a service task followed (via a named sequence flow) by a parallel
// multi-instance service task that reads its input collection from a variable
final var process = Bpmn.createExecutableProcess("multi-task").startEvent().serviceTask(ELEMENT_ID, t -> t.zeebeJobType(jobType)).sequenceFlowId("from-task-to-multi-instance").serviceTask("multi-instance", t -> t.zeebeJobType(jobType)).multiInstance(b -> b.parallel().zeebeInputCollectionExpression(INPUT_COLLECTION).zeebeInputElement(INPUT_ELEMENT)).endEvent().done();
final var deployment = ENGINE.deployment().withXmlResource(process).deploy();
final long processInstanceKey = ENGINE.processInstance().ofBpmnProcessId("multi-task").withVariable(INPUT_COLLECTION, List.of(1, 2, 3)).create();
// wait for the first service task to be activated and grab its job, so both can be
// referenced in the manually written records below
final var serviceTask = RecordingExporter.processInstanceRecords(ProcessInstanceIntent.ELEMENT_ACTIVATED).withProcessInstanceKey(processInstanceKey).withElementType(BpmnElementType.SERVICE_TASK).getFirst();
final var job = findNthJob(processInstanceKey, 1);
// reserve the key that the hand-written SEQUENCE_FLOW_TAKEN event will carry, then stop
// the engine so the record stream can be scripted deterministically
final var nextKey = ENGINE.getZeebeState().getKeyGenerator().nextKey();
ENGINE.stop();
RecordingExporter.reset();
// when
// pre-build the records for the sequence flow and the multi-instance body activation
final ProcessInstanceRecord sequenceFlow = Records.processInstance(processInstanceKey, "multi-task").setBpmnElementType(BpmnElementType.SEQUENCE_FLOW).setElementId("from-task-to-multi-instance").setFlowScopeKey(processInstanceKey).setProcessDefinitionKey(deployment.getValue().getProcessesMetadata().get(0).getProcessDefinitionKey());
final ProcessInstanceRecord multiInstanceBody = Records.processInstance(processInstanceKey, "multi-task").setBpmnElementType(BpmnElementType.MULTI_INSTANCE_BODY).setElementId("multi-instance").setFlowScopeKey(processInstanceKey).setProcessDefinitionKey(deployment.getValue().getProcessesMetadata().get(0).getProcessDefinitionKey());
// script the stream: complete the first task, take the sequence flow, command the
// multi-instance body to activate — and, concurrently, overwrite the input collection
// with a NUMBER ({"items":0}) before the children activate
ENGINE.writeRecords(RecordToWrite.command().key(job.getKey()).job(JobIntent.COMPLETE, job.getValue()), RecordToWrite.event().causedBy(0).key(job.getKey()).job(JobIntent.COMPLETED, job.getValue()), RecordToWrite.command().causedBy(0).key(serviceTask.getKey()).processInstance(ProcessInstanceIntent.COMPLETE_ELEMENT, serviceTask.getValue()), RecordToWrite.event().causedBy(2).key(serviceTask.getKey()).processInstance(ProcessInstanceIntent.ELEMENT_COMPLETING, serviceTask.getValue()), RecordToWrite.event().causedBy(2).key(serviceTask.getKey()).processInstance(ProcessInstanceIntent.ELEMENT_COMPLETED, serviceTask.getValue()), RecordToWrite.event().causedBy(2).key(nextKey).processInstance(ProcessInstanceIntent.SEQUENCE_FLOW_TAKEN, sequenceFlow), RecordToWrite.command().causedBy(2).processInstance(ProcessInstanceIntent.ACTIVATE_ELEMENT, multiInstanceBody), RecordToWrite.command().variable(VariableDocumentIntent.UPDATE, Records.variableDocument(processInstanceKey, "{\"items\":0}")));
ENGINE.start();
// then
// one incident per child: the input element cannot be extracted from a non-array
final var incidents = RecordingExporter.incidentRecords(IncidentIntent.CREATED).withProcessInstanceKey(processInstanceKey).limit(3).asList();
assertThat(incidents).describedAs("Should create incident for each child when input element cannot be retrieved from input collection").extracting(Record::getValue).extracting(IncidentRecordValue::getElementId, IncidentRecordValue::getErrorType, IncidentRecordValue::getErrorMessage).containsOnly(tuple("multi-instance", ErrorType.EXTRACT_VALUE_ERROR, "Expected result of the expression 'items' to be 'ARRAY', but was 'NUMBER'."));
// restore a valid input collection, then resolve each incident individually
ENGINE.variables().ofScope(processInstanceKey).withDocument(Collections.singletonMap(INPUT_COLLECTION, List.of(1, 2, 3))).update();
incidents.forEach(i -> ENGINE.incident().ofInstance(processInstanceKey).withKey(i.getKey()).resolve());
// complete the three child jobs (job 1 was the first service task)
completeNthJob(processInstanceKey, 2);
completeNthJob(processInstanceKey, 3);
completeNthJob(processInstanceKey, 4);
// the process instance should now run to completion
RecordingExporter.processInstanceRecords(ProcessInstanceIntent.ELEMENT_COMPLETED).withProcessInstanceKey(processInstanceKey).withElementType(BpmnElementType.PROCESS).await();
}
Example usage of io.camunda.zeebe.protocol.record.Record in the zeebe project by camunda:
class ExporterDirectorTest, method shouldApplyRecordFilter.
/**
 * Each exporter must receive only the records that match its configured record-type and
 * value-type filter.
 */
@Test
public void shouldApplyRecordFilter() {
  // given: exporter 0 accepts DEPLOYMENT commands and events;
  // exporter 1 accepts only events, of type DEPLOYMENT or JOB
  exporters
      .get(0)
      .onConfigure(
          withFilter(
              Arrays.asList(RecordType.COMMAND, RecordType.EVENT),
              Collections.singletonList(ValueType.DEPLOYMENT)));
  exporters
      .get(1)
      .onConfigure(
          withFilter(
              Collections.singletonList(RecordType.EVENT),
              Arrays.asList(ValueType.DEPLOYMENT, ValueType.JOB)));
  startExporterDirector(exporterDescriptors);

  // when: one deployment command, one deployment event, one incident event, one job event
  final long deploymentCommandPosition =
      rule.writeCommand(DeploymentIntent.CREATE, new DeploymentRecord());
  final long deploymentEventPosition =
      rule.writeEvent(DeploymentIntent.CREATED, new DeploymentRecord());
  rule.writeEvent(IncidentIntent.CREATED, new IncidentRecord());
  final long jobEventPosition = rule.writeEvent(JobIntent.CREATED, new JobRecord());

  // then: each exporter saw exactly the two records its filter allows
  waitUntil(() -> exporters.get(1).getExportedRecords().size() == 2);
  assertThat(exporters.get(0).getExportedRecords())
      .extracting(Record::getPosition)
      .hasSize(2)
      .contains(deploymentCommandPosition, deploymentEventPosition);
  assertThat(exporters.get(1).getExportedRecords())
      .extracting(Record::getPosition)
      .hasSize(2)
      .contains(deploymentEventPosition, jobEventPosition);
}
Example usage of io.camunda.zeebe.protocol.record.Record in the zeebe project by camunda:
class AvailabilityTest, method shouldCreateProcessWhenOnePartitionDown.
/**
 * Process instances can still be created on the remaining partitions while the leader of one
 * partition is down, and the created instances are spread over those remaining partitions.
 */
@Test
public void shouldCreateProcessWhenOnePartitionDown() {
  // given: the leader of the last partition
  final BrokerInfo leaderToStop = clusteringRule.getLeaderForPartition(partitionCount);

  // when: that leader is stopped and instances are created anyway
  clusteringRule.stopBroker(leaderToStop.getNodeId());
  final int instanceCount = 2 * partitionCount;
  for (int created = 0; created < instanceCount; created++) {
    clientRule.createProcessInstance(processDefinitionKey);
  }

  // then: every instance landed on one of the remaining partitions
  final List<Integer> partitionIds =
      RecordingExporter.processInstanceCreationRecords()
          .withIntent(ProcessInstanceCreationIntent.CREATED)
          .map(Record::getPartitionId)
          .limit(instanceCount)
          .collect(Collectors.toList());
  assertThat(partitionIds).hasSize(instanceCount);
  // NOTE(review): this expected distribution assumes partitionCount == 3 — the instances are
  // round-robined over the two surviving partitions (1 and 2), three each
  assertThat(partitionIds).containsExactlyInAnyOrder(1, 1, 1, 2, 2, 2);
}
Example usage of io.camunda.zeebe.protocol.record.Record in the zeebe project by camunda:
class MultiInstanceActivityTest, method shouldApplyInputMapping.
/**
 * Input mappings on a multi-instance activity are applied per child instance: each child gets
 * its own input element ('x') and its own loop counter ('y') in its variable scope.
 */
@Test
public void shouldApplyInputMapping() {
  // given: the multi-instance service task mapped with the input element and the loop counter
  final ServiceTask task = process(miBuilder).getModelElementById(ELEMENT_ID);
  final var modifiedProcess =
      task.builder()
          .zeebeInputExpression(INPUT_ELEMENT_VARIABLE, "x")
          .zeebeInputExpression("loopCounter", "y")
          .done();
  ENGINE.deployment().withXmlResource(modifiedProcess).deploy();

  // when: the instance runs through all children
  final long processInstanceKey =
      ENGINE
          .processInstance()
          .ofBpmnProcessId(PROCESS_ID)
          .withVariable(INPUT_COLLECTION_EXPRESSION, INPUT_COLLECTION)
          .create();
  completeJobs(processInstanceKey, INPUT_COLLECTION.size());

  // then: every child scope holds its own mapped 'x' and 'y' values
  final var childInstanceKeys =
      RecordingExporter.processInstanceRecords(ProcessInstanceIntent.ELEMENT_ACTIVATED)
          .withProcessInstanceKey(processInstanceKey)
          .withElementId(ELEMENT_ID)
          .withElementType(BpmnElementType.SERVICE_TASK)
          .limit(3)
          .map(Record::getKey)
          .collect(Collectors.toList());
  assertThat(
          RecordingExporter.records()
              .limitToProcessInstance(processInstanceKey)
              .variableRecords()
              .withProcessInstanceKey(processInstanceKey))
      .extracting(Record::getValue)
      .extracting(v -> tuple(v.getScopeKey(), v.getName(), v.getValue()))
      .contains(
          tuple(childInstanceKeys.get(0), "x", JsonUtil.toJson(INPUT_COLLECTION.get(0))),
          tuple(childInstanceKeys.get(0), "y", "1"),
          tuple(childInstanceKeys.get(1), "x", JsonUtil.toJson(INPUT_COLLECTION.get(1))),
          tuple(childInstanceKeys.get(1), "y", "2"),
          tuple(childInstanceKeys.get(2), "x", JsonUtil.toJson(INPUT_COLLECTION.get(2))),
          tuple(childInstanceKeys.get(2), "y", "3"));
}
Aggregations