use of org.folio.rest.jaxrs.model.Event in project mod-source-record-manager by folio-org.
the class DataImportKafkaHandler method handle.
@Override
public Future<String> handle(KafkaConsumerRecord<String, String> record) {
  try {
    Promise<String> result = Promise.promise();
    List<KafkaHeader> kafkaHeaders = record.headers();
    OkapiConnectionParams okapiConnectionParams =
      new OkapiConnectionParams(KafkaHeaderUtils.kafkaHeadersToMap(kafkaHeaders), vertx);
    String recordId = okapiConnectionParams.getHeaders().get(RECORD_ID_HEADER);
    Event event = Json.decodeValue(record.value(), Event.class);
    String jobExecutionId = extractJobExecutionId(kafkaHeaders);
    LOGGER.info("Event was received with recordId: '{}' event type: '{}' with jobExecutionId: '{}'",
      recordId, event.getEventType(), jobExecutionId);
    // events without a recordId header bypass deduplication and are handled directly
    if (StringUtils.isBlank(recordId)) {
      handleLocalEvent(result, okapiConnectionParams, event);
      return result.future();
    }
    // collect deduplication info first; a DuplicateEventException means this event was already processed
    eventProcessedService.collectData(DATA_IMPORT_KAFKA_HANDLER_UUID, event.getId(), okapiConnectionParams.getTenantId())
      .onSuccess(res -> handleLocalEvent(result, okapiConnectionParams, event))
      .onFailure(e -> {
        if (e instanceof DuplicateEventException) {
          LOGGER.info(e.getMessage());
          result.complete();
        } else {
          LOGGER.error("Error with database during collecting of deduplication info for handlerId: {} , eventId: {}. ",
            DATA_IMPORT_KAFKA_HANDLER_UUID, event.getId(), e);
          result.fail(e);
        }
      });
    return result.future();
  } catch (Exception e) {
    LOGGER.error("Error during processing data-import result", e);
    return Future.failedFuture(e);
  }
}
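The helpers handleLocalEvent and extractJobExecutionId are referenced above but not included in this snippet. Below is a minimal sketch of what extractJobExecutionId might look like, assuming the job execution id travels in a jobExecutionId Kafka header; the header name and implementation are assumptions for illustration, not the module's actual code.

// Hypothetical helper: looks up a jobExecutionId header among the consumer record's Kafka headers.
// The header name "jobExecutionId" is an assumption.
private String extractJobExecutionId(List<KafkaHeader> kafkaHeaders) {
  return kafkaHeaders.stream()
    .filter(header -> "jobExecutionId".equals(header.key()))
    .findFirst()
    .map(header -> header.value().toString())
    .orElse(null);
}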
use of org.folio.rest.jaxrs.model.Event in project mod-source-record-manager by folio-org.
the class RawMarcChunksErrorHandler method handle.
@Override
public void handle(Throwable throwable, KafkaConsumerRecord<String, String> record) {
  Event event = Json.decodeValue(record.value(), Event.class);
  List<KafkaHeader> kafkaHeaders = record.headers();
  OkapiConnectionParams okapiParams = new OkapiConnectionParams(KafkaHeaderUtils.kafkaHeadersToMap(kafkaHeaders), vertx);
  String jobExecutionId = okapiParams.getHeaders().get(JOB_EXECUTION_ID_HEADER);
  String chunkId = okapiParams.getHeaders().get(CHUNK_ID_HEADER);
  String tenantId = okapiParams.getTenantId();
  String lastChunk = okapiParams.getHeaders().get(LAST_CHUNK_HEADER);
  if (StringUtils.isNotBlank(lastChunk)) {
    LOGGER.error("Source chunk with jobExecutionId: {} , tenantId: {}, chunkId: {} marked as last, prevent sending DI error",
      jobExecutionId, tenantId, chunkId, throwable);
  } else if (throwable instanceof RecordsPublishingException) {
    List<Record> failedRecords = ((RecordsPublishingException) throwable).getFailedRecords();
    for (Record failedRecord : failedRecords) {
      sendDiErrorEvent(throwable, okapiParams, jobExecutionId, tenantId, failedRecord);
    }
  } else if (throwable instanceof DuplicateEventException) {
    RawRecordsDto rawRecordsDto = Json.decodeValue(event.getEventPayload(), RawRecordsDto.class);
    LOGGER.info("Duplicate event received, skipping parsing for jobExecutionId: {} , tenantId: {}, chunkId:{}, totalRecords: {}, cause: {}",
      jobExecutionId, tenantId, chunkId, rawRecordsDto.getInitialRecords().size(), throwable.getMessage());
  } else if (throwable instanceof RawChunkRecordsParsingException) {
    RawChunkRecordsParsingException exception = (RawChunkRecordsParsingException) throwable;
    parsedRecordsErrorProvider.getParsedRecordsFromInitialRecords(okapiParams, jobExecutionId, exception.getRawRecordsDto())
      .onComplete(ar -> {
        List<Record> parsedRecords = ar.result();
        if (CollectionUtils.isNotEmpty(parsedRecords)) {
          for (Record rec : parsedRecords) {
            sendDiError(throwable, jobExecutionId, okapiParams, rec);
          }
        } else {
          sendDiError(throwable, jobExecutionId, okapiParams, null);
        }
      });
  } else {
    sendDiErrorEvent(throwable, okapiParams, jobExecutionId, tenantId, null);
  }
}
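The sendDiErrorEvent and sendDiError helpers are not part of this snippet. A rough sketch of what such a publisher could look like follows, assuming the DataImportEventPayload model from data-import-processing-core and a kafkaConfig field on the handler; the DI_ERROR literal, the header name, and the overall shape are illustrative assumptions rather than the module's actual implementation.

// Hypothetical sketch only: builds a DI_ERROR payload and publishes it with the
// EventHandlingUtil.sendEventToKafka helper shown later on this page.
private void sendDiErrorEvent(Throwable throwable, OkapiConnectionParams okapiParams,
                              String jobExecutionId, String tenantId, Record failedRecord) {
  DataImportEventPayload payload = new DataImportEventPayload()   // assumed model class
    .withEventType("DI_ERROR")                                    // illustrative literal
    .withJobExecutionId(jobExecutionId)
    .withOkapiUrl(okapiParams.getOkapiUrl())
    .withTenant(tenantId)
    .withToken(okapiParams.getToken());
  // the real implementation would also attach the throwable's message and the failed record to the payload context
  List<KafkaHeader> headers = List.of(KafkaHeader.header("jobExecutionId", jobExecutionId)); // assumed header
  String key = failedRecord != null ? failedRecord.getId() : jobExecutionId;
  EventHandlingUtil.sendEventToKafka(tenantId, Json.encode(payload), "DI_ERROR", headers, kafkaConfig, key)
    .onFailure(cause -> LOGGER.error("Failed to publish DI_ERROR for jobExecutionId: {}", jobExecutionId, cause));
}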
use of org.folio.rest.jaxrs.model.Event in project mod-source-record-manager by folio-org.
the class StoredRecordChunksErrorHandler method handle.
@Override
public void handle(Throwable throwable, KafkaConsumerRecord<String, String> kafkaConsumerRecord) {
  List<KafkaHeader> kafkaHeaders = kafkaConsumerRecord.headers();
  OkapiConnectionParams okapiParams = new OkapiConnectionParams(KafkaHeaderUtils.kafkaHeadersToMap(kafkaHeaders), vertx);
  String jobExecutionId = okapiParams.getHeaders().get(JOB_EXECUTION_ID_HEADER);
  // process specific failed records carried in the exception body
  if (throwable instanceof RecordsPublishingException) {
    List<Record> failedRecords = ((RecordsPublishingException) throwable).getFailedRecords();
    for (Record failedRecord : failedRecords) {
      sendDiErrorForRecord(jobExecutionId, failedRecord, okapiParams, failedRecord.getErrorRecord().getDescription());
    }
  } else if (throwable instanceof DuplicateEventException) {
    LOGGER.info(throwable.getMessage());
  } else {
    // all other cases: report the error for every record in the event payload
    Event event = Json.decodeValue(kafkaConsumerRecord.value(), Event.class);
    RecordsBatchResponse recordCollection = Json.decodeValue(event.getEventPayload(), RecordsBatchResponse.class);
    for (Record targetRecord : recordCollection.getRecords()) {
      sendDiErrorForRecord(jobExecutionId, targetRecord, okapiParams, throwable.getMessage());
    }
  }
}
use of org.folio.rest.jaxrs.model.Event in project mod-source-record-manager by folio-org.
the class EventHandlingUtil method sendEventToKafka.
/**
 * Prepares and sends an event with the given payload to Kafka.
 *
 * @param tenantId     tenant id
 * @param eventPayload event payload in String representation
 * @param eventType    event type
 * @param kafkaHeaders kafka headers
 * @param kafkaConfig  kafka config
 * @param key          key for the Kafka producer record
 * @return completed future with true if the event was sent successfully
 */
public static Future<Boolean> sendEventToKafka(String tenantId, String eventPayload, String eventType,
                                               List<KafkaHeader> kafkaHeaders, KafkaConfig kafkaConfig, String key) {
  LOGGER.debug("Starting to send event to Kafka for eventType: {}", eventType);
  Event event = createEvent(eventPayload, eventType, tenantId);
  String topicName = createTopicName(eventType, tenantId, kafkaConfig);
  KafkaProducerRecord<String, String> record = createProducerRecord(event, key, topicName, kafkaHeaders);
  Promise<Boolean> promise = Promise.promise();
  String chunkId = extractHeader(kafkaHeaders, "chunkId");
  String recordId = extractHeader(kafkaHeaders, "recordId");
  String producerName = eventType + "_Producer";
  KafkaProducer<String, String> producer =
    KafkaProducer.createShared(Vertx.currentContext().owner(), producerName, kafkaConfig.getProducerProps());
  producer.write(record, war -> {
    producer.end(ear -> producer.close());
    if (war.succeeded()) {
      logSendingSucceeded(eventType, chunkId, recordId);
      promise.complete(true);
    } else {
      Throwable cause = war.cause();
      LOGGER.error("{} write error for event {}:", producerName, eventType, cause);
      handleKafkaPublishingErrors(promise, eventPayload, cause);
    }
  });
  return promise.future();
}
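A small caller sketch for the helper above. The tenant id, event type, header values, and the parsedRecordsDto, kafkaConfig, and recordKey variables are example data assumed to be in scope, not values taken from the module.

// Illustrative usage of sendEventToKafka; "diku" and DI_COMPLETED are example values.
List<KafkaHeader> kafkaHeaders = List.of(
  KafkaHeader.header("recordId", UUID.randomUUID().toString()),
  KafkaHeader.header("chunkId", UUID.randomUUID().toString()));
String eventPayload = Json.encode(parsedRecordsDto);  // hypothetical payload object
EventHandlingUtil.sendEventToKafka("diku", eventPayload, "DI_COMPLETED", kafkaHeaders, kafkaConfig, recordKey)
  .onSuccess(sent -> LOGGER.info("Event was sent for eventType: DI_COMPLETED"))
  .onFailure(cause -> LOGGER.error("Failed to send event", cause));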
use of org.folio.rest.jaxrs.model.Event in project mod-source-record-manager by folio-org.
the class ChangeManagerAPITest method fillInRecordOrderIfAtLeastOneRecordHasNoOrder.
private void fillInRecordOrderIfAtLeastOneRecordHasNoOrder(String rawRecord) throws InterruptedException {
  RawRecordsDto rawRecordsDto = new RawRecordsDto()
    .withId(UUID.randomUUID().toString())
    .withRecordsMetadata(new RecordsMetadata().withLast(true).withCounter(7).withContentType(RecordsMetadata.ContentType.MARC_RAW))
    .withInitialRecords(asList(
      new InitialRecord().withRecord(CORRECT_RAW_RECORD_1),
      new InitialRecord().withRecord(CORRECT_RAW_RECORD_2).withOrder(5),
      new InitialRecord().withRecord(rawRecord).withOrder(6)));
  InitJobExecutionsRsDto response = constructAndPostInitJobExecutionRqDto(1);
  List<JobExecution> createdJobExecutions = response.getJobExecutions();
  assertThat(createdJobExecutions.size(), is(1));
  JobExecution jobExec = createdJobExecutions.get(0);
  RestAssured.given().spec(spec)
    .body(new JobProfileInfo().withName("MARC records").withId(DEFAULT_JOB_PROFILE_ID).withDataType(DataType.MARC))
    .when().put(JOB_EXECUTION_PATH + jobExec.getId() + JOB_PROFILE_PATH)
    .then().statusCode(HttpStatus.SC_OK);
  RestAssured.given().spec(spec)
    .body(rawRecordsDto)
    .when().post(JOB_EXECUTION_PATH + jobExec.getId() + RECORDS_PATH)
    .then().statusCode(HttpStatus.SC_NO_CONTENT);
  String topicToObserve = formatToKafkaTopicName(DI_RAW_RECORDS_CHUNK_PARSED.value());
  List<String> observedValues = kafkaCluster.observeValues(
    ObserveKeyValues.on(topicToObserve, 1).observeFor(30, TimeUnit.SECONDS).build());
  Event obtainedEvent = Json.decodeValue(observedValues.get(0), Event.class);
  assertEquals(DI_RAW_RECORDS_CHUNK_PARSED.value(), obtainedEvent.getEventType());
  RecordCollection processedRecords = Json.decodeValue(obtainedEvent.getEventPayload(), RecordCollection.class);
  assertEquals(3, processedRecords.getRecords().size());
  assertEquals(4, processedRecords.getRecords().get(0).getOrder().intValue());
  assertEquals(5, processedRecords.getRecords().get(1).getOrder().intValue());
  assertEquals(6, processedRecords.getRecords().get(2).getOrder().intValue());
}