Use of org.folio.rest.jaxrs.model.DataImportEventTypes in project mod-source-record-manager by folio-org.
The class RecordProcessedEventHandlingServiceImpl, method handle.
@Override
public Future<Boolean> handle(String eventContent, OkapiConnectionParams params) {
  Promise<Boolean> promise = Promise.promise();
  DataImportEventPayload dataImportEventPayload;
  try {
    dataImportEventPayload = new ObjectMapper().readValue(eventContent, DataImportEventPayload.class);
  } catch (IOException e) {
    LOGGER.error("Failed to read eventContent {}", eventContent, e);
    promise.fail(e);
    return promise.future();
  }
  String jobExecutionId = dataImportEventPayload.getJobExecutionId();
  try {
    DataImportEventTypes eventType = DataImportEventTypes.valueOf(dataImportEventPayload.getEventType());
    jobExecutionProgressService.updateJobExecutionProgress(jobExecutionId,
        progress -> changeProgressAccordingToEventType(progress, eventType), params.getTenantId())
      .compose(updatedProgress -> updateJobExecutionIfAllRecordsProcessed(jobExecutionId, updatedProgress, params))
      .onComplete(ar -> {
        if (ar.failed()) {
          LOGGER.error("Failed to handle {} event", eventType, ar.cause());
          updateJobStatusToError(jobExecutionId, params).onComplete(statusAr -> promise.fail(ar.cause()));
        } else {
          promise.complete(true);
        }
      });
  } catch (Exception e) {
    LOGGER.error("Failed to handle event {}", eventContent, e);
    updateJobStatusToError(jobExecutionId, params);
    promise.fail(e);
  }
  return promise.future();
}
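The progress update above is delegated to changeProgressAccordingToEventType, which is not part of this snippet. Below is a minimal sketch of how an event type could be mapped onto progress counters, assuming JobExecutionProgress exposes currentlySucceeded/currentlyFailed fields with generated with* setters and that DI_COMPLETED and DI_ERROR are the event types of interest; these names are assumptions, not taken from the code above.

import org.folio.rest.jaxrs.model.DataImportEventTypes;
import org.folio.rest.jaxrs.model.JobExecutionProgress;

class ProgressUpdateSketch {
  // Sketch only: increments the assumed success/failure counter on the progress
  // object depending on whether the record finished with DI_COMPLETED or DI_ERROR.
  JobExecutionProgress changeProgressAccordingToEventType(JobExecutionProgress progress, DataImportEventTypes eventType) {
    switch (eventType) {
      case DI_COMPLETED:
        return progress.withCurrentlySucceeded(progress.getCurrentlySucceeded() + 1);
      case DI_ERROR:
        return progress.withCurrentlyFailed(progress.getCurrentlyFailed() + 1);
      default:
        return progress;
    }
  }
}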
Use of org.folio.rest.jaxrs.model.DataImportEventTypes in project mod-source-record-manager by folio-org.
The class StoredRecordChunksKafkaHandler, method handle.
@Override
public Future<String> handle(KafkaConsumerRecord<String, String> record) {
  List<KafkaHeader> kafkaHeaders = record.headers();
  OkapiConnectionParams okapiConnectionParams = new OkapiConnectionParams(KafkaHeaderUtils.kafkaHeadersToMap(kafkaHeaders), vertx);
  String chunkId = okapiConnectionParams.getHeaders().get("chunkId");
  String chunkNumber = okapiConnectionParams.getHeaders().get("chunkNumber");
  String jobExecutionId = okapiConnectionParams.getHeaders().get("jobExecutionId");
  Event event = Json.decodeValue(record.value(), Event.class);
  try {
    return eventProcessedService.collectData(STORED_RECORD_CHUNKS_KAFKA_HANDLER_UUID, event.getId(), okapiConnectionParams.getTenantId())
      .compose(res -> {
        RecordsBatchResponse recordsBatchResponse = Json.decodeValue(event.getEventPayload(), RecordsBatchResponse.class);
        List<Record> storedRecords = recordsBatchResponse.getRecords();
        // the record type can only be determined by inspecting the stored records; assume the batch is homogeneous
        // and fall back to the previous static default when it is empty or the type is not mapped
        DataImportEventTypes eventType = !storedRecords.isEmpty() && RECORD_TYPE_TO_EVENT_TYPE.containsKey(storedRecords.get(0).getRecordType())
          ? RECORD_TYPE_TO_EVENT_TYPE.get(storedRecords.get(0).getRecordType())
          : DI_SRS_MARC_BIB_RECORD_CREATED;
        LOGGER.debug("RecordsBatchResponse has been received, starting processing chunkId: {} chunkNumber: {} jobExecutionId: {}", chunkId, chunkNumber, jobExecutionId);
        saveCreatedRecordsInfoToDataImportLog(storedRecords, okapiConnectionParams.getTenantId());
        return recordsPublishingService.sendEventsWithRecords(storedRecords, jobExecutionId, okapiConnectionParams, eventType.value())
          .compose(b -> {
            LOGGER.debug("RecordsBatchResponse processing has been completed chunkId: {} chunkNumber: {} jobExecutionId: {}", chunkId, chunkNumber, jobExecutionId);
            return Future.succeededFuture(chunkId);
          }, th -> {
            LOGGER.error("RecordsBatchResponse processing has failed with errors chunkId: {} chunkNumber: {} jobExecutionId: {}", chunkId, chunkNumber, jobExecutionId, th);
            return Future.failedFuture(th);
          });
      });
  } catch (Exception e) {
    LOGGER.error("Can't process kafka record: ", e);
    return Future.failedFuture(e);
  }
}
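The eventType selection above relies on a static RECORD_TYPE_TO_EVENT_TYPE lookup that is outside this snippet. Below is a minimal sketch of how such a map could be declared, assuming Record.RecordType exposes MARC_BIB and MARC_AUTHORITY constants; only the DI_SRS_MARC_BIB_RECORD_CREATED fallback is confirmed by the code above, the other entry and the import locations are assumptions.

import java.util.Map;
import org.folio.rest.jaxrs.model.DataImportEventTypes;
import org.folio.rest.jaxrs.model.Record;

class RecordTypeMappingSketch {
  // Sketch only: maps the type of the stored records to the DataImportEventTypes
  // value to publish for them; entries beyond MARC_BIB are assumptions.
  static final Map<Record.RecordType, DataImportEventTypes> RECORD_TYPE_TO_EVENT_TYPE = Map.of(
      Record.RecordType.MARC_BIB, DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_CREATED,
      Record.RecordType.MARC_AUTHORITY, DataImportEventTypes.DI_SRS_MARC_AUTHORITY_RECORD_CREATED);
}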