Use of org.folio.rest.jaxrs.model.Event in project mod-source-record-storage by folio-org.
Example from the class ParsedRecordChunkConsumersVerticleTest, method check_DI_ERROR_eventsSent.
private void check_DI_ERROR_eventsSent(String jobExecutionId, List<Record> records, String... errorMessages) throws InterruptedException {
  List<DataImportEventPayload> testedEventsPayLoads = new ArrayList<>();
  String observeTopic = KafkaTopicNameHelper.formatTopicName(kafkaConfig.getEnvId(), getDefaultNameSpace(), TENANT_ID, DI_ERROR.value());
  // Read whatever is already on the DI_ERROR topic; if nothing has arrived yet, wait for the expected number of values
  List<String> observedValues = cluster.readValues(ReadKeyValues.from(observeTopic).build());
  if (CollectionUtils.isEmpty(observedValues)) {
    observedValues = cluster.observeValues(ObserveKeyValues.on(observeTopic, records.size())
      .observeFor(30, TimeUnit.SECONDS)
      .build());
  }
  // Keep only the payloads that belong to the job execution under test
  for (String observedValue : observedValues) {
    Event obtainedEvent = Json.decodeValue(observedValue, Event.class);
    DataImportEventPayload eventPayload = Json.decodeValue(obtainedEvent.getEventPayload(), DataImportEventPayload.class);
    if (jobExecutionId.equals(eventPayload.getJobExecutionId())) {
      testedEventsPayLoads.add(eventPayload);
    }
  }
  assertEquals(EXPECTED_ERROR_EVENTS_NUMBER, testedEventsPayLoads.size());
  for (DataImportEventPayload eventPayload : testedEventsPayLoads) {
    String recordId = eventPayload.getContext().get(ParsedRecordChunksErrorHandler.RECORD_ID_HEADER);
    String error = eventPayload.getContext().get(ParsedRecordChunksErrorHandler.ERROR_KEY);
    assertEquals(DI_ERROR.value(), eventPayload.getEventType());
    assertEquals(TENANT_ID, eventPayload.getTenant());
    assertTrue(StringUtils.isNotBlank(recordId));
    for (String errorMessage : errorMessages) {
      assertTrue(error.contains(errorMessage));
    }
    // Every DI_ERROR payload must carry the chain of events that led to the failure
    assertFalse(eventPayload.getEventsChain().isEmpty());
    assertEquals(DI_LOG_SRS_MARC_BIB_RECORD_CREATED.value(), eventPayload.getEventsChain().get(0));
  }
}
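The helper above leans on the two-level envelope used throughout data import: every Kafka message is an Event whose eventPayload field is itself a JSON-encoded DataImportEventPayload. A minimal sketch of just that decoding step, reusing the Vert.x Json helper from the test (the DataImportEventPayload import location is an assumption; the surrounding Kafka wiring is omitted):

import io.vertx.core.json.Json;
import org.folio.DataImportEventPayload;
import org.folio.rest.jaxrs.model.Event;

// kafkaValue is the raw String value read from the DI_ERROR topic
static DataImportEventPayload decodeEnvelope(String kafkaValue) {
  // First level: the Event envelope carried on the topic
  Event event = Json.decodeValue(kafkaValue, Event.class);
  // Second level: the data import payload serialized inside the envelope
  return Json.decodeValue(event.getEventPayload(), DataImportEventPayload.class);
}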
Use of org.folio.rest.jaxrs.model.Event in project mod-source-record-storage by folio-org.
Example from the class QuickMarcKafkaHandlerTest, method createRequest.
private SendKeyValues<String, String> createRequest(HashMap<String, String> payload) {
  String topic = formatTopicName(kafkaConfig.getEnvId(), getDefaultNameSpace(), TENANT_ID, QM_RECORD_UPDATED.name());
  Event event = new Event().withId(UUID.randomUUID().toString()).withEventPayload(Json.encode(payload));
  KeyValue<String, String> eventRecord = new KeyValue<>(KAFKA_KEY_NAME, Json.encode(event));
  eventRecord.addHeader(OkapiConnectionParams.OKAPI_URL_HEADER, OKAPI_URL, Charset.defaultCharset());
  eventRecord.addHeader(OkapiConnectionParams.OKAPI_TENANT_HEADER, TENANT_ID, Charset.defaultCharset());
  eventRecord.addHeader(OkapiConnectionParams.OKAPI_TOKEN_HEADER, TOKEN, Charset.defaultCharset());
  return SendKeyValues.to(topic, Collections.singletonList(eventRecord)).useDefaults();
}
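A typical follow-up in such a test is to hand the result to the embedded kafka-junit cluster; the cluster field and the payload map are assumed to be prepared elsewhere in the test:

// Assumed: 'cluster' is the test's net.mguenther.kafka.junit EmbeddedKafkaCluster
// and 'payload' is the QM_RECORD_UPDATED body the test wants to publish
cluster.send(createRequest(payload));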
Use of org.folio.rest.jaxrs.model.Event in project mod-source-record-storage by folio-org.
Example from the class DataImportKafkaHandler, method handle.
@Override
public Future<String> handle(KafkaConsumerRecord<String, String> targetRecord) {
  String recordId = extractValueFromHeaders(targetRecord.headers(), RECORD_ID_HEADER);
  String chunkId = extractValueFromHeaders(targetRecord.headers(), CHUNK_ID_HEADER);
  try {
    Promise<String> promise = Promise.promise();
    // Unwrap the two-level envelope: Event -> JSON-encoded DataImportEventPayload
    Event event = ObjectMapperTool.getMapper().readValue(targetRecord.value(), Event.class);
    DataImportEventPayload eventPayload = Json.decodeValue(event.getEventPayload(), DataImportEventPayload.class);
    LOGGER.debug("Data import event payload has been received with event type: '{}' by jobExecutionId: '{}' and recordId: '{}' and chunkId: '{}'", eventPayload.getEventType(), eventPayload.getJobExecutionId(), recordId, chunkId);
    eventPayload.getContext().put(RECORD_ID_HEADER, recordId);
    eventPayload.getContext().put(CHUNK_ID_HEADER, chunkId);
    OkapiConnectionParams params = RestUtil.retrieveOkapiConnectionParams(eventPayload, vertx);
    String jobProfileSnapshotId = eventPayload.getContext().get(PROFILE_SNAPSHOT_ID_KEY);
    // Resolve the job profile snapshot from the cache and let EventManager process the payload against it
    profileSnapshotCache.get(jobProfileSnapshotId, params)
      .toCompletionStage()
      .thenCompose(snapshotOptional -> snapshotOptional
        .map(profileSnapshot -> EventManager.handleEvent(eventPayload, profileSnapshot))
        .orElse(CompletableFuture.failedFuture(new EventProcessingException(
          format("Job profile snapshot with id '%s' does not exist", jobProfileSnapshotId)))))
      .whenComplete((processedPayload, throwable) -> {
        if (throwable != null) {
          promise.fail(throwable);
        } else if (DI_ERROR.value().equals(processedPayload.getEventType())) {
          // Processing produced a DI_ERROR payload, so the record is treated as failed
          promise.fail(format("Failed to process data import event payload from topic '%s' by jobExecutionId: '%s' with recordId: '%s' and chunkId: '%s' ", targetRecord.topic(), eventPayload.getJobExecutionId(), recordId, chunkId));
        } else {
          promise.complete(targetRecord.key());
        }
      });
    return promise.future();
  } catch (Exception e) {
    LOGGER.error("Failed to process data import kafka record from topic '{}' with recordId: '{}' and chunkId: '{}' ", targetRecord.topic(), recordId, chunkId, e);
    return Future.failedFuture(e);
  }
}
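The handler above relies on an extractValueFromHeaders helper that is not shown on this page (the chunk handler below uses it as well). A minimal sketch of what such a helper can look like with Vert.x KafkaHeader values; the actual implementation in mod-source-record-storage may differ in details:

import java.util.List;
import io.vertx.kafka.client.producer.KafkaHeader;

private String extractValueFromHeaders(List<KafkaHeader> headers, String key) {
  // Return the first header value matching the key, or null when the header is absent
  return headers.stream()
    .filter(header -> header.key().equals(key))
    .findAny()
    .map(header -> header.value().toString()) // KafkaHeader values are Vert.x Buffers
    .orElse(null);
}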
Use of org.folio.rest.jaxrs.model.Event in project mod-source-record-storage by folio-org.
Example from the class ParsedRecordChunksKafkaHandler, method sendBackRecordsBatchResponse.
private Future<String> sendBackRecordsBatchResponse(RecordsBatchResponse recordsBatchResponse, List<KafkaHeader> kafkaHeaders, String tenantId, int chunkNumber, String eventType, KafkaConsumerRecord<String, String> commonRecord) {
  // Wrap the saved batch into a DI_PARSED_RECORDS_CHUNK_SAVED event envelope
  Event event = new Event()
    .withId(UUID.randomUUID().toString())
    .withEventType(DI_PARSED_RECORDS_CHUNK_SAVED.value())
    .withEventPayload(Json.encode(normalize(recordsBatchResponse)))
    .withEventMetadata(new EventMetadata()
      .withTenantId(tenantId)
      .withEventTTL(1)
      .withPublishedBy(constructModuleName()));
  String key = String.valueOf(indexer.incrementAndGet() % maxDistributionNum);
  String topicName = KafkaTopicNameHelper.formatTopicName(kafkaConfig.getEnvId(), KafkaTopicNameHelper.getDefaultNameSpace(), tenantId, DI_PARSED_RECORDS_CHUNK_SAVED.value());
  KafkaProducerRecord<String, String> targetRecord = KafkaProducerRecord.create(topicName, key, Json.encode(event));
  targetRecord.addHeaders(kafkaHeaders);
  Promise<String> writePromise = Promise.promise();
  String producerName = DI_PARSED_RECORDS_CHUNK_SAVED + "_Producer";
  KafkaProducer<String, String> producer = KafkaProducer.createShared(Vertx.currentContext().owner(), producerName, kafkaConfig.getProducerProps());
  producer.write(targetRecord, war -> {
    // Close the producer regardless of the write outcome
    producer.end(ear -> producer.close());
    if (war.succeeded()) {
      String recordId = extractValueFromHeaders(commonRecord.headers(), RECORD_ID_HEADER);
      String chunkId = extractValueFromHeaders(commonRecord.headers(), CHUNK_ID_HEADER);
      LOGGER.debug("RecordCollection processing has been completed with response sent... event: '{}', chunkId: '{}', chunkNumber '{}'-'{}' with recordId: '{}'", eventType, chunkId, chunkNumber, targetRecord.key(), recordId);
      writePromise.complete(targetRecord.key());
    } else {
      Throwable cause = war.cause();
      LOGGER.error("{} write error {}", producerName, cause);
      writePromise.fail(cause);
    }
  });
  return writePromise.future();
}
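Two design points worth noting in the method above: the outgoing key is derived from indexer.incrementAndGet() % maxDistributionNum, which round-robins responses over a fixed set of keys so the DI_PARSED_RECORDS_CHUNK_SAVED events are spread across the topic's partitions rather than concentrating on a single key; and the shared Vert.x producer created for the write is ended and closed inside the write callback, so no producer instance is left open per processed chunk.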
Use of org.folio.rest.jaxrs.model.Event in project mod-source-record-storage by folio-org.
Example from the class ParsedRecordChunksKafkaHandler, method handle.
@Override
public Future<String> handle(KafkaConsumerRecord<String, String> targetRecord) {
  // Decode the Event envelope and the RecordCollection it carries
  Event event = Json.decodeValue(targetRecord.value(), Event.class);
  RecordCollection recordCollection = Json.decodeValue(event.getEventPayload(), RecordCollection.class);
  List<KafkaHeader> kafkaHeaders = targetRecord.headers();
  OkapiConnectionParams okapiConnectionParams = new OkapiConnectionParams(KafkaHeaderUtils.kafkaHeadersToMap(kafkaHeaders), vertx);
  String tenantId = okapiConnectionParams.getTenantId();
  String recordId = extractValueFromHeaders(targetRecord.headers(), RECORD_ID_HEADER);
  String chunkId = extractValueFromHeaders(targetRecord.headers(), CHUNK_ID_HEADER);
  String key = targetRecord.key();
  int chunkNumber = chunkCounter.incrementAndGet();
  DataImportEventPayload eventPayload = Json.decodeValue(event.getEventPayload(), DataImportEventPayload.class);
  try {
    LOGGER.debug("RecordCollection has been received with event: '{}', chunkId: '{}', starting processing... chunkNumber '{}'-'{}' with recordId: '{}' ", eventPayload.getEventType(), chunkId, chunkNumber, key, recordId);
    // Persist the chunk and publish a DI_PARSED_RECORDS_CHUNK_SAVED response with the saved batch
    return recordService.saveRecords(recordCollection, tenantId)
      .compose(recordsBatchResponse -> sendBackRecordsBatchResponse(recordsBatchResponse, kafkaHeaders, tenantId, chunkNumber, eventPayload.getEventType(), targetRecord));
  } catch (Exception e) {
    LOGGER.error("RecordCollection processing has failed with errors with event: '{}', chunkId: '{}', chunkNumber '{}'-'{}' with recordId: '{}' ", eventPayload.getEventType(), chunkId, chunkNumber, key, recordId, e);
    return Future.failedFuture(e);
  }
}
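For orientation, the message this handler consumes follows the same envelope pattern as the producers above: a RecordCollection is JSON-encoded into the eventPayload of an Event. A rough sketch of the producing side, assuming the usual withRecords/withTotalRecords builders of the generated model and using records, topicName and key as placeholders; the actual upstream producer also sets the event type, metadata and further payload fields:

// Placeholders: 'records', 'topicName' and 'key' stand in for real values
RecordCollection recordCollection = new RecordCollection()
  .withRecords(records)
  .withTotalRecords(records.size());
Event event = new Event()
  .withId(UUID.randomUUID().toString())
  .withEventPayload(Json.encode(recordCollection));
KafkaProducerRecord<String, String> producerRecord = KafkaProducerRecord.create(topicName, key, Json.encode(event));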