Use of org.folio.MappingMetadataDto in project mod-inventory by folio-org.
The class UpdateAuthorityEventHandlerTest, method setUp.
@Before
public void setUp() throws IOException {
  MockitoAnnotations.openMocks(this);
  MappingManager.clearReaderFactories();
  MappingMetadataCache mappingMetadataCache =
    new MappingMetadataCache(vertx, vertx.createHttpClient(), 3600);
  eventHandler = new UpdateAuthorityEventHandler(storage, mappingMetadataCache, publisher);
  JsonObject mappingRules = new JsonObject(TestUtil.readFileFromPath(MAPPING_RULES_PATH));

  doAnswer(invocationOnMock -> {
    Consumer<Success<Void>> successHandler = invocationOnMock.getArgument(1);
    successHandler.accept(new Success<>(null));
    return null;
  }).when(authorityCollection).update(any(), any(), any());

  WireMock.stubFor(get(new UrlPathPattern(new RegexPattern(MAPPING_METADATA_URL + "/.*"), true))
    .willReturn(WireMock.ok().withBody(Json.encode(new MappingMetadataDto()
      .withMappingParams(Json.encode(new MappingParameters()))
      .withMappingRules(mappingRules.encode())))));
}
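Note that the stubbed MappingMetadataDto carries its mappingRules and mappingParams fields as JSON strings, so consumers decode twice: once for the DTO, once for each field. A minimal sketch of that double decoding, assuming the DTO's getters mirror the with* builders shown above; responseBody is a hypothetical stand-in for the HTTP body served by the stub.

// A minimal sketch of unwrapping the stubbed body; `responseBody` is a
// hypothetical stand-in for the HTTP body the stub above serves.
String responseBody = Json.encode(new MappingMetadataDto()
  .withMappingParams(Json.encode(new MappingParameters()))
  .withMappingRules(new JsonObject().encode()));
MappingMetadataDto dto = Json.decodeValue(responseBody, MappingMetadataDto.class);
JsonObject rules = new JsonObject(dto.getMappingRules());              // rules arrive as a JSON string
MappingParameters params = Json.decodeValue(dto.getMappingParams(), MappingParameters.class);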
Use of org.folio.MappingMetadataDto in project mod-inventory by folio-org.
The class MappingMetadataCacheTest, method shouldReturnMappingMetadata.
@Test
public void shouldReturnMappingMetadata(TestContext context) {
  Async async = context.async();

  Future<Optional<MappingMetadataDto>> optionalFuture =
    mappingMetadataCache.get(mappingMetadata.getJobExecutionId(), this.context);

  optionalFuture.onComplete(ar -> {
    context.assertTrue(ar.succeeded());
    context.assertTrue(ar.result().isPresent());
    MappingMetadataDto actualMappingMetadata = ar.result().get();
    context.assertEquals(mappingMetadata.getJobExecutionId(), actualMappingMetadata.getJobExecutionId());
    context.assertNotNull(actualMappingMetadata.getMappingParams());
    context.assertNotNull(actualMappingMetadata.getMappingRules());
    context.assertEquals(mappingMetadata.getMappingParams(), actualMappingMetadata.getMappingParams());
    context.assertEquals(mappingMetadata.getMappingRules(), actualMappingMetadata.getMappingRules());
    async.complete();
  });
}
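Callers rarely consume the Optional directly. As the handlers below show, it is usually unwrapped with orElseThrow so that a missing snapshot fails the future. A minimal sketch, assuming jobExecutionId and context are in scope; the message text is illustrative, not the module's actual constant:

Future<MappingMetadataDto> metadataFuture =
  mappingMetadataCache.get(jobExecutionId, context)
    .map(optional -> optional.orElseThrow(() -> new EventProcessingException(
      "MappingMetadata was not found by jobExecutionId: " + jobExecutionId)));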
Use of org.folio.MappingMetadataDto in project mod-inventory by folio-org.
The class MarcBibInstanceHridSetKafkaHandler, method handle.
@Override
public Future<String> handle(KafkaConsumerRecord<String, String> record) {
  try {
    Promise<String> promise = Promise.promise();
    Event event = OBJECT_MAPPER.readValue(record.value(), Event.class);
    @SuppressWarnings("unchecked")
    HashMap<String, String> eventPayload = OBJECT_MAPPER.readValue(event.getEventPayload(), HashMap.class);
    Map<String, String> headersMap = KafkaHeaderUtils.kafkaHeadersToMap(record.headers());
    String recordId = headersMap.get(RECORD_ID_HEADER);
    String chunkId = headersMap.get(CHUNK_ID_HEADER);
    String jobExecutionId = eventPayload.get(JOB_EXECUTION_ID_HEADER);
    LOGGER.info("Event payload has been received with event type: {}, recordId: {} by jobExecution: {} and chunkId: {}",
      event.getEventType(), recordId, jobExecutionId, chunkId);

    if (isEmpty(eventPayload.get(MARC_KEY))) {
      String message = format("Event payload does not contain required data to update Instance with event type: '%s', recordId: '%s' by jobExecution: '%s' and chunkId: '%s'",
        event.getEventType(), recordId, jobExecutionId, chunkId);
      LOGGER.error(message);
      return Future.failedFuture(message);
    }

    Context context = EventHandlingUtil.constructContext(headersMap.get(OKAPI_TENANT_HEADER),
      headersMap.get(OKAPI_TOKEN_HEADER), headersMap.get(OKAPI_URL_HEADER));
    Record marcRecord = new JsonObject(eventPayload.get(MARC_KEY)).mapTo(Record.class);

    mappingMetadataCache.get(jobExecutionId, context)
      .map(metadataOptional -> metadataOptional.orElseThrow(() ->
        new EventProcessingException(format(MAPPING_METADATA_NOT_FOUND_MSG, jobExecutionId))))
      .onSuccess(mappingMetadataDto -> ensureEventPayloadWithMappingMetadata(eventPayload, mappingMetadataDto))
      .compose(v -> instanceUpdateDelegate.handle(eventPayload, marcRecord, context))
      .onComplete(ar -> {
        if (ar.succeeded()) {
          eventPayload.remove(CURRENT_RETRY_NUMBER);
          promise.complete(record.key());
        } else if (ar.cause() instanceof OptimisticLockingException) {
          processOLError(record, promise, eventPayload, ar);
        } else {
          eventPayload.remove(CURRENT_RETRY_NUMBER);
          LOGGER.error("Failed to set MarcBib HRID by jobExecutionId {}: {}", jobExecutionId, ar.cause());
          promise.fail(ar.cause());
        }
      });
    return promise.future();
  } catch (Exception e) {
    LOGGER.error(format("Failed to process data import kafka record from topic %s", record.topic()), e);
    return Future.failedFuture(e);
  }
}
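The ensureEventPayloadWithMappingMetadata helper is not shown in this snippet. A plausible sketch of what it does, copying the snapshot into the payload so downstream mapping can read it; MAPPING_RULES_KEY and MAPPING_PARAMS_KEY are hypothetical constant names, not confirmed from the source:

// A plausible sketch only; MAPPING_RULES_KEY and MAPPING_PARAMS_KEY are
// hypothetical names for the payload keys the delegate reads.
private void ensureEventPayloadWithMappingMetadata(HashMap<String, String> eventPayload,
                                                   MappingMetadataDto mappingMetadataDto) {
  eventPayload.put(MAPPING_RULES_KEY, mappingMetadataDto.getMappingRules());
  eventPayload.put(MAPPING_PARAMS_KEY, mappingMetadataDto.getMappingParams());
}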
Use of org.folio.MappingMetadataDto in project mod-inventory by folio-org.
The class MarcHoldingsRecordHridSetKafkaHandler, method handle.
@Override
public Future<String> handle(KafkaConsumerRecord<String, String> record) {
  try {
    Promise<String> promise = Promise.promise();
    Event event = OBJECT_MAPPER.readValue(record.value(), Event.class);
    @SuppressWarnings("unchecked")
    HashMap<String, String> eventPayload = OBJECT_MAPPER.readValue(event.getEventPayload(), HashMap.class);
    Map<String, String> headersMap = KafkaHeaderUtils.kafkaHeadersToMap(record.headers());
    String recordId = headersMap.get(RECORD_ID_HEADER);
    String chunkId = headersMap.get(CHUNK_ID_HEADER);
    String jobExecutionId = eventPayload.get(JOB_EXECUTION_ID_HEADER);
    LOGGER.info("Event payload has been received with event type: {}, recordId: {} by jobExecution: {} and chunkId: {}",
      event.getEventType(), recordId, jobExecutionId, chunkId);

    if (isEmpty(eventPayload.get(MARC_KEY))) {
      String message = String.format("Event payload does not contain required data to update Holdings with event type: '%s', recordId: '%s' by jobExecution: '%s' and chunkId: '%s'",
        event.getEventType(), recordId, jobExecutionId, chunkId);
      LOGGER.error(message);
      return Future.failedFuture(message);
    }

    Context context = constructContext(headersMap.get(OKAPI_TENANT_HEADER),
      headersMap.get(OKAPI_TOKEN_HEADER), headersMap.get(OKAPI_URL_HEADER));
    Record marcRecord = Json.decodeValue(eventPayload.get(MARC_KEY), Record.class);

    mappingMetadataCache.get(jobExecutionId, context)
      .map(metadataOptional -> metadataOptional.orElseThrow(() ->
        new EventProcessingException(format(MAPPING_METADATA_NOT_FOUND_MSG, jobExecutionId))))
      .onSuccess(mappingMetadataDto -> ensureEventPayloadWithMappingMetadata(eventPayload, mappingMetadataDto))
      .compose(v -> holdingsRecordUpdateDelegate.handle(eventPayload, marcRecord, context))
      .onComplete(ar -> {
        if (ar.succeeded()) {
          eventPayload.remove(CURRENT_RETRY_NUMBER);
          promise.complete(record.key());
        } else if (ar.cause() instanceof OptimisticLockingException) {
          processOLError(record, promise, eventPayload, ar);
        } else {
          eventPayload.remove(CURRENT_RETRY_NUMBER);
          LOGGER.error("Failed to process data import event payload", ar.cause());
          promise.fail(ar.cause());
        }
      });
    return promise.future();
  } catch (Exception e) {
    LOGGER.error(format("Failed to process data import kafka record from topic %s", record.topic()), e);
    return Future.failedFuture(e);
  }
}
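Both HRID-set handlers route OptimisticLockingException into processOLError, which is not shown here. A hedged sketch of the usual retry pattern around CURRENT_RETRY_NUMBER; the MAX_RETRIES constant and the re-invocation of handle(record) are assumptions, not the module's confirmed code:

// A hedged sketch of optimistic-locking retry handling; MAX_RETRIES and the
// recursive re-invocation of handle(record) are assumptions.
private void processOLError(KafkaConsumerRecord<String, String> record, Promise<String> promise,
                            HashMap<String, String> eventPayload, AsyncResult<?> ar) {
  int currentRetryNumber = eventPayload.get(CURRENT_RETRY_NUMBER) == null
    ? 0 : Integer.parseInt(eventPayload.get(CURRENT_RETRY_NUMBER));
  if (currentRetryNumber < MAX_RETRIES) {
    eventPayload.put(CURRENT_RETRY_NUMBER, String.valueOf(currentRetryNumber + 1));
    LOGGER.warn("Optimistic locking conflict, retry attempt {}", currentRetryNumber + 1);
    handle(record).onComplete(promise);   // re-run the whole handler for this record
  } else {
    eventPayload.remove(CURRENT_RETRY_NUMBER);
    promise.fail(ar.cause());
  }
}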
Use of org.folio.MappingMetadataDto in project mod-inventory by folio-org.
The class UpdateItemEventHandler, method handle.
@Override
public CompletableFuture<DataImportEventPayload> handle(DataImportEventPayload dataImportEventPayload) {
  CompletableFuture<DataImportEventPayload> future = new CompletableFuture<>();
  try {
    dataImportEventPayload.setEventType(DI_INVENTORY_ITEM_UPDATED.value());
    HashMap<String, String> payloadContext = dataImportEventPayload.getContext();
    if (isNull(payloadContext) || isBlank(payloadContext.get(MARC_BIBLIOGRAPHIC.value()))
        || isBlank(payloadContext.get(ITEM.value()))) {
      LOG.error(PAYLOAD_HAS_NO_DATA_MSG);
      return CompletableFuture.failedFuture(new EventProcessingException(PAYLOAD_HAS_NO_DATA_MSG));
    }
    if (dataImportEventPayload.getCurrentNode().getChildSnapshotWrappers().isEmpty()) {
      LOG.error(ACTION_HAS_NO_MAPPING_MSG);
      return CompletableFuture.failedFuture(new EventProcessingException(ACTION_HAS_NO_MAPPING_MSG));
    }
    LOG.info("Processing UpdateItemEventHandler starting with jobExecutionId: {}.", dataImportEventPayload.getJobExecutionId());

    AtomicBoolean isProtectedStatusChanged = new AtomicBoolean();
    Context context = EventHandlingUtil.constructContext(dataImportEventPayload.getTenant(),
      dataImportEventPayload.getToken(), dataImportEventPayload.getOkapiUrl());
    String jobExecutionId = dataImportEventPayload.getJobExecutionId();
    String recordId = dataImportEventPayload.getContext().get(RECORD_ID_HEADER);
    String chunkId = dataImportEventPayload.getContext().get(CHUNK_ID_HEADER);

    mappingMetadataCache.get(jobExecutionId, context)
      .map(parametersOptional -> parametersOptional.orElseThrow(() ->
        new EventProcessingException(format(MAPPING_METADATA_NOT_FOUND_MSG, jobExecutionId, recordId, chunkId))))
      .compose(mappingMetadataDto -> {
        String oldItemStatus = preparePayloadAndGetStatus(dataImportEventPayload, payloadContext, mappingMetadataDto);
        JsonObject mappedItemAsJson = new JsonObject(payloadContext.get(ITEM.value()));
        mappedItemAsJson = mappedItemAsJson.containsKey(ITEM_PATH_FIELD)
          ? mappedItemAsJson.getJsonObject(ITEM_PATH_FIELD) : mappedItemAsJson;

        List<String> errors = validateItem(mappedItemAsJson, requiredFields);
        if (!errors.isEmpty()) {
          String msg = format("Mapped Item is invalid: %s, by jobExecutionId: '%s' and recordId: '%s' and chunkId: '%s'",
            errors, jobExecutionId, recordId, chunkId);
          LOG.error(msg);
          return Future.failedFuture(msg);
        }

        String newItemStatus = mappedItemAsJson.getJsonObject(STATUS_KEY).getString("name");
        isProtectedStatusChanged.set(isProtectedStatusChanged(oldItemStatus, newItemStatus));
        if (isProtectedStatusChanged.get()) {
          mappedItemAsJson.getJsonObject(STATUS_KEY).put("name", oldItemStatus);
        }

        ItemCollection itemCollection = storage.getItemCollection(context);
        Item itemToUpdate = ItemUtil.jsonToItem(mappedItemAsJson);
        return verifyItemBarcodeUniqueness(itemToUpdate, itemCollection)
          .compose(v -> updateItemAndRetryIfOLExists(itemToUpdate, itemCollection, dataImportEventPayload))
          .onSuccess(updatedItem -> {
            if (isProtectedStatusChanged.get()) {
              String msg = String.format(STATUS_UPDATE_ERROR_MSG, oldItemStatus, newItemStatus);
              LOG.warn(msg);
              dataImportEventPayload.getContext().put(ITEM.value(), ItemUtil.mapToJson(updatedItem).encode());
              future.completeExceptionally(new EventProcessingException(msg));
            } else {
              addHoldingToPayloadIfNeeded(dataImportEventPayload, context, updatedItem).onComplete(item -> {
                dataImportEventPayload.getContext().put(ITEM.value(), ItemUtil.mapToJson(updatedItem).encode());
                future.complete(dataImportEventPayload);
              });
            }
          });
      })
      .onFailure(e -> {
        LOG.error("Failed to update inventory Item by jobExecutionId: '{}' and recordId: '{}' and chunkId: '{}'",
          jobExecutionId, recordId, chunkId, e);
        future.completeExceptionally(e);
      });
  } catch (Exception e) {
    LOG.error("Error updating inventory Item", e);
    future.completeExceptionally(e);
  }
  return future;
}
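The isProtectedStatusChanged check keeps a mapped record from silently overwriting an item status that circulation owns. Its body is not shown; a minimal sketch, assuming a set of protected status names (the PROTECTED_STATUSES values are illustrative, not the module's actual list):

// A minimal sketch; the actual protected-status list lives in the module.
private static final Set<String> PROTECTED_STATUSES =
  Set.of("Checked out", "Aged to lost");   // illustrative values only

private boolean isProtectedStatusChanged(String oldItemStatus, String newItemStatus) {
  return PROTECTED_STATUSES.contains(oldItemStatus) && !oldItemStatus.equals(newItemStatus);
}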