use of org.folio.rest.jaxrs.model.InitialRecord in project mod-source-record-manager by folio-org.
the class ChangeManagerAPITest method fillInRecordOrderIfAtLeastOneRecordHasNoOrder.
private void fillInRecordOrderIfAtLeastOneRecordHasNoOrder(String rawRecord) throws InterruptedException {
RawRecordsDto rawRecordsDto = new RawRecordsDto()
  .withId(UUID.randomUUID().toString())
  .withRecordsMetadata(new RecordsMetadata()
    .withLast(true)
    .withCounter(7)
    .withContentType(RecordsMetadata.ContentType.MARC_RAW))
  .withInitialRecords(asList(
    new InitialRecord().withRecord(CORRECT_RAW_RECORD_1),
    new InitialRecord().withRecord(CORRECT_RAW_RECORD_2).withOrder(5),
    new InitialRecord().withRecord(rawRecord).withOrder(6)));
InitJobExecutionsRsDto response = constructAndPostInitJobExecutionRqDto(1);
List<JobExecution> createdJobExecutions = response.getJobExecutions();
assertThat(createdJobExecutions.size(), is(1));
JobExecution jobExec = createdJobExecutions.get(0);
RestAssured.given()
  .spec(spec)
  .body(new JobProfileInfo()
    .withName("MARC records")
    .withId(DEFAULT_JOB_PROFILE_ID)
    .withDataType(DataType.MARC))
  .when()
  .put(JOB_EXECUTION_PATH + jobExec.getId() + JOB_PROFILE_PATH)
  .then()
  .statusCode(HttpStatus.SC_OK);
RestAssured.given()
  .spec(spec)
  .body(rawRecordsDto)
  .when()
  .post(JOB_EXECUTION_PATH + jobExec.getId() + RECORDS_PATH)
  .then()
  .statusCode(HttpStatus.SC_NO_CONTENT);
String topicToObserve = formatToKafkaTopicName(DI_RAW_RECORDS_CHUNK_PARSED.value());
List<String> observedValues = kafkaCluster.observeValues(ObserveKeyValues.on(topicToObserve, 1)
  .observeFor(30, TimeUnit.SECONDS)
  .build());
Event obtainedEvent = Json.decodeValue(observedValues.get(0), Event.class);
assertEquals(DI_RAW_RECORDS_CHUNK_PARSED.value(), obtainedEvent.getEventType());
RecordCollection processedRecords = Json.decodeValue(obtainedEvent.getEventPayload(), RecordCollection.class);
assertEquals(3, processedRecords.getRecords().size());
assertEquals(4, processedRecords.getRecords().get(0).getOrder().intValue());
assertEquals(5, processedRecords.getRecords().get(1).getOrder().intValue());
assertEquals(6, processedRecords.getRecords().get(2).getOrder().intValue());
}
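The expected orders 4, 5 and 6 follow if one assumes that, whenever at least one record in a chunk arrives without an order, orders are assigned sequentially starting from the chunk counter minus the chunk size (here 7 - 3 = 4). A minimal sketch of that assumed rule, using a hypothetical helper rather than the module's actual implementation:
// Hypothetical helper, illustrating the order-filling rule the test above appears to verify.
// Assumption: when any record lacks an order, numbering starts at (counter - number of records in the chunk).
private static void fillInMissingOrders(RawRecordsDto chunk) {
  List<InitialRecord> records = chunk.getInitialRecords();
  boolean anyWithoutOrder = records.stream().anyMatch(r -> r.getOrder() == null);
  if (anyWithoutOrder) {
    int startingOrder = chunk.getRecordsMetadata().getCounter() - records.size(); // 7 - 3 = 4
    for (int i = 0; i < records.size(); i++) {
      records.get(i).setOrder(startingOrder + i);
    }
  }
}
Note that with counter = 7 and three records, the explicitly assigned orders 5 and 6 coincide with a sequential renumbering from 4, so the assertions cannot distinguish between renumbering every record and filling only the missing ones.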
use of org.folio.rest.jaxrs.model.InitialRecord in project mod-source-record-manager by folio-org.
the class ParsedRecordsDiErrorProviderTest method shouldParseInitialRecords.
@Test
public void shouldParseInitialRecords(TestContext context) {
Async async = context.async();
JobExecution jobExecution = new JobExecution().withId(JOB_EXECUTION_ID);
when(jobExecutionService.getJobExecutionById(JOB_EXECUTION_ID, TENANT_ID))
  .thenReturn(Future.succeededFuture(Optional.of(jobExecution)));
RawRecordsDto rawRecordsDto = new RawRecordsDto()
  .withInitialRecords(Lists.newArrayList(new InitialRecord()))
  .withRecordsMetadata(new RecordsMetadata().withContentType(RecordsMetadata.ContentType.MARC_JSON));
when(changeEngineService.getParsedRecordsFromInitialRecords(
    eq(rawRecordsDto.getInitialRecords()),
    eq(rawRecordsDto.getRecordsMetadata().getContentType()),
    eq(jobExecution),
    anyString()))
  .thenReturn(Lists.newArrayList(new Record().withParsedRecord(new ParsedRecord())));
Future<List<Record>> parserRecordsFuture = diErrorProvider.getParsedRecordsFromInitialRecords(getOkapiParams(), JOB_EXECUTION_ID, rawRecordsDto);
parserRecordsFuture.onComplete(ar -> {
List<Record> parsedRecords = ar.result();
assertEquals(1, parsedRecords.size());
async.complete();
});
}
use of org.folio.rest.jaxrs.model.InitialRecord in project mod-source-record-manager by folio-org.
the class RecordProcessedEventHandlingServiceImplTest method shouldMarkJobExecutionAsErrorOnHandleDIErrorEventWhenAllRecordsProcessed.
@Test
public void shouldMarkJobExecutionAsErrorOnHandleDIErrorEventWhenAllRecordsProcessed(TestContext context) {
// given
Async async = context.async();
RawRecordsDto rawRecordsDto = new RawRecordsDto()
  .withInitialRecords(Collections.singletonList(new InitialRecord().withRecord(CORRECT_RAW_RECORD)))
  .withRecordsMetadata(new RecordsMetadata()
    .withLast(true)
    .withCounter(2)
    .withTotal(2)
    .withContentType(RecordsMetadata.ContentType.MARC_RAW));
HashMap<String, String> payloadContext = new HashMap<>();
DataImportEventPayload datImpErrorEventPayload = new DataImportEventPayload()
  .withEventType(DataImportEventTypes.DI_ERROR.value())
  .withContext(payloadContext);
DataImportEventPayload datImpCompletedEventPayload = new DataImportEventPayload()
  .withEventType(DataImportEventTypes.DI_COMPLETED.value())
  .withContext(payloadContext);
Future<Boolean> future = jobExecutionService.initializeJobExecutions(initJobExecutionsRqDto, params)
  .compose(initJobExecutionsRsDto -> jobExecutionService.setJobProfileToJobExecution(initJobExecutionsRsDto.getParentJobExecutionId(), jobProfileInfo, params))
  .map(jobExecution -> {
    datImpErrorEventPayload.withJobExecutionId(jobExecution.getId());
    return datImpCompletedEventPayload.withJobExecutionId(jobExecution.getId());
  })
  .compose(ar -> chunkProcessingService.processChunk(rawRecordsDto, datImpErrorEventPayload.getJobExecutionId(), params));
// when
Future<Optional<JobExecution>> jobFuture = future
  .compose(ar -> recordProcessedEventHandlingService.handle(Json.encode(datImpErrorEventPayload), params))
  .compose(ar -> recordProcessedEventHandlingService.handle(Json.encode(datImpCompletedEventPayload), params))
  .compose(ar -> jobExecutionService.getJobExecutionById(datImpCompletedEventPayload.getJobExecutionId(), TENANT_ID));
// then
jobFuture.onComplete(ar -> {
context.assertTrue(ar.succeeded());
context.assertTrue(ar.result().isPresent());
JobExecution jobExecution = ar.result().get();
context.assertEquals(ERROR, jobExecution.getStatus());
context.assertEquals(JobExecution.UiStatus.ERROR, jobExecution.getUiStatus());
context.assertEquals(rawRecordsDto.getRecordsMetadata().getTotal(), jobExecution.getProgress().getTotal());
context.assertNotNull(jobExecution.getStartedDate());
context.assertNotNull(jobExecution.getCompletedDate());
verify(2, putRequestedFor(new UrlPathPattern(new RegexPattern(SNAPSHOT_SERVICE_URL + "/.*"), true)));
async.complete();
});
}
use of org.folio.rest.jaxrs.model.InitialRecord in project mod-source-record-manager by folio-org.
the class EventDrivenChunkProcessingServiceImplTest method shouldProcessErrorRawRecord.
@Test
public void shouldProcessErrorRawRecord(TestContext context) {
Async async = context.async();
RawRecordsDto rawRecordsDto = new RawRecordsDto()
  .withRecordsMetadata(new RecordsMetadata()
    .withLast(false)
    .withCounter(1)
    .withContentType(RecordsMetadata.ContentType.MARC_RAW))
  .withInitialRecords(Collections.singletonList(new InitialRecord().withRecord(RAW_RECORD_RESULTING_IN_PARSING_ERROR)));
Future<Boolean> future = jobExecutionService.initializeJobExecutions(initJobExecutionsRqDto, params)
  .compose(initJobExecutionsRsDto -> jobExecutionService.setJobProfileToJobExecution(initJobExecutionsRsDto.getParentJobExecutionId(), jobProfileInfo, params))
  .compose(jobExecution -> chunkProcessingService.processChunk(rawRecordsDto, jobExecution.getId(), params));
future.onComplete(ar -> {
context.assertTrue(ar.succeeded());
ArgumentCaptor<StatusDto> captor = ArgumentCaptor.forClass(StatusDto.class);
Mockito.verify(jobExecutionService, times(1)).updateJobExecutionStatus(anyString(), captor.capture(), isA(OkapiConnectionParams.class));
context.assertTrue(PARSING_IN_PROGRESS.equals(captor.getAllValues().get(0).getStatus()));
verify(1, postRequestedFor(urlEqualTo(SNAPSHOT_SERVICE_URL)));
async.complete();
});
}
use of org.folio.rest.jaxrs.model.InitialRecord in project mod-source-record-manager by folio-org.
the class ChangeEngineServiceImpl method parseRecords.
/**
 * Parses a list of source records.
 *
 * @param rawRecords        - list of raw records to parse
 * @param recordContentType - content type of the raw records
 * @param jobExecution      - job execution the records are parsed within
 * @param sourceChunkId     - id of the JobExecutionSourceChunk
 * @param tenantId          - tenant id
 * @param okapiParams       - Okapi connection parameters
 * @return - list of records with parsed or error data
 */
private Future<List<Record>> parseRecords(List<InitialRecord> rawRecords, RecordsMetadata.ContentType recordContentType,
                                          JobExecution jobExecution, String sourceChunkId, String tenantId,
                                          OkapiConnectionParams okapiParams) {
if (CollectionUtils.isEmpty(rawRecords)) {
return Future.succeededFuture(Collections.emptyList());
}
var counter = new MutableInt();
// If the number of records exceeds THRESHOLD_CHUNK_SIZE, update the progress every 20% of processed records;
// otherwise update it once, after all the records are processed.
int partition = rawRecords.size() > THRESHOLD_CHUNK_SIZE ? rawRecords.size() / 5 : rawRecords.size();
var records = getParsedRecordsFromInitialRecords(rawRecords, recordContentType, jobExecution, sourceChunkId).stream()
  .peek(stat -> { // NOSONAR
    if (counter.incrementAndGet() % partition == 0) {
      LOGGER.info("Parsed {} records out of {}", counter.intValue(), rawRecords.size());
      jobExecutionSourceChunkDao.getById(sourceChunkId, tenantId)
        .compose(optional -> optional
          .map(sourceChunk -> jobExecutionSourceChunkDao.update(sourceChunk.withProcessedAmount(sourceChunk.getProcessedAmount() + counter.intValue()), tenantId))
          .orElseThrow(() -> new NotFoundException(format("Couldn't update jobExecutionSourceChunk progress, jobExecutionSourceChunk with id %s was not found", sourceChunkId))));
    }
  })
  .collect(Collectors.toList());
Promise<List<Record>> promise = Promise.promise();
List<Future> listFuture = executeInBatches(records, batch -> verifyMarcHoldings004Field(batch, okapiParams));
filterMarcHoldingsBy004Field(records, listFuture, okapiParams, jobExecution, promise);
return promise.future();
}
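For reference, the progress-update cadence described by the comment in parseRecords can be isolated into a small standalone illustration. This is not module code, and the threshold value of 100 used in the examples is an assumption, not taken from the module:
// Standalone illustration of the progress-update cadence; not module code.
// THRESHOLD_CHUNK_SIZE is assumed to be 100 for the examples below.
static int progressUpdateInterval(int chunkSize, int thresholdChunkSize) {
  // Large chunks: update every 20% of records; small chunks: once, after the last record.
  return chunkSize > thresholdChunkSize ? chunkSize / 5 : chunkSize;
}
// progressUpdateInterval(500, 100) -> 100: updates fire after records 100, 200, 300, 400 and 500.
// progressUpdateInterval(50, 100)  -> 50 : a single update fires after record 50.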