Usage of org.folio.rest.jaxrs.model.JobExecutionSourceChunk in the project mod-source-record-manager (folio-org).
From class JobExecutionSourceChunkDaoImpl, method mapResultSetToJobExecutionSourceChunks:
/**
 * Converts a database result set into a list of {@link JobExecutionSourceChunk} entities.
 * Scalar identifiers come from dedicated UUID columns; the remaining fields are read
 * from the "jsonb" document column.
 * NOTE(review): assumes every jsonb field is present and non-null — a missing
 * CREATED_DATE_JSONB_FIELD would NPE on unboxing; verify against the table schema.
 *
 * @param resultSet rows returned by a job_execution_source_chunks query
 * @return mapped chunks, in row order; empty list for an empty result set
 */
private List<JobExecutionSourceChunk> mapResultSetToJobExecutionSourceChunks(RowSet<Row> resultSet) {
  List<JobExecutionSourceChunk> chunks = new ArrayList<>();
  for (Row row : resultSet) {
    JsonObject jsonb = row.getJsonObject("jsonb");
    chunks.add(new JobExecutionSourceChunk()
      .withId(row.getUUID(ID_FIELD).toString())
      .withJobExecutionId(row.getUUID(JOB_EXECUTION_ID_FIELD).toString())
      .withChunkSize(jsonb.getInteger(CHUNK_SIZE_JSONB_FIELD))
      .withState(State.fromValue(jsonb.getString(STATE_JSONB_FIELD)))
      .withCreatedDate(new Date(jsonb.getLong(CREATED_DATE_JSONB_FIELD)))
      .withLast(jsonb.getBoolean(LAST_JSONB_FIELD)));
  }
  return chunks;
}
Usage of org.folio.rest.jaxrs.model.JobExecutionSourceChunk in the project mod-source-record-manager (folio-org).
From class EventDrivenChunkProcessingServiceImpl, method processRawRecordsChunk:
/**
 * Processes one raw-records chunk for the given job execution: ensures progress is
 * initialized, moves the job to PARSING_IN_PROGRESS if needed, then hands the chunk
 * to the change engine for parsing.
 * Regardless of parsing success or failure, the job execution is re-checked for the
 * all-chunks-errored condition before the returned future is completed with the
 * original parsing outcome.
 *
 * @param incomingChunk  raw records to process
 * @param sourceChunk    persisted chunk descriptor (used for its id and size)
 * @param jobExecutionId target job execution id
 * @param params         OKAPI connection parameters (tenant, headers)
 * @return future completed with {@code true} on success, failed with the parsing error otherwise
 */
@Override
protected Future<Boolean> processRawRecordsChunk(RawRecordsDto incomingChunk, JobExecutionSourceChunk sourceChunk, String jobExecutionId, OkapiConnectionParams params) {
  LOGGER.debug("Starting to process raw records chunk with id: {} for jobExecutionId: {}. Chunk size: {}.", sourceChunk.getId(), jobExecutionId, sourceChunk.getChunkSize());
  Promise<Boolean> promise = Promise.promise();
  StatusDto parsingInProgress = new StatusDto().withStatus(StatusDto.Status.PARSING_IN_PROGRESS);
  initializeJobExecutionProgressIfNecessary(jobExecutionId, incomingChunk, params.getTenantId())
    .compose(progressAr -> checkAndUpdateJobExecutionStatusIfNecessary(jobExecutionId, parsingInProgress, params))
    .compose(jobExec -> changeEngineService.parseRawRecordsChunkForJobExecution(incomingChunk, jobExec, sourceChunk.getId(), params))
    // Always run the all-chunks-errored check, then propagate the parsing outcome.
    .onComplete(parsingAr -> updateJobExecutionIfAllSourceChunksMarkedAsError(jobExecutionId, params)
      .onComplete(updateAr -> promise.handle(parsingAr.map(true))));
  return promise.future();
}
Usage of org.folio.rest.jaxrs.model.JobExecutionSourceChunk in the project mod-source-record-manager (folio-org).
From class JobExecutionDaoImplTest, method prepareDataForDeletion:
/**
 * Test fixture: creates one job execution with the given completed date, plus one
 * associated record of each related entity (progress, monitoring, journal record,
 * source chunk), so deletion logic can be verified against all related tables.
 *
 * @param completedDate completion timestamp to stamp on the job execution
 * @return future with the updated job execution once all related rows are saved
 */
private Future<JobExecution> prepareDataForDeletion(Instant completedDate) {
  InitJobExecutionsRsDto response = constructAndPostInitJobExecutionRqDto(1);
  List<JobExecution> createdJobExecutions = response.getJobExecutions();
  assertThat(createdJobExecutions.size(), Matchers.is(1));
  JobExecution jobExec = createdJobExecutions.get(0);
  jobExec.withCompletedDate(Date.from(completedDate));

  JobExecutionProgress progress = new JobExecutionProgress()
    .withJobExecutionId(jobExec.getId())
    .withTotal(1)
    .withCurrentlySucceeded(1)
    .withCurrentlyFailed(0);
  JobMonitoring monitoring = new JobMonitoring()
    .withId(UUID.randomUUID().toString())
    .withJobExecutionId(jobExec.getId())
    .withNotificationSent(true)
    .withLastEventTimestamp(new Date());
  JournalRecord journalRecord = new JournalRecord()
    .withJobExecutionId(jobExec.getId())
    .withSourceRecordOrder(0)
    .withSourceId(UUID.randomUUID().toString())
    .withEntityType(JournalRecord.EntityType.MARC_BIBLIOGRAPHIC)
    .withEntityId(UUID.randomUUID().toString())
    .withActionType(CREATE)
    .withActionDate(new Date())
    .withActionStatus(COMPLETED);
  JobExecutionSourceChunk sourceChunk = new JobExecutionSourceChunk()
    .withId("67dfac11-1caf-4470-9ad1-d533f6360bdd")
    .withJobExecutionId(jobExec.getId())
    .withLast(false)
    .withState(JobExecutionSourceChunk.State.COMPLETED)
    .withChunkSize(10)
    .withProcessedAmount(42);

  return jobExecutionDao.updateJobExecution(jobExec, TENANT_ID).compose(jobExecution -> {
    Future<RowSet<Row>> progressSaved = jobExecutionProgressDao.save(progress, TENANT_ID);
    Future<String> monitoringSaved = jobMonitoringDao.save(monitoring, TENANT_ID);
    Future<String> journalSaved = journalRecordDao.save(journalRecord, TENANT_ID);
    Future<String> chunkSaved = jobExecutionSourceChunkDao.save(sourceChunk, TENANT_ID);
    return CompositeFuture.all(progressSaved, monitoringSaved, journalSaved, chunkSaved)
      .compose(allSaved -> Future.succeededFuture(jobExecution));
  });
}
Usage of org.folio.rest.jaxrs.model.JobExecutionSourceChunk in the project mod-source-record-manager (folio-org).
From class AbstractChunkProcessingService, method processChunk:
/**
 * Entry point for processing an incoming chunk of raw records: looks up the target
 * job execution, persists a source-chunk tracking row in state IN_PROGRESS, and
 * delegates to {@code processRawRecordsChunk}.
 * A unique-constraint violation on save means this chunk id was already received,
 * and is surfaced as a {@link DuplicateEventException} so the caller can treat the
 * event as a duplicate rather than a hard failure.
 *
 * @param incomingChunk  chunk of raw records to process
 * @param jobExecutionId id of the job execution this chunk belongs to
 * @param params         OKAPI connection parameters (tenant, headers)
 * @return future completed with {@code true} on success; failed with
 *         {@link NotFoundException} if the job execution does not exist, or
 *         {@link DuplicateEventException} if the chunk was already saved
 */
@Override
public Future<Boolean> processChunk(RawRecordsDto incomingChunk, String jobExecutionId, OkapiConnectionParams params) {
  LOGGER.debug("AbstractChunkProcessingService:: processChunk for jobExecutionId: {}", jobExecutionId);
  prepareChunk(incomingChunk);
  return jobExecutionService.getJobExecutionById(jobExecutionId, params.getTenantId())
    .compose(optionalJobExecution -> optionalJobExecution
      .map(jobExecution -> {
        JobExecutionSourceChunk sourceChunk = new JobExecutionSourceChunk()
          .withId(incomingChunk.getId())
          .withJobExecutionId(jobExecutionId)
          .withLast(incomingChunk.getRecordsMetadata().getLast())
          .withState(JobExecutionSourceChunk.State.IN_PROGRESS)
          .withChunkSize(incomingChunk.getInitialRecords().size())
          .withCreatedDate(new Date());
        return jobExecutionSourceChunkDao.save(sourceChunk, params.getTenantId())
          .compose(ar -> processRawRecordsChunk(incomingChunk, sourceChunk, jobExecution.getId(), params))
          .map(true)
          .recover(throwable -> isUniqueConstraintViolation(throwable)
            // Fixed message grammar ("is already exists" -> "already exists").
            ? Future.failedFuture(new DuplicateEventException(String.format(
                "Source chunk with %s id for %s jobExecution already exists", incomingChunk.getId(), jobExecutionId)))
            : Future.failedFuture(throwable));
      })
      .orElse(Future.failedFuture(new NotFoundException(
        String.format("Couldn't find JobExecution with id %s", jobExecutionId)))));
}

/**
 * Returns true when the given failure is a Postgres unique-constraint violation.
 * Constant-first {@code equals} avoids an NPE if {@code getCode()} is null.
 */
private static boolean isUniqueConstraintViolation(Throwable throwable) {
  return throwable instanceof PgException
    && UNIQUE_CONSTRAINT_VIOLATION_CODE.equals(((PgException) throwable).getCode());
}
Usage of org.folio.rest.jaxrs.model.JobExecutionSourceChunk in the project mod-source-record-manager (folio-org).
From class ChangeEngineServiceImpl, method parseRawRecordsChunkForJobExecution:
/**
 * Parses a chunk of raw records for the given job execution and routes the parsed
 * records to the appropriate action: update, delete, or save.
 * On a failed save, marks the job execution as ERROR (RECORD_UPDATE_ERROR) and the
 * source chunk state as ERROR before failing the returned future with the save cause.
 *
 * @param chunk         raw records plus metadata (content type, last flag)
 * @param jobExecution  job execution the records belong to
 * @param sourceChunkId id of the source chunk being processed (for error-state updates)
 * @param params        OKAPI connection parameters (tenant, headers)
 * @return future with the parsed records, or failed with the parsing/action error
 */
@Override
public Future<List<Record>> parseRawRecordsChunkForJobExecution(RawRecordsDto chunk, JobExecution jobExecution, String sourceChunkId, OkapiConnectionParams params) {
Promise<List<Record>> promise = Promise.promise();
Future<List<Record>> futureParsedRecords = parseRecords(chunk.getInitialRecords(), chunk.getRecordsMetadata().getContentType(), jobExecution, sourceChunkId, params.getTenantId(), params);
// Ensure the mapping metadata snapshot exists before acting on the parsed records;
// .map(parsedRecords) carries the records through the snapshot step unchanged.
futureParsedRecords.compose(parsedRecords -> ensureMappingMetaDataSnapshot(jobExecution.getId(), parsedRecords, params).map(parsedRecords)).onSuccess(parsedRecords -> {
fillParsedRecordsWithAdditionalFields(parsedRecords);
// Dispatch on the job profile's action: MARC update, MARC delete, or default save.
if (updateMarcActionExists(jobExecution)) {
updateRecords(parsedRecords, jobExecution, params).onSuccess(ar -> promise.complete(parsedRecords)).onFailure(promise::fail);
} else if (deleteMarcActionExists(jobExecution)) {
deleteRecords(parsedRecords, jobExecution, params).onSuccess(ar -> promise.complete(parsedRecords)).onFailure(promise::fail);
} else {
saveRecords(params, jobExecution, parsedRecords).onComplete(postAr -> {
if (postAr.failed()) {
// Save failed: flag the whole job execution as ERROR with RECORD_UPDATE_ERROR.
StatusDto statusDto = new StatusDto().withStatus(StatusDto.Status.ERROR).withErrorStatus(StatusDto.ErrorStatus.RECORD_UPDATE_ERROR);
jobExecutionService.updateJobExecutionStatus(jobExecution.getId(), statusDto, params).onComplete(r -> {
if (r.failed()) {
// Best-effort status update: log and continue, the promise still fails below.
LOGGER.error("Error during update jobExecution and snapshot status", r.cause());
}
});
// Also mark this source chunk as ERROR; the promise is failed with the original
// save cause only after the chunk update attempt completes (success or failure).
jobExecutionSourceChunkDao.getById(sourceChunkId, params.getTenantId()).compose(optional -> optional.map(sourceChunk -> jobExecutionSourceChunkDao.update(sourceChunk.withState(JobExecutionSourceChunk.State.ERROR), params.getTenantId())).orElseThrow(() -> new NotFoundException(String.format("Couldn't update failed jobExecutionSourceChunk status to ERROR, jobExecutionSourceChunk with id %s was not found", sourceChunkId)))).onComplete(ar -> promise.fail(postAr.cause()));
} else {
promise.complete(parsedRecords);
}
});
}
}).onFailure(th -> {
LOGGER.error("Error parsing records: {}", th.getMessage());
promise.fail(th);
});
return promise.future();
}
Aggregations