Use of org.haiku.haikudepotserver.job.model.JobService in project haikudepotserver by haiku.
The class PkgProminenceAndUserRatingSpreadsheetJobRunner, method run:
@Override
public void run(JobService jobService, PkgProminenceAndUserRatingSpreadsheetJobSpecification specification) throws IOException {
    Preconditions.checkArgument(null != jobService);
    Preconditions.checkArgument(null != specification);

    final ObjectContext context = serverRuntime.newContext();

    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(
            specification.getGuid(), "download", MediaType.CSV_UTF_8.toString());

    try (OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
            OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream);
            CSVWriter writer = new CSVWriter(outputStreamWriter, ',')) {

        writer.writeNext(new String[] { "pkg-name", "repository-code", "prominence-name",
                "prominence-ordering", "derived-rating", "derived-rating-sample-size" });

        // stream out the packages.
        long startMs = System.currentTimeMillis();
        LOGGER.info("will produce prominence spreadsheet report");

        long count = pkgService.eachPkg(context, false, pkg -> {
            List<PkgProminence> pkgProminences = PkgProminence.findByPkg(context, pkg);
            List<PkgUserRatingAggregate> pkgUserRatingAggregates = PkgUserRatingAggregate.findByPkg(context, pkg);
            List<Repository> repositories = Stream.concat(
                            pkgProminences.stream().map(PkgProminence::getRepository),
                            pkgUserRatingAggregates.stream().map(PkgUserRatingAggregate::getRepository))
                    .distinct()
                    .sorted()
                    .collect(Collectors.toList());

            if (repositories.isEmpty()) {
                writer.writeNext(new String[] { pkg.getName(), "", "", "", "", "" });
            } else {
                for (Repository repository : repositories) {
                    Optional<PkgProminence> pkgProminenceOptional = pkgProminences.stream()
                            .filter(pp -> pp.getRepository().equals(repository))
                            .collect(SingleCollector.optional());
                    Optional<PkgUserRatingAggregate> pkgUserRatingAggregateOptional = pkgUserRatingAggregates.stream()
                            .filter(pura -> pura.getRepository().equals(repository))
                            .collect(SingleCollector.optional());
                    writer.writeNext(new String[] {
                            pkg.getName(),
                            repository.getCode(),
                            pkgProminenceOptional.map(p -> p.getProminence().getName()).orElse(""),
                            pkgProminenceOptional.map(p -> p.getProminence().getOrdering().toString()).orElse(""),
                            pkgUserRatingAggregateOptional.map(p -> p.getDerivedRating().toString()).orElse(""),
                            pkgUserRatingAggregateOptional.map(p -> p.getDerivedRatingSampleSize().toString()).orElse("") });
                }
            }
            return true;
        });

        LOGGER.info("did produce prominence spreadsheet report for {} packages in {}ms",
                count, System.currentTimeMillis() - startMs);
    }
}
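This runner shows the typical spreadsheet-report shape: the JobService registers a named "download" artifact against the job's GUID and hands back a sink into which CSV rows are streamed. The following is a minimal sketch of that same shape, not haikudepotserver source: the class name ExampleReportJobRunner and the plain String guid parameter are assumptions for illustration (real runners receive a typed specification object), and opencsv's two-argument CSVWriter constructor is assumed, as in the runner above.

import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import com.google.common.net.MediaType;
import com.opencsv.CSVWriter;
import org.haiku.haikudepotserver.job.model.JobDataWithByteSink;
import org.haiku.haikudepotserver.job.model.JobService;

public class ExampleReportJobRunner {

    public void run(JobService jobService, String jobGuid) throws IOException {
        // register a named "download" artifact against the job; the returned
        // sink is where the report's bytes are streamed.
        JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(
                jobGuid, "download", MediaType.CSV_UTF_8.toString());
        try (OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
                OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream);
                CSVWriter writer = new CSVWriter(outputStreamWriter, ',')) {
            writer.writeNext(new String[] { "example-column" });
            // rows would be streamed here rather than accumulated in memory.
        }
    }
}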
Use of org.haiku.haikudepotserver.job.model.JobService in project haikudepotserver by haiku.
The class PkgScreenshotImportArchiveJobRunner, method run:
@Override
public void run(JobService jobService, PkgScreenshotImportArchiveJobSpecification specification) throws IOException, JobRunnerException {
    Preconditions.checkArgument(null != jobService);
    Preconditions.checkArgument(null != specification);
    Preconditions.checkArgument(null != specification.getInputDataGuid(), "missing input data guid on specification");
    Preconditions.checkArgument(null != specification.getImportStrategy(), "missing import strategy on specification");

    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(
            specification.getGuid(), "download", MediaType.CSV_UTF_8.toString());

    Optional<JobDataWithByteSource> jobDataWithByteSourceOptional = jobService.tryObtainData(specification.getInputDataGuid());

    if (!jobDataWithByteSourceOptional.isPresent()) {
        throw new IllegalStateException("the job data was not able to be found for guid: " + specification.getInputDataGuid());
    }

    if (!serverRuntime.performInTransaction(() -> {
        try (OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
                OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream);
                CSVWriter writer = new CSVWriter(outputStreamWriter, ',')) {

            Map<String, ScreenshotImportMetadatas> metadatas = new HashMap<>();
            writer.writeNext(new String[] { "path", "pkg-name", "action", "message", "code" });

            // sweep through and collect meta-data about the packages in the tar file.
            LOGGER.info("will collect data about packages' screenshots from the archive");
            consumeScreenshotArchiveEntries(
                    jobDataWithByteSourceOptional.get().getByteSource(),
                    (ae) -> collectScreenshotMetadataFromArchive(metadatas, ae.getArchiveInputStream(), ae.getArchiveEntry(), ae.getPkgName(), ae.getOrder()));
            LOGGER.info("did collect data about {} packages' screenshots from the archive", metadatas.size());

            LOGGER.info("will collect data about persisted packages' screenshots");
            collectPersistedScreenshotMetadata(metadatas);
            LOGGER.info("did collect data about persisted packages' screenshots");

            if (specification.getImportStrategy() == PkgScreenshotImportArchiveJobSpecification.ImportStrategy.REPLACE) {
                LOGGER.info("will delete persisted screenshots that are absent from the archive");
                int deleted = deletePersistedScreenshotsThatAreNotPresentInArchiveAndReport(writer, metadatas.values());
                LOGGER.info("did delete {} persisted screenshots that are absent from the archive", deleted);
            }

            blendInArtificialOrderings(metadatas.values());

            // sweep through the archive again and load in those screenshots that are not already present.
            // The ordering of the inbound data should be preserved.
            LOGGER.info("will load screenshots from the archive for {} packages", metadatas.size());
            consumeScreenshotArchiveEntries(
                    jobDataWithByteSourceOptional.get().getByteSource(),
                    (ae) -> importScreenshotsFromArchiveAndReport(writer, metadatas.get(ae.getPkgName()), ae.getArchiveInputStream(), ae.getArchiveEntry(), ae.getPkgName(), ae.getOrder()));
            LOGGER.info("did load screenshots from the archive for {} packages", metadatas.size());

            return true;
        } catch (IOException e) {
            LOGGER.error("unable to complete the job", e);
        }
        return false;
    })) {
        throw new JobRunnerException("unable to complete job");
    }
}
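This runner exercises both directions of JobService data flow: tryObtainData fetches previously supplied input (the uploaded archive) by its data GUID, while storeGeneratedData registers the CSV report that the run produces. A minimal sketch of the input side follows, assuming JobDataWithByteSource exposes a Guava ByteSource via getByteSource(), as used above; inputDataGuid is a placeholder.

// assumes: a JobService instance and an inputDataGuid, as in the runner above.
Optional<JobDataWithByteSource> jobDataOptional = jobService.tryObtainData(inputDataGuid);
JobDataWithByteSource jobData = jobDataOptional.orElseThrow(
        () -> new IllegalStateException("the job data was not able to be found for guid: " + inputDataGuid));
try (InputStream inputStream = jobData.getByteSource().openBufferedStream()) {
    // consume the uploaded archive from inputStream here.
}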
Use of org.haiku.haikudepotserver.job.model.JobService in project haikudepotserver by haiku.
The class PkgVersionPayloadLengthPopulationJobRunner, method run:
@Override
public void run(JobService jobService, PkgVersionPayloadLengthPopulationJobSpecification specification) throws IOException {
    Preconditions.checkArgument(null != jobService);
    Preconditions.checkArgument(null != specification);

    ObjectContext context = serverRuntime.newContext();

    // we want to fetch the ObjectIds of PkgVersions that need to be handled.
    List<PkgVersion> pkgVersions = ObjectSelect.query(PkgVersion.class)
            .where(PkgVersion.ACTIVE.isTrue())
            .and(PkgVersion.PKG.dot(Pkg.ACTIVE).isTrue())
            .and(PkgVersion.IS_LATEST.isTrue())
            .and(PkgVersion.PAYLOAD_LENGTH.isNull())
            .pageSize(50)
            .select(context);

    LOGGER.info("did find {} package versions that need payload lengths to be populated", pkgVersions.size());

    for (int i = 0; i < pkgVersions.size(); i++) {
        PkgVersion pkgVersion = pkgVersions.get(i);
        Optional<URL> urlOptional = pkgVersion.tryGetHpkgURL(ExposureType.INTERNAL_FACING);

        if (urlOptional.isPresent()) {
            try {
                urlHelperService.tryGetPayloadLength(urlOptional.get())
                        .filter(l -> l > 0L)
                        .ifPresent(l -> {
                            pkgVersion.setPayloadLength(l);
                            context.commitChanges();
                        });
            } catch (IOException ioe) {
                LOGGER.error("unable to get the payload length for " + pkgVersion, ioe);
            }
        } else {
            LOGGER.info("unable to get the length of [{}] because no hpkg url was able to be obtained", pkgVersion);
        }

        jobService.setJobProgressPercent(specification.getGuid(), i * 100 / pkgVersions.size());
    }
}
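Besides moving data, the JobService is used here for progress reporting: setJobProgressPercent is called once per item with an integer percentage derived from the loop index. A minimal sketch of that pattern, where items and processItem are hypothetical placeholders:

// assumes: jobService and a specification with a GUID, as in the runner above.
for (int i = 0; i < items.size(); i++) {
    processItem(items.get(i)); // hypothetical per-item work
    // integer arithmetic yields 0..99 while the loop runs; completion is
    // presumably signalled separately by the job infrastructure.
    jobService.setJobProgressPercent(specification.getGuid(), i * 100 / items.size());
}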
Use of org.haiku.haikudepotserver.job.model.JobService in project haikudepotserver by haiku.
The class RepositoryHpkrIngressJobRunner, method run:
@Override
public void run(JobService jobService, RepositoryHpkrIngressJobSpecification specification) {
    Preconditions.checkNotNull(specification);

    ObjectContext mainContext = serverRuntime.newContext();
    Set<String> allowedRepositorySourceCodes = specification.getRepositorySourceCodes();

    RepositorySource.findActiveByRepository(
                    mainContext,
                    Repository.getByCode(mainContext, specification.getRepositoryCode()))
            .stream()
            .filter(rs -> null == allowedRepositorySourceCodes || allowedRepositorySourceCodes.contains(rs.getCode()))
            .forEach(rs -> serverRuntime.performInTransaction(() -> {
                try {
                    runForRepositorySource(mainContext, rs);
                } catch (Throwable e) {
                    LOGGER.error("a problem has arisen processing a repository file for repository source [{}]", rs.getCode(), e);
                }
                return null;
            }));
}
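Note that this runner receives the JobService only through the runner contract and does not call it in the method body. The interesting pattern is the transactional isolation: each repository source is processed in its own Cayenne transaction, and exceptions are caught and logged so a failure in one source does not abort the others. A minimal sketch of that per-source isolation, with doIngress as a hypothetical stand-in for runForRepositorySource:

// assumes: a Cayenne ServerRuntime field named serverRuntime, as above.
sources.forEach(rs -> serverRuntime.performInTransaction(() -> {
    try {
        doIngress(rs); // hypothetical stand-in for runForRepositorySource
    } catch (Throwable t) {
        // log and continue so the remaining sources are still processed.
        LOGGER.error("problem processing repository source [{}]", rs.getCode(), t);
    }
    return null;
}));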
Use of org.haiku.haikudepotserver.job.model.JobService in project haikudepotserver by haiku.
The class AuthorizationRulesSpreadsheetJobRunner, method run:
@Override
public void run(JobService jobService, AuthorizationRulesSpreadsheetJobSpecification specification) throws IOException, JobRunnerException {
    final ObjectContext context = serverRuntime.newContext();
    DateTimeFormatter dateTimeFormatter = DateTimeHelper.createStandardDateTimeFormat();

    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(
            specification.getGuid(), "download", MediaType.CSV_UTF_8.toString());

    try (OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
            OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream);
            CSVWriter writer = new CSVWriter(outputStreamWriter, ',')) {

        writer.writeNext(new String[] { "create-timestamp", "user-nickname", "user-active",
                "permission-code", "permission-name", "pkg-name" });

        ObjectSelect<PermissionUserPkg> objectSelect = ObjectSelect.query(PermissionUserPkg.class)
                .orderBy(PermissionUserPkg.USER.dot(User.NICKNAME).asc(),
                        PermissionUserPkg.PERMISSION.dot(Permission.CODE).asc());

        try (ResultBatchIterator<PermissionUserPkg> batchIterator = objectSelect.batchIterator(context, 50)) {
            batchIterator.forEach((pups) -> pups.forEach((pup) -> writer.writeNext(new String[] {
                    dateTimeFormatter.format(Instant.ofEpochMilli(pup.getCreateTimestamp().getTime())),
                    pup.getUser().getNickname(),
                    Boolean.toString(pup.getUser().getActive()),
                    pup.getPermission().getCode(),
                    pup.getPermission().getName(),
                    null != pup.getPkg() ? pup.getPkg().getName() : "" })));
        }

        writer.flush();
        outputStreamWriter.flush();
    }
}
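This export avoids materialising the whole PermissionUserPkg table at once: Cayenne's batchIterator fetches rows in batches of 50, and the try-with-resources block closes the iterator when the export finishes. A minimal sketch of the same batched-export shape, where Thing and getName are hypothetical:

// assumes: an ObjectContext named context and an open CSVWriter named writer.
ObjectSelect<Thing> query = ObjectSelect.query(Thing.class);
try (ResultBatchIterator<Thing> batchIterator = query.batchIterator(context, 50)) {
    // each element yielded by the iterator is a List<Thing> of up to 50 rows.
    batchIterator.forEach(batch -> batch.forEach(
            thing -> writer.writeNext(new String[] { thing.getName() })));
}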