Use of org.haiku.haikudepotserver.job.model.JobRunnerException in project haikudepotserver by haiku.
The class PkgScreenshotImportArchiveJobRunner, method run.
@Override
public void run(JobService jobService, PkgScreenshotImportArchiveJobSpecification specification) throws IOException, JobRunnerException {
    Preconditions.checkArgument(null != jobService);
    Preconditions.checkArgument(null != specification);
    Preconditions.checkArgument(null != specification.getInputDataGuid(), "missing input data guid on specification");
    Preconditions.checkArgument(null != specification.getImportStrategy(), "missing import strategy on specification");

    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(
            specification.getGuid(), "download", MediaType.CSV_UTF_8.toString());

    Optional<JobDataWithByteSource> jobDataWithByteSourceOptional = jobService.tryObtainData(specification.getInputDataGuid());

    if (jobDataWithByteSourceOptional.isEmpty()) {
        throw new IllegalStateException("the job data was not able to be found for guid; " + specification.getInputDataGuid());
    }

    if (!serverRuntime.performInTransaction(() -> {
        try (
                OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
                OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream);
                CSVWriter writer = new CSVWriter(outputStreamWriter, ',')) {

            Map<String, ScreenshotImportMetadatas> metadatas = new HashMap<>();

            writer.writeNext(new String[] { "path", "pkg-name", "action", "message", "code" });

            // sweep through and collect meta-data about the packages in the tar file.
            LOGGER.info("will collect data about packages' screenshots from the archive");
            consumeScreenshotArchiveEntries(
                    jobDataWithByteSourceOptional.get().getByteSource(),
                    (ae) -> collectScreenshotMetadataFromArchive(
                            metadatas, ae.getArchiveInputStream(), ae.getArchiveEntry(), ae.getPkgName(), ae.getOrder()));
            LOGGER.info("did collect data about {} packages' screenshots from the archive", metadatas.size());

            LOGGER.info("will collect data about persisted packages' screenshots");
            collectPersistedScreenshotMetadata(metadatas);
            LOGGER.info("did collect data about persisted packages' screenshots");

            if (specification.getImportStrategy() == PkgScreenshotImportArchiveJobSpecification.ImportStrategy.REPLACE) {
                LOGGER.info("will delete persisted screenshots that are absent from the archive");
                int deleted = deletePersistedScreenshotsThatAreNotPresentInArchiveAndReport(writer, metadatas.values());
                LOGGER.info("did delete {} persisted screenshots that are absent from the archive", deleted);
            }

            blendInArtificialOrderings(metadatas.values());

            // sweep through the archive again and load in those screenshots that are not
            // already present.  The ordering of the inbound data should be preserved.
            LOGGER.info("will load screenshots from the archive for {} packages", metadatas.size());
            consumeScreenshotArchiveEntries(
                    jobDataWithByteSourceOptional.get().getByteSource(),
                    (ae) -> importScreenshotsFromArchiveAndReport(
                            writer, metadatas.get(ae.getPkgName()), ae.getArchiveInputStream(), ae.getArchiveEntry(), ae.getPkgName(), ae.getOrder()));
            LOGGER.info("did load screenshots from the archive for {} packages", metadatas.size());

            return true;
        } catch (IOException e) {
            LOGGER.error("unable to complete the job", e);
        }

        return false;
    })) {
        throw new JobRunnerException("unable to complete job");
    }
}
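The helper consumeScreenshotArchiveEntries is not included in this excerpt. Below is a minimal sketch of what such a helper could look like, assuming the input is a gzip-compressed tar archive (as the icon import runner further down suggests) and simplifying the callback to a stream-and-entry pair; the names here are hypothetical, and the real helper also carries the pkg-name and ordering to the callback.

// hypothetical sketch; not the project's actual helper.
// requires com.google.common.io.ByteSource, java.util.function.BiConsumer,
// java.util.zip.GZIPInputStream and commons-compress' tar classes.
private void consumeTarEntries(
        ByteSource byteSource,
        BiConsumer<TarArchiveInputStream, TarArchiveEntry> consumer) throws IOException {
    try (
            InputStream inputStream = byteSource.openStream();
            GZIPInputStream gzipInputStream = new GZIPInputStream(inputStream);
            TarArchiveInputStream tarArchiveInputStream = new TarArchiveInputStream(gzipInputStream)) {
        TarArchiveEntry entry;
        while (null != (entry = tarArchiveInputStream.getNextTarEntry())) {
            if (entry.isFile()) {
                // the consumer may read the entry's bytes from the stream.
                consumer.accept(tarArchiveInputStream, entry);
            }
        }
    }
}

Because the tar stream is forward-only, the runner above invokes the helper twice, once per sweep, rather than attempting to rewind a single stream.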
Use of org.haiku.haikudepotserver.job.model.JobRunnerException in project haikudepotserver by haiku.
The class PkgScreenshotOptimizationJobRunner, method run.
@Override
public void run(JobService jobService, PkgScreenshotOptimizationJobSpecification specification) throws JobRunnerException {
    Preconditions.checkArgument(null != jobService);
    Preconditions.checkArgument(null != specification);

    long startMs = System.currentTimeMillis();
    LOGGER.info("will optimize {} screenshot images", specification.getPkgScreenshotCodes().size());

    for (String pkgScreenshotCode : specification.getPkgScreenshotCodes()) {
        ObjectContext context = serverRuntime.newContext();
        Optional<PkgScreenshot> pkgScreenshotOptional = PkgScreenshot.tryGetByCode(context, pkgScreenshotCode);

        if (pkgScreenshotOptional.isPresent()) {
            try {
                if (screenshotService.optimizeScreenshot(context, pkgScreenshotOptional.get())) {
                    context.commitChanges();
                }
            } catch (IOException ioe) {
                throw new UncheckedIOException(ioe);
            } catch (BadPkgScreenshotException bpse) {
                throw new JobRunnerException("unable to process a screenshot image", bpse);
            }
        }
    }

    // log the summary once, after all of the screenshots have been processed.
    LOGGER.info("did optimize {} screenshot images in {}ms",
            specification.getPkgScreenshotCodes().size(), System.currentTimeMillis() - startMs);
}
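Two details are worth noting here: screenshotService.optimizeScreenshot returns a boolean so that a commit only happens when the image actually changed, and each screenshot gets its own ObjectContext so one failure cannot poison unrelated commits. The service itself is not shown; the following is a rough, hypothetical stand-in for the "only adopt the result if it improved" idea, using nothing beyond javax.imageio.

// hypothetical sketch only; the real ScreenshotService may use a dedicated
// optimizer rather than re-encoding with ImageIO.
static byte[] recompressPng(byte[] original) throws IOException {
    BufferedImage image = ImageIO.read(new ByteArrayInputStream(original));
    if (null == image) {
        throw new IOException("unable to decode the image data");
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    ImageIO.write(image, "png", out);
    byte[] candidate = out.toByteArray();
    // keep the original unless the re-encoded form is actually smaller.
    return candidate.length < original.length ? candidate : original;
}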
Use of org.haiku.haikudepotserver.job.model.JobRunnerException in project haikudepotserver by haiku.
The class PkgCategoryCoverageImportSpreadsheetJobRunner, method run.
@Override
public void run(JobService jobService, PkgCategoryCoverageImportSpreadsheetJobSpecification specification) throws IOException, JobRunnerException {
    Preconditions.checkArgument(null != jobService);
    Preconditions.checkArgument(null != specification);
    Preconditions.checkArgument(null != specification.getInputDataGuid(), "missing input data guid on specification");

    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(
            specification.getGuid(), "download", MediaType.CSV_UTF_8.toString());

    // if there is input data then feed it in and process it to manipulate the
    // packages' categories.
    Optional<JobDataWithByteSource> jobDataWithByteSourceOptional = jobService.tryObtainData(specification.getInputDataGuid());

    if (jobDataWithByteSourceOptional.isEmpty()) {
        throw new IllegalStateException("the job data was not able to be found for guid; " + specification.getInputDataGuid());
    }

    try (
            OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
            OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream);
            CSVWriter writer = new CSVWriter(outputStreamWriter, ',');
            InputStream inputStream = jobDataWithByteSourceOptional.get().getByteSource().openStream();
            InputStreamReader inputStreamReader = new InputStreamReader(inputStream);
            CSVReader reader = new CSVReader(inputStreamReader)) {

        // headers
        List<String> pkgCategoryCodes = getPkgCategoryCodes();
        String[] headings = getHeadingRow(pkgCategoryCodes);

        // read in the first row of the input and check the headings are there to
        // quasi-validate that the input is not some random rubbish.
        String[] headerRow = reader.readNext();

        if (null == headerRow || headings.length != headerRow.length) {
            throw new JobRunnerException("wrong number of header columns in input");
        }

        if (!Arrays.equals(headerRow, headings)) {
            throw new JobRunnerException("mismatched input headers");
        }

        writer.writeNext(headings);

        serverRuntime.performInTransaction(() -> {
            try {
                String[] row;

                while (null != (row = reader.readNext())) {
                    if (0 != row.length) {
                        ObjectContext rowContext = serverRuntime.newContext();
                        Action action = Action.NOACTION;

                        if (row.length < headings.length - 1) {
                            // -1 because it is possible to omit the action column.
                            action = Action.INVALID;
                            LOGGER.warn("inconsistent number of cells on line");
                        } else {
                            String pkgName = row[0];
                            // 1; display
                            boolean isNone = AbstractJobRunner.MARKER.equals(row[COLUMN_NONE]);
                            Optional<Pkg> pkgOptional = Pkg.tryGetByName(rowContext, pkgName);
                            List<String> selectedPkgCategoryCodes = new ArrayList<>();

                            if (pkgOptional.isPresent()) {
                                for (int i = 0; i < pkgCategoryCodes.size(); i++) {
                                    if (AbstractJobRunner.MARKER.equals(row[COLUMN_NONE + 1 + i].trim())) {
                                        if (isNone) {
                                            action = Action.INVALID;
                                            LOGGER.warn("line for package {} has 'none' marked as well as an actual category", row[0]);
                                        }
                                        selectedPkgCategoryCodes.add(pkgCategoryCodes.get(i));
                                    }
                                }

                                if (action == Action.NOACTION) {
                                    List<PkgCategory> selectedPkgCategories = PkgCategory.getByCodes(rowContext, selectedPkgCategoryCodes);

                                    if (selectedPkgCategories.size() != selectedPkgCategoryCodes.size()) {
                                        throw new IllegalStateException("one or more of the package category codes was not able to be found");
                                    }

                                    if (pkgService.updatePkgCategories(rowContext, pkgOptional.get(), selectedPkgCategories)) {
                                        action = Action.UPDATED;
                                        rowContext.commitChanges();
                                        LOGGER.debug("did update for package {}", row[0]);
                                    }
                                }
                            } else {
                                action = Action.NOTFOUND;
                                LOGGER.debug("unable to find the package for {}", row[0]);
                            }
                        }

                        // copy the row back verbatim, but with the action result at the end.
                        List<String> rowOutput = new ArrayList<>();
                        Collections.addAll(rowOutput, row);

                        while (rowOutput.size() < headings.length) {
                            rowOutput.add("");
                        }

                        rowOutput.remove(rowOutput.size() - 1);
                        rowOutput.add(action.name());

                        writer.writeNext(rowOutput.toArray(new String[0]));
                    }
                }
            } catch (Throwable th) {
                LOGGER.error("a problem has arisen importing package categories from a spreadsheet", th);
            }

            return null;
        });
    }
}
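The output-row handling at the end of the loop is easy to misread: the inbound row is padded out to the full heading width (the action column may have been omitted in the input), and then the final cell is overwritten with the computed action. The same logic as a stand-alone method, JDK only:

// mirrors the rowOutput handling in the runner above.
static String[] rowWithAction(String[] row, int headingsLength, String actionName) {
    List<String> rowOutput = new ArrayList<>(Arrays.asList(row));
    while (rowOutput.size() < headingsLength) {
        rowOutput.add(""); // pad out any omitted trailing columns
    }
    rowOutput.remove(rowOutput.size() - 1); // drop the stale action cell...
    rowOutput.add(actionName); // ...and write the outcome in its place
    return rowOutput.toArray(new String[0]);
}

For example, rowWithAction(new String[] { "somepkg", "*" }, 3, "UPDATED") yields { "somepkg", "*", "UPDATED" }.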
Use of org.haiku.haikudepotserver.job.model.JobRunnerException in project haikudepotserver by haiku.
The class PkgIconImportArchiveJobRunner, method run.
@Override
public void run(JobService jobService, PkgIconImportArchiveJobSpecification specification) throws IOException, JobRunnerException {
    Preconditions.checkArgument(null != jobService);
    Preconditions.checkArgument(null != specification);
    Preconditions.checkArgument(null != specification.getInputDataGuid(), "missing input data guid on specification");

    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(
            specification.getGuid(), "download", MediaType.CSV_UTF_8.toString());

    Optional<JobDataWithByteSource> jobDataWithByteSourceOptional = jobService.tryObtainData(specification.getInputDataGuid());

    if (jobDataWithByteSourceOptional.isEmpty()) {
        throw new IllegalStateException("the job data was not able to be found for guid; " + specification.getInputDataGuid());
    }

    if (!serverRuntime.performInTransaction(() -> {
        try (
                OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
                OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream);
                CSVWriter writer = new CSVWriter(outputStreamWriter, ',')) {

            String[] headings = new String[] { "path", "action", "message" };
            writer.writeNext(headings);

            // first pass; clear the icons of any package appearing in the archive.
            try (
                    InputStream inputStream = jobDataWithByteSourceOptional.get().getByteSource().openStream();
                    GZIPInputStream gzipInputStream = new GZIPInputStream(inputStream);
                    TarArchiveInputStream tarArchiveInputStream = new TarArchiveInputStream(gzipInputStream)) {
                clearPackagesIconsAppearingInArchive(tarArchiveInputStream, writer);
            }

            // second pass; load in the icons from the archive.
            try (
                    InputStream inputStream = jobDataWithByteSourceOptional.get().getByteSource().openStream();
                    GZIPInputStream gzipInputStream = new GZIPInputStream(inputStream);
                    TarArchiveInputStream tarArchiveInputStream = new TarArchiveInputStream(gzipInputStream)) {
                processEntriesFromArchive(tarArchiveInputStream, writer);
            }

            return true;
        } catch (IOException e) {
            LOGGER.error("unable to complete job; ", e);
        }

        return false;
    })) {
        throw new JobRunnerException("unable to complete job");
    }
}
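Note that the archive is opened twice: TarArchiveInputStream is forward-only, so the runner re-opens a fresh stream from the ByteSource for each pass rather than trying to rewind one. The helpers that walk the entries are not shown; one detail they would plausibly need is mapping an entry path to a package name. A hypothetical sketch, assuming a layout along the lines of hicn/<pkg-name>/icon.hvif (the actual layout is defined elsewhere in the project):

// hypothetical; the path layout is an assumption, not taken from this excerpt.
static Optional<String> tryExtractPkgName(String entryPath) {
    String[] components = entryPath.split("/");
    if (3 == components.length && "hicn".equals(components[0])) {
        return Optional.of(components[1]);
    }
    return Optional.empty();
}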
Use of org.haiku.haikudepotserver.job.model.JobRunnerException in project haikudepotserver by haiku.
The class AuthorizationRulesSpreadsheetJobRunner, method run.
@Override
public void run(JobService jobService, AuthorizationRulesSpreadsheetJobSpecification specification) throws IOException, JobRunnerException {
    final ObjectContext context = serverRuntime.newContext();
    DateTimeFormatter dateTimeFormatter = DateTimeHelper.createStandardDateTimeFormat();

    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(
            specification.getGuid(), "download", MediaType.CSV_UTF_8.toString());

    try (
            OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
            OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream);
            CSVWriter writer = new CSVWriter(outputStreamWriter, ',')) {

        writer.writeNext(new String[] { "create-timestamp", "user-nickname", "user-active", "permission-code", "permission-name", "pkg-name" });

        ObjectSelect<PermissionUserPkg> objectSelect = ObjectSelect
                .query(PermissionUserPkg.class)
                .orderBy(
                        PermissionUserPkg.USER.dot(User.NICKNAME).asc(),
                        PermissionUserPkg.PERMISSION.dot(Permission.CODE).asc());

        try (ResultBatchIterator<PermissionUserPkg> batchIterator = objectSelect.batchIterator(context, 50)) {
            batchIterator.forEach((pups) -> pups.forEach((pup) -> writer.writeNext(new String[] {
                    dateTimeFormatter.format(Instant.ofEpochMilli(pup.getCreateTimestamp().getTime())),
                    pup.getUser().getNickname(),
                    Boolean.toString(pup.getUser().getActive()),
                    pup.getPermission().getCode(),
                    pup.getPermission().getName(),
                    null != pup.getPkg() ? pup.getPkg().getName() : ""
            })));
        }

        writer.flush();
        outputStreamWriter.flush();
    }
}
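A small point on the timestamp handling: pup.getCreateTimestamp() is a java.util.Date-family value, so it is bridged into java.time with Instant.ofEpochMilli(...) before formatting. Whatever DateTimeHelper.createStandardDateTimeFormat() returns must have a zone attached, because a pattern-based DateTimeFormatter cannot format an Instant without one. A minimal stand-in, assuming UTC (the real helper's pattern and zone may differ):

// hypothetical stand-in for DateTimeHelper.createStandardDateTimeFormat().
static DateTimeFormatter createStandardDateTimeFormat() {
    return DateTimeFormatter
            .ofPattern("yyyy-MM-dd HH:mm:ss")
            .withZone(ZoneOffset.UTC); // without a zone, format(Instant) throws
}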