use of org.haiku.haikudepotserver.job.model.JobDataWithByteSink in project haikudepotserver by haiku.
the class PkgCategoryCoverageExportSpreadsheetJobRunner method run.
@Override
public void run(JobService jobService, PkgCategoryCoverageExportSpreadsheetJobSpecification specification) throws IOException {
    Preconditions.checkArgument(null != jobService);
    Preconditions.checkArgument(null != specification);

    final ObjectContext context = serverRuntime.newContext();

    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(
            specification.getGuid(), "download", MediaType.CSV_UTF_8.toString());

    try (OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
         OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream);
         CSVWriter writer = new CSVWriter(outputStreamWriter, ',')) {

        // headers
        final List<String> pkgCategoryCodes = getPkgCategoryCodes();
        String[] headings = getHeadingRow(pkgCategoryCodes);

        long startMs = System.currentTimeMillis();
        writer.writeNext(headings);

        // stream out the packages.
        LOGGER.info("will produce category coverage spreadsheet report");

        long count = pkgService.eachPkg(context, false, pkg -> {
            PkgSupplement pkgSupplement = pkg.getPkgSupplement();
            List<String> cols = new ArrayList<>();
            Optional<PkgVersionLocalization> locOptional = Optional.empty();

            if (null != pkg) {
                locOptional = PkgVersionLocalization.getAnyPkgVersionLocalizationForPkg(context, pkg);
            }

            cols.add(pkg.getName());
            cols.add(repositoryService.getRepositoriesForPkg(context, pkg)
                    .stream()
                    .map(Repository::getCode)
                    .collect(Collectors.joining(";")));
            cols.add(locOptional.isPresent() ? locOptional.get().getSummary().orElse("") : "");
            cols.add(pkgSupplement.getPkgPkgCategories().isEmpty() ? AbstractJobRunner.MARKER : "");

            for (String pkgCategoryCode : pkgCategoryCodes) {
                cols.add(pkgSupplement.getPkgPkgCategory(pkgCategoryCode).isPresent() ? AbstractJobRunner.MARKER : "");
            }

            cols.add(""); // no action
            writer.writeNext(cols.toArray(new String[cols.size()]));

            return true; // keep going!
        });

        LOGGER.info("did produce category coverage spreadsheet report for {} packages in {}ms",
                count, System.currentTimeMillis() - startMs);
    }
}
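The export runner and the import runner below both depend on getPkgCategoryCodes() and getHeadingRow(...), which are not shown in this excerpt. The following is a minimal sketch of what the heading row plausibly looks like, inferred from the cells written in the export loop above; the column names themselves are assumptions, not values taken from the project.

// Hypothetical sketch only; inferred from the export loop above, and the
// column names here are assumed rather than copied from the project.
private String[] getHeadingRow(List<String> pkgCategoryCodes) {
    List<String> headings = new ArrayList<>();
    headings.add("pkg-name");          // assumed label for the package name column
    headings.add("repositories");      // ';'-joined repository codes
    headings.add("summary");           // any available version localization summary
    headings.add("none");              // marked when the package has no categories
    headings.addAll(pkgCategoryCodes); // one column per category code
    headings.add("action");            // blank on export; outcome column on import
    return headings.toArray(new String[0]);
}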
use of org.haiku.haikudepotserver.job.model.JobDataWithByteSink in project haikudepotserver by haiku.
the class PkgCategoryCoverageImportSpreadsheetJobRunner method run.
@Override
public void run(JobService jobService, PkgCategoryCoverageImportSpreadsheetJobSpecification specification) throws IOException, JobRunnerException {
    Preconditions.checkArgument(null != jobService);
    Preconditions.checkArgument(null != specification);
    Preconditions.checkArgument(null != specification.getInputDataGuid(), "missing input data guid on specification");

    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(
            specification.getGuid(), "download", MediaType.CSV_UTF_8.toString());

    // if there is input data then feed it in and process it to manipulate the packages'
    // categories.
    Optional<JobDataWithByteSource> jobDataWithByteSourceOptional = jobService.tryObtainData(specification.getInputDataGuid());

    if (jobDataWithByteSourceOptional.isEmpty()) {
        throw new IllegalStateException("the job data was not able to be found for guid; " + specification.getInputDataGuid());
    }

    try (OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
         OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream);
         CSVWriter writer = new CSVWriter(outputStreamWriter, ',');
         InputStream inputStream = jobDataWithByteSourceOptional.get().getByteSource().openStream();
         InputStreamReader inputStreamReader = new InputStreamReader(inputStream);
         CSVReader reader = new CSVReader(inputStreamReader)) {

        // headers
        List<String> pkgCategoryCodes = getPkgCategoryCodes();
        String[] headings = getHeadingRow(pkgCategoryCodes);

        // read in the first row of the input and check the headings are there to quasi-validate
        // that the input is not some random rubbish.
        String[] headerRow = reader.readNext();

        if (headings.length != headerRow.length) {
            throw new JobRunnerException("wrong number of header columns in input");
        }

        if (!Arrays.equals(headerRow, headings)) {
            throw new JobRunnerException("mismatched input headers");
        }

        writer.writeNext(headings);

        serverRuntime.performInTransaction(() -> {
            try {
                String[] row;

                while (null != (row = reader.readNext())) {
                    if (0 != row.length) {
                        ObjectContext rowContext = serverRuntime.newContext();
                        Action action = Action.NOACTION;

                        if (row.length < headings.length - 1) {
                            // -1 because it is possible to omit the action column.
                            action = Action.INVALID;
                            LOGGER.warn("inconsistent number of cells on line");
                        } else {
                            String pkgName = row[0];
                            boolean isNone = AbstractJobRunner.MARKER.equals(row[COLUMN_NONE]);
                            Optional<Pkg> pkgOptional = Pkg.tryGetByName(rowContext, pkgName);
                            List<String> selectedPkgCategoryCodes = new ArrayList<>();

                            if (pkgOptional.isPresent()) {
                                for (int i = 0; i < pkgCategoryCodes.size(); i++) {
                                    if (AbstractJobRunner.MARKER.equals(row[COLUMN_NONE + 1 + i].trim())) {
                                        if (isNone) {
                                            action = Action.INVALID;
                                            LOGGER.warn("line for package {} has 'none' marked as well as an actual category", row[0]);
                                        }
                                        selectedPkgCategoryCodes.add(pkgCategoryCodes.get(i));
                                    }
                                }

                                if (action == Action.NOACTION) {
                                    List<PkgCategory> selectedPkgCategories = PkgCategory.getByCodes(rowContext, selectedPkgCategoryCodes);

                                    if (selectedPkgCategories.size() != selectedPkgCategoryCodes.size()) {
                                        throw new IllegalStateException("one or more of the package category codes was not able to be found");
                                    }

                                    if (pkgService.updatePkgCategories(rowContext, pkgOptional.get(), selectedPkgCategories)) {
                                        action = Action.UPDATED;
                                        rowContext.commitChanges();
                                        LOGGER.debug("did update for package {}", row[0]);
                                    }
                                }
                            } else {
                                action = Action.NOTFOUND;
                                LOGGER.debug("unable to find the package for {}", row[0]);
                            }
                        }

                        // copy the row back verbatim, but with the action result at the end.
                        List<String> rowOutput = new ArrayList<>();
                        Collections.addAll(rowOutput, row);

                        while (rowOutput.size() < headings.length) {
                            rowOutput.add("");
                        }

                        rowOutput.remove(rowOutput.size() - 1);
                        rowOutput.add(action.name());

                        writer.writeNext(rowOutput.toArray(new String[0]));
                    }
                }
            } catch (Throwable th) {
                LOGGER.error("a problem has arisen importing package categories from a spreadsheet", th);
            }

            return null;
        });
    }
}
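The block near the end of the import loop that pads the row, drops its last cell and appends action.name() is easy to misread. The following standalone snippet, using made-up example data rather than project code, traces what it does to a row whose optional trailing action column was omitted.

// Minimal standalone illustration with assumed example data (not project code).
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class RowEchoExample {
    public static void main(String[] args) {
        String[] headings = { "pkg-name", "none", "games", "action" };
        String[] inputRow = { "somepkg", "", "*" }; // action column omitted by the user
        List<String> rowOutput = new ArrayList<>();
        Collections.addAll(rowOutput, inputRow);
        while (rowOutput.size() < headings.length) {
            rowOutput.add("");                      // pad out any omitted trailing cells
        }
        rowOutput.remove(rowOutput.size() - 1);     // discard whatever was in the action cell
        rowOutput.add("UPDATED");                   // append the computed outcome
        System.out.println(rowOutput);              // prints: [somepkg, , *, UPDATED]
    }
}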
use of org.haiku.haikudepotserver.job.model.JobDataWithByteSink in project haikudepotserver by haiku.
the class PkgDumpExportJobRunner method run.
@Override
public void run(JobService jobService, PkgDumpExportJobSpecification specification) throws IOException {
    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(
            specification.getGuid(), "download", MediaType.JSON_UTF_8.toString());

    try (final OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
         final GZIPOutputStream gzipOutputStream = new GZIPOutputStream(outputStream);
         final JsonGenerator jsonGenerator = objectMapper.getFactory().createGenerator(gzipOutputStream)) {
        jsonGenerator.writeStartObject();
        writeInfo(jsonGenerator, specification);
        writePkgs(jsonGenerator, specification);
        jsonGenerator.writeEndObject();
    }
}
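The dump is written as gzip-compressed JSON: a single top-level object containing the sections produced by writeInfo and writePkgs. Below is a minimal consumer-side sketch, not part of the project, showing how such a file could be read back with Jackson's streaming parser; the file name is assumed.

// Sketch of reading the gzipped JSON dump back; the file name is an assumption.
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.zip.GZIPInputStream;

public class PkgDumpReadExample {
    public static void main(String[] args) throws IOException {
        ObjectMapper objectMapper = new ObjectMapper();
        try (InputStream in = Files.newInputStream(Paths.get("pkg-dump.json.gz")); // assumed file name
             GZIPInputStream gzipIn = new GZIPInputStream(in);
             JsonParser parser = objectMapper.getFactory().createParser(gzipIn)) {
            while (null != parser.nextToken()) {
                if (parser.getCurrentToken() == JsonToken.FIELD_NAME) {
                    System.out.println("field: " + parser.getCurrentName()); // list top-level and nested field names
                }
            }
        }
    }
}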
use of org.haiku.haikudepotserver.job.model.JobDataWithByteSink in project haikudepotserver by haiku.
the class PkgIconImportArchiveJobRunner method run.
@Override
public void run(JobService jobService, PkgIconImportArchiveJobSpecification specification) throws IOException, JobRunnerException {
    Preconditions.checkArgument(null != jobService);
    Preconditions.checkArgument(null != specification);
    Preconditions.checkArgument(null != specification.getInputDataGuid(), "missing input data guid on specification");

    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(
            specification.getGuid(), "download", MediaType.CSV_UTF_8.toString());

    Optional<JobDataWithByteSource> jobDataWithByteSourceOptional = jobService.tryObtainData(specification.getInputDataGuid());

    if (!jobDataWithByteSourceOptional.isPresent()) {
        throw new IllegalStateException("the job data was not able to be found for guid; " + specification.getInputDataGuid());
    }

    if (!serverRuntime.performInTransaction(() -> {
        try (OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
             OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream);
             CSVWriter writer = new CSVWriter(outputStreamWriter, ',')) {

            String[] headings = new String[] { "path", "action", "message" };
            writer.writeNext(headings);

            try (InputStream inputStream = jobDataWithByteSourceOptional.get().getByteSource().openStream();
                 GZIPInputStream gzipInputStream = new GZIPInputStream(inputStream);
                 TarArchiveInputStream tarArchiveInputStream = new TarArchiveInputStream(gzipInputStream)) {
                clearPackagesIconsAppearingInArchive(tarArchiveInputStream, writer);
            }

            try (InputStream inputStream = jobDataWithByteSourceOptional.get().getByteSource().openStream();
                 GZIPInputStream gzipInputStream = new GZIPInputStream(inputStream);
                 TarArchiveInputStream tarArchiveInputStream = new TarArchiveInputStream(gzipInputStream)) {
                processEntriesFromArchive(tarArchiveInputStream, writer);
            }

            return true;
        } catch (IOException e) {
            LOGGER.error("unable to complete job; ", e);
        }

        return false;
    })) {
        throw new JobRunnerException("unable to complete job");
    }
}
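Because a TarArchiveInputStream can only be read forwards, the runner opens the input archive twice: once to clear the existing icons for the packages that appear in it, and again to process the entries themselves. A minimal sketch, assuming the input is a .tar.gz of icon files, of walking such an archive with Apache Commons Compress; the file name is an assumption.

// Sketch of listing the entries of a gzipped tar archive; the file name is assumed.
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.zip.GZIPInputStream;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;

public class IconArchiveListExample {
    public static void main(String[] args) throws IOException {
        try (InputStream in = Files.newInputStream(Paths.get("pkgicons.tar.gz")); // assumed file name
             GZIPInputStream gzipIn = new GZIPInputStream(in);
             TarArchiveInputStream tarIn = new TarArchiveInputStream(gzipIn)) {
            TarArchiveEntry entry;
            while (null != (entry = tarIn.getNextTarEntry())) {
                if (entry.isFile()) {
                    System.out.println(entry.getName() + " (" + entry.getSize() + " bytes)");
                }
            }
        }
    }
}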
use of org.haiku.haikudepotserver.job.model.JobDataWithByteSink in project haikudepotserver by haiku.
the class AuthorizationRulesSpreadsheetJobRunner method run.
@Override
public void run(JobService jobService, AuthorizationRulesSpreadsheetJobSpecification specification) throws IOException, JobRunnerException {
    final ObjectContext context = serverRuntime.newContext();
    DateTimeFormatter dateTimeFormatter = DateTimeHelper.createStandardDateTimeFormat();

    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(
            specification.getGuid(), "download", MediaType.CSV_UTF_8.toString());

    try (OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
         OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream);
         CSVWriter writer = new CSVWriter(outputStreamWriter, ',')) {

        writer.writeNext(new String[] { "create-timestamp", "user-nickname", "user-active", "permission-code", "permission-name", "pkg-name" });

        ObjectSelect<PermissionUserPkg> objectSelect = ObjectSelect.query(PermissionUserPkg.class)
                .orderBy(
                        PermissionUserPkg.USER.dot(User.NICKNAME).asc(),
                        PermissionUserPkg.PERMISSION.dot(Permission.CODE).asc());

        try (ResultBatchIterator<PermissionUserPkg> batchIterator = objectSelect.batchIterator(context, 50)) {
            batchIterator.forEach((pups) -> pups.forEach((pup) -> writer.writeNext(new String[] {
                    dateTimeFormatter.format(Instant.ofEpochMilli(pup.getCreateTimestamp().getTime())),
                    pup.getUser().getNickname(),
                    Boolean.toString(pup.getUser().getActive()),
                    pup.getPermission().getCode(),
                    pup.getPermission().getName(),
                    null != pup.getPkg() ? pup.getPkg().getName() : ""
            })));
        }

        writer.flush();
        outputStreamWriter.flush();
    }
}
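All of the runners above share the same outline: register the outbound data with the JobService via storeGeneratedData, then stream the report into the returned JobDataWithByteSink inside try-with-resources so the streams are closed even when the job fails. A generic sketch of that shape follows; the specification type and headings are placeholders invented for illustration, not names from the project.

// Generic sketch of the shared pattern; "SomeJobSpecification" and the headings
// are placeholders for illustration, not types or values from the project.
public void run(JobService jobService, SomeJobSpecification specification) throws IOException {
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(
            specification.getGuid(), "download", MediaType.CSV_UTF_8.toString());

    try (OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
         OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream);
         CSVWriter writer = new CSVWriter(outputStreamWriter, ',')) {
        writer.writeNext(new String[] { "heading-a", "heading-b" }); // placeholder headings
        // ... write one row per entity, typically while iterating a Cayenne query ...
    }
}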