Example usage of com.bakdata.conquery.models.execution.ManagedExecution in the project conquery by bakdata.
Source: class QueryCleanupTask, method execute.
/**
 * Deletes executions that are unshared, untagged and unlabeled (or carry only a default
 * label) and are older than the expiration duration. Executions referenced by reused
 * queries are never deleted. The scan repeats until no further deletion is possible,
 * because removing one execution can make a previously referenced one eligible.
 *
 * @param parameters task parameters; an {@code EXPIRATION_PARAM} entry overrides the
 *                   configured expiration (ISO-8601 duration, e.g. {@code P30D}).
 * @param output     task output writer (unused by this task).
 * @throws Exception if the supplied expiration duration cannot be parsed.
 */
@Override
public void execute(Map<String, List<String>> parameters, PrintWriter output) throws Exception {
	Duration queryExpiration = this.queryExpiration;

	// An expiration supplied as a task parameter overrides the configured default.
	if (parameters.containsKey(EXPIRATION_PARAM)) {
		final List<String> expirationValues = parameters.get(EXPIRATION_PARAM);

		if (expirationValues.size() > 1) {
			log.warn("Will not respect more than one expiration time. Have `{}`", expirationValues);
		}

		queryExpiration = Duration.parse(expirationValues.get(0));
	}

	if (queryExpiration == null) {
		throw new IllegalArgumentException("Query Expiration may not be null");
	}

	log.info("Starting deletion of queries older than {} of {}", queryExpiration, storage.getAllExecutions().size());

	// Iterate until a fixed point is reached: queries can be referenced by other queries,
	// so deleting one execution may free up another one for the next pass.
	while (true) {
		final QueryUtils.AllReusedFinder reusedChecker = new QueryUtils.AllReusedFinder();
		final Set<ManagedExecution<?>> toDelete = new HashSet<>();

		for (ManagedExecution<?> execution : storage.getAllExecutions()) {
			// Gather all queries referenced by this execution; referenced queries are kept.
			if (execution instanceof ManagedQuery) {
				((ManagedQuery) execution).getQuery().visit(reusedChecker);
			}
			else if (execution instanceof ManagedForm) {
				((ManagedForm) execution).getFlatSubQueries().values()
										 .forEach(q -> q.getQuery().visit(reusedChecker));
			}

			if (execution.isShared()) {
				continue;
			}
			log.trace("{} is not shared", execution.getId());

			if (ArrayUtils.isNotEmpty(execution.getTags())) {
				continue;
			}
			log.trace("{} has no tags", execution.getId());

			if (execution.getLabel() != null && !isDefaultLabel(execution.getLabel())) {
				continue;
			}
			log.trace("{} has no label", execution.getId());

			if (LocalDateTime.now().minus(queryExpiration).isBefore(execution.getCreationTime())) {
				continue;
			}
			// BUGFIX: message was inverted ("is not older") — reaching this point means the
			// execution's creation time is at or before the expiration cutoff, i.e. it IS older.
			log.trace("{} is older than {}.", execution.getId(), queryExpiration);

			toDelete.add(execution);
		}

		// Remove all queries referenced in reused queries from the deletion set.
		final Collection<ManagedExecution<?>> referenced =
				reusedChecker.getReusedElements().stream()
							 .map(CQReusedQuery::getQueryId)
							 .map(storage::getExecution)
							 .collect(Collectors.toSet());
		toDelete.removeAll(referenced);

		if (toDelete.isEmpty()) {
			log.info("No queries to delete");
			break;
		}

		log.info("Deleting {} Executions", toDelete.size());

		for (ManagedExecution<?> execution : toDelete) {
			log.trace("Deleting Execution[{}]", execution.getId());
			storage.removeExecution(execution.getId());
		}
	}
}
Example usage of com.bakdata.conquery.models.execution.ManagedExecution in the project conquery by bakdata.
Source: class QueryProcessor, method postQuery.
/**
 * Creates a query for all datasets, then submits it for execution on the
 * intended dataset.
 *
 * <p>Runs all registered {@link QueryVisitor}s over the query tree (authorization
 * checks, metrics reporting, reuse detection). If the query merely re-executes an
 * existing execution, that execution is reused instead of starting a new one.
 *
 * @param dataset the dataset the query is executed on.
 * @param query   the submitted query description.
 * @param subject the authenticated subject posting the query.
 * @return the (possibly reused) execution handle for the submitted query.
 */
public ManagedExecution<?> postQuery(Dataset dataset, QueryDescription query, Subject subject) {
	// BUGFIX: format string had an unbalanced brace ("User[{{}].") which rendered as "User[{id].".
	log.info("Query posted on Dataset[{}] by User[{}].", dataset.getId(), subject.getId());

	// This map works as long as we have query visitors that are not configured in any way.
	// So adding a visitor twice would replace the previous one, but both would have yielded
	// the same result. For the future, a better data structure might be desired that also
	// regards similar QueryVisitors of different configuration.
	ClassToInstanceMap<QueryVisitor> visitors = MutableClassToInstanceMap.create();
	query.addVisitors(visitors);

	// Initialize checks that need to traverse the query tree.
	visitors.putInstance(QueryUtils.OnlyReusingChecker.class, new QueryUtils.OnlyReusingChecker());
	visitors.putInstance(NamespacedIdentifiableCollector.class, new NamespacedIdentifiableCollector());

	final String primaryGroupName = AuthorizationHelper.getPrimaryGroup(subject, storage).map(Group::getName).orElse("none");
	visitors.putInstance(ExecutionMetrics.QueryMetricsReporter.class, new ExecutionMetrics.QueryMetricsReporter(primaryGroupName));

	// Chain all Consumers into a single entry point.
	Consumer<Visitable> consumerChain = QueryUtils.getNoOpEntryPoint();
	for (QueryVisitor visitor : visitors.values()) {
		consumerChain = consumerChain.andThen(visitor);
	}

	// Apply consumers to the query tree.
	query.visit(consumerChain);

	query.authorize(subject, dataset, visitors);
	// After all authorization checks we can now use the actual subject to invoke the query
	// and do not need to bubble down the Userish in methods.

	ExecutionMetrics.reportNamespacedIds(visitors.getInstance(NamespacedIdentifiableCollector.class).getIdentifiables(), primaryGroupName);
	ExecutionMetrics.reportQueryClassUsage(query.getClass(), primaryGroupName);

	final Namespace namespace = datasetRegistry.get(dataset.getId());
	final ExecutionManager executionManager = namespace.getExecutionManager();

	// If this is only a re-executing query, try to execute the underlying query instead.
	{
		final Optional<ManagedExecutionId> executionId = visitors.getInstance(QueryUtils.OnlyReusingChecker.class).getOnlyReused();
		final Optional<ManagedExecution<?>> execution =
				executionId.map(id -> tryReuse(query, id, datasetRegistry, config, executionManager, subject.getUser()));

		if (execution.isPresent()) {
			return execution.get();
		}
	}

	// Execute the query.
	return executionManager.runQuery(datasetRegistry, query, subject.getUser(), dataset, config);
}
Example usage of com.bakdata.conquery.models.execution.ManagedExecution in the project conquery by bakdata.
Source: class ResultExcelProcessor, method getExcelResult.
/**
 * Renders the result of a single-table execution as an Excel download.
 *
 * <p>Authorizes read and download on the dataset and read on the execution before
 * streaming the rendered workbook as an attachment.
 *
 * @param subject   the requesting subject.
 * @param exec      the execution whose result is rendered.
 * @param datasetId the dataset the execution belongs to.
 * @param pretty    whether values should be pretty-printed.
 * @return a streaming response carrying the rendered .xlsx file.
 */
public <E extends ManagedExecution<?> & SingleTableResult> Response getExcelResult(Subject subject, E exec, DatasetId datasetId, boolean pretty) {
	ConqueryMDC.setLocation(subject.getName());

	final Namespace namespace = datasetRegistry.get(datasetId);
	final Dataset dataset = namespace.getDataset();

	// Downloading a result requires read+download on the dataset and read on the execution.
	subject.authorize(dataset, Ability.READ);
	subject.authorize(dataset, Ability.DOWNLOAD);
	subject.authorize(exec, Ability.READ);

	final IdPrinter printer = config.getFrontend().getQueryUpload().getIdPrinter(subject, exec, namespace);
	final Locale requestLocale = I18n.LOCALE.get();
	final PrintSettings printSettings = new PrintSettings(pretty, requestLocale, datasetRegistry, config, printer::createId);

	final ExcelRenderer renderer = new ExcelRenderer(config.getExcel(), printSettings);

	final StreamingOutput body = stream ->
			renderer.renderToStream(config.getFrontend().getQueryUpload().getIdResultInfos(), (ManagedExecution<?> & SingleTableResult) exec, stream);

	return makeResponseWithFileName(body, exec.getLabelWithoutAutoLabelSuffix(), "xlsx", MEDIA_TYPE, ResultUtil.ContentDispositionOption.ATTACHMENT);
}
Example usage of com.bakdata.conquery.models.execution.ManagedExecution in the project conquery by bakdata.
Source: class TestConquery, method isBusy.
/**
 * Reports whether any part of the standalone cluster still has work in flight:
 * the manager's slow-worker queue, any execution still in state RUNNING,
 * any namespace job manager, or any shard node.
 *
 * <p>All checks are evaluated unconditionally (no short-circuiting), mirroring
 * a full status poll across the cluster.
 *
 * @return {@code true} if any component is busy.
 */
private boolean isBusy() {
	boolean anyBusy = standaloneCommand.getManager().getJobManager().isSlowWorkerBusy();

	// Any execution still running counts as busy.
	anyBusy |= standaloneCommand.getManager()
								.getStorage()
								.getAllExecutions()
								.stream()
								.map(ManagedExecution::getState)
								.anyMatch(ExecutionState.RUNNING::equals);

	// Poll every namespace's job manager.
	for (Namespace namespace : standaloneCommand.getManager().getDatasetRegistry().getDatasets()) {
		anyBusy |= namespace.getJobManager().isSlowWorkerBusy();
	}

	// Poll every shard node.
	for (ShardNode shardNode : standaloneCommand.getShardNodes()) {
		anyBusy |= shardNode.isBusy();
	}

	return anyBusy;
}
Example usage of com.bakdata.conquery.models.execution.ManagedExecution in the project conquery by bakdata.
Source: class ResultArrowProcessor, method getArrowResult.
/**
 * Streams the result of a single-table execution in an Arrow format.
 *
 * <p>Authorizes the download (dataset read/download, execution read, and every
 * dataset the query references), rejects executions that do not produce exactly
 * one result table, then renders id columns and result columns through the
 * supplied Arrow writer factory.
 *
 * @param writerProducer  factory producing an {@link ArrowWriter} for an output stream and schema.
 * @param subject         the requesting subject.
 * @param exec            the execution whose result is rendered.
 * @param dataset         the dataset the execution belongs to.
 * @param datasetRegistry registry used to resolve the namespace and print settings.
 * @param pretty          whether values should be pretty-printed.
 * @param fileExtension   file extension for the download's file name.
 * @param mediaType       media type of the response.
 * @param config          the Conquery configuration.
 * @return a streaming response carrying the rendered Arrow data as an attachment.
 */
public static <E extends ManagedExecution<?> & SingleTableResult> Response getArrowResult(Function<OutputStream, Function<VectorSchemaRoot, ArrowWriter>> writerProducer, Subject subject, E exec, Dataset dataset, DatasetRegistry datasetRegistry, boolean pretty, String fileExtension, MediaType mediaType, ConqueryConfig config) {
	final Namespace namespace = datasetRegistry.get(dataset.getId());

	ConqueryMDC.setLocation(subject.getName());
	log.info("Downloading results for {} on dataset {}", exec, dataset);

	subject.authorize(dataset, Ability.READ);
	subject.authorize(dataset, Ability.DOWNLOAD);
	subject.authorize(exec, Ability.READ);

	// Check if subject is permitted to download on all datasets that were referenced by the query.
	authorizeDownloadDatasets(subject, exec);

	// Only plain queries, or forms with exactly one sub-query, yield a single table.
	final boolean singleTable = exec instanceof ManagedQuery
								|| (exec instanceof ManagedForm && ((ManagedForm) exec).getSubQueries().size() == 1);
	if (!singleTable) {
		return Response.status(HttpStatus.SC_UNPROCESSABLE_ENTITY, "Execution result is not a single Table").build();
	}

	// Get the locale extracted by the LocaleFilter.
	final IdPrinter printer = config.getFrontend().getQueryUpload().getIdPrinter(subject, exec, namespace);
	final Locale requestLocale = I18n.LOCALE.get();
	final PrintSettings printSettings = new PrintSettings(pretty, requestLocale, datasetRegistry, config, printer::createId);

	// Collect ResultInfos for id columns and result columns.
	final List<ResultInfo> idInfos = config.getFrontend().getQueryUpload().getIdResultInfos();
	final List<ResultInfo> execInfos = exec.getResultInfos();

	final StreamingOutput body = stream ->
			renderToStream(writerProducer.apply(stream), printSettings, config.getArrow().getBatchSize(), idInfos, execInfos, exec.streamResults());

	return makeResponseWithFileName(body, exec.getLabelWithoutAutoLabelSuffix(), fileExtension, mediaType, ResultUtil.ContentDispositionOption.ATTACHMENT);
}
Aggregations