Use of com.bakdata.conquery.apiv1.query.Query in the project conquery by bakdata.
The class ManagedQuery, method makeDefaultLabel.
/**
 * Creates a default label based on the submitted {@link QueryDescription}.
 * The label is customized by mentioning that a description contained a
 * {@link CQExternal}, {@link CQReusedQuery} or {@link CQConcept}, in this order.
 * In case of one or more {@link CQConcept}s the distinct labels of the concepts are chosen
 * and concatenated until a length of {@value #MAX_CONCEPT_LABEL_CONCAT_LENGTH} is reached.
 * All further labels are dropped and hinted at with a "further concepts" suffix.
 */
@Override
protected String makeDefaultLabel(PrintSettings cfg) {
	final StringBuilder sb = new StringBuilder();
	// Group every node of the query tree by its concrete class so each kind can be checked below.
	final Map<Class<? extends Visitable>, List<Visitable>> sortedContents = Visitable.stream(query).collect(Collectors.groupingBy(Visitable::getClass));
	int sbStartSize = sb.length();
	// Check for CQExternal
	List<Visitable> externals = sortedContents.getOrDefault(CQExternal.class, Collections.emptyList());
	if (!externals.isEmpty()) {
		if (sb.length() > 0) {
			sb.append(" ");
		}
		sb.append(C10N.get(CQElementC10n.class, I18n.LOCALE.get()).external());
	}
	// Check for CQReused
	if (sortedContents.containsKey(CQReusedQuery.class)) {
		if (sb.length() > 0) {
			sb.append(" ");
		}
		sb.append(C10N.get(CQElementC10n.class, I18n.LOCALE.get()).reused());
	}
	// Check for CQConcept
	if (sortedContents.containsKey(CQConcept.class)) {
		if (sb.length() > 0) {
			sb.append(" ");
		}
		// Track length of text we are appending for concepts.
		// AtomicInteger is used as a mutable counter inside the lambda, not for thread-safety.
		final AtomicInteger length = new AtomicInteger();
		sortedContents.get(CQConcept.class).stream().map(CQConcept.class::cast).map(c -> makeLabelWithRootAndChild(c, cfg)).filter(Predicate.not(Strings::isNullOrEmpty)).distinct().takeWhile(elem -> length.addAndGet(elem.length()) < MAX_CONCEPT_LABEL_CONCAT_LENGTH).forEach(label -> sb.append(label).append(" "));
		// Strip the trailing separator, but only if one was actually appended:
		// if every concept label was filtered out as null/empty, sb ends with
		// text from the external/reused sections and must not be truncated.
		if (sb.length() > 0 && sb.charAt(sb.length() - 1) == ' ') {
			sb.deleteCharAt(sb.length() - 1);
		}
		// If not all concepts could be included in the name, point that out.
		// ">=" because takeWhile already drops the element that made the
		// accumulated length reach MAX_CONCEPT_LABEL_CONCAT_LENGTH exactly.
		if (length.get() >= MAX_CONCEPT_LABEL_CONCAT_LENGTH) {
			sb.append(" ").append(C10N.get(CQElementC10n.class, I18n.LOCALE.get()).furtherConcepts());
		}
	}
	// Fallback to id if nothing could be extracted from the query description
	if (sbStartSize == sb.length()) {
		sb.append(getId().getExecution());
	}
	return sb.toString();
}
Use of com.bakdata.conquery.apiv1.query.Query in the project conquery by bakdata.
The class QueryCleanupTask, method execute.
/**
 * Deletes stale executions: anything older than the expiration window that is
 * not shared, not tagged, carries no custom label, and is not referenced by a
 * reused query. Runs until a fixed point is reached, since deleting one query
 * can make another (previously referenced) query eligible for deletion.
 *
 * @param parameters may contain {@code EXPIRATION_PARAM} to override the configured expiration; only the first value is respected.
 * @param output     task output writer (unused here).
 * @throws Exception if the expiration parameter cannot be parsed as an ISO-8601 {@link Duration}, among others.
 */
@Override
public void execute(Map<String, List<String>> parameters, PrintWriter output) throws Exception {
// Start from the configured default; a task parameter may override it below.
Duration queryExpiration = this.queryExpiration;
if (parameters.containsKey(EXPIRATION_PARAM)) {
if (parameters.get(EXPIRATION_PARAM).size() > 1) {
log.warn("Will not respect more than one expiration time. Have `{}`", parameters.get(EXPIRATION_PARAM));
}
// ISO-8601 duration, e.g. "P30D".
queryExpiration = Duration.parse(parameters.get(EXPIRATION_PARAM).get(0));
}
if (queryExpiration == null) {
throw new IllegalArgumentException("Query Expiration may not be null");
}
log.info("Starting deletion of queries older than {} of {}", queryExpiration, storage.getAllExecutions().size());
// Iterate for as long as no changes are needed (this is because queries can be referenced by other queries)
while (true) {
final QueryUtils.AllReusedFinder reusedChecker = new QueryUtils.AllReusedFinder();
Set<ManagedExecution<?>> toDelete = new HashSet<>();
for (ManagedExecution<?> execution : storage.getAllExecutions()) {
// Gather all referenced queries via reused checker.
// NOTE: this visit must happen for EVERY execution — including shared/tagged
// ones that are skipped below — so that all references are collected before
// any deletion candidate is confirmed.
if (execution instanceof ManagedQuery) {
((ManagedQuery) execution).getQuery().visit(reusedChecker);
} else if (execution instanceof ManagedForm) {
((ManagedForm) execution).getFlatSubQueries().values().forEach(q -> q.getQuery().visit(reusedChecker));
}
// Shared executions are kept unconditionally.
if (execution.isShared()) {
continue;
}
log.trace("{} is not shared", execution.getId());
// Tagged executions are considered deliberately kept by the user.
if (ArrayUtils.isNotEmpty(execution.getTags())) {
continue;
}
log.trace("{} has no tags", execution.getId());
// A non-default label counts as a user-chosen name and protects the execution.
if (execution.getLabel() != null && !isDefaultLabel(execution.getLabel())) {
continue;
}
log.trace("{} has no label", execution.getId());
// Keep executions that are still within the expiration window.
if (LocalDateTime.now().minus(queryExpiration).isBefore(execution.getCreationTime())) {
continue;
}
log.trace("{} is not older than {}.", execution.getId(), queryExpiration);
toDelete.add(execution);
}
// remove all queries referenced in reused queries.
final Collection<ManagedExecution<?>> referenced = reusedChecker.getReusedElements().stream().map(CQReusedQuery::getQueryId).map(storage::getExecution).collect(Collectors.toSet());
toDelete.removeAll(referenced);
// Fixed point reached: nothing left to delete.
if (toDelete.isEmpty()) {
log.info("No queries to delete");
break;
}
log.info("Deleting {} Executions", toDelete.size());
for (ManagedExecution<?> execution : toDelete) {
log.trace("Deleting Execution[{}]", execution.getId());
storage.removeExecution(execution.getId());
}
}
}
Use of com.bakdata.conquery.apiv1.query.Query in the project conquery by bakdata.
The class QueryProcessor, method uploadEntities.
/**
 * Try to resolve the external upload; if successful, create a query for the subject
 * and return its id and resolution statistics.
 *
 * @throws BadRequestException if no entity at all could be resolved, carrying the unresolved ids and unreadable dates.
 */
public ExternalUploadResult uploadEntities(Subject subject, Dataset dataset, ExternalUpload upload) {
	// Resolve the namespace once instead of repeating the registry lookup below.
	final Namespace namespace = datasetRegistry.get(dataset.getId());
	final CQExternal.ResolveStatistic statistic = CQExternal.resolveEntities(upload.getValues(), upload.getFormat(), namespace.getStorage().getIdMapping(), config.getFrontend().getQueryUpload(), config.getLocale().getDateReader());
	// Resolving nothing is a problem thus we fail.
	if (statistic.getResolved().isEmpty()) {
		throw new BadRequestException(Response.status(Response.Status.BAD_REQUEST).entity(new ExternalUploadResult(null, 0, statistic.getUnresolvedId(), statistic.getUnreadableDate())).build());
	}
	final ConceptQuery query = new ConceptQuery(new CQExternal(upload.getFormat(), upload.getValues()));
	// We only create the Query, really no need to execute it as it's only useful for composition.
	final ManagedQuery execution = ((ManagedQuery) namespace.getExecutionManager().createExecution(datasetRegistry, query, subject.getUser(), dataset));
	execution.setLastResultCount((long) statistic.getResolved().size());
	if (upload.getLabel() != null) {
		execution.setLabel(upload.getLabel());
	}
	execution.initExecutable(datasetRegistry, config);
	return new ExternalUploadResult(execution.getId(), statistic.getResolved().size(), statistic.getUnresolvedId(), statistic.getUnreadableDate());
}
Use of com.bakdata.conquery.apiv1.query.Query in the project conquery by bakdata.
The class QueryProcessor, method postQuery.
/**
 * Creates a query for all datasets, then submits it for execution on the
 * intended dataset. If the description merely re-executes an existing query,
 * the underlying execution is reused instead of creating a new one.
 */
public ManagedExecution<?> postQuery(Dataset dataset, QueryDescription query, Subject subject) {
	// Fixed malformed SLF4J placeholder: was "User[{{}]." which rendered a stray brace.
	log.info("Query posted on Dataset[{}] by User[{}].", dataset.getId(), subject.getId());
	// This maps works as long as we have query visitors that are not configured in anyway.
	// So adding a visitor twice would replace the previous one but both would have yielded the same result.
	// For the future a better data structure might be desired that also regards similar QueryVisitors of different configuration
	ClassToInstanceMap<QueryVisitor> visitors = MutableClassToInstanceMap.create();
	query.addVisitors(visitors);
	// Initialize checks that need to traverse the query tree
	visitors.putInstance(QueryUtils.OnlyReusingChecker.class, new QueryUtils.OnlyReusingChecker());
	visitors.putInstance(NamespacedIdentifiableCollector.class, new NamespacedIdentifiableCollector());
	final String primaryGroupName = AuthorizationHelper.getPrimaryGroup(subject, storage).map(Group::getName).orElse("none");
	visitors.putInstance(ExecutionMetrics.QueryMetricsReporter.class, new ExecutionMetrics.QueryMetricsReporter(primaryGroupName));
	// Chain all Consumers
	Consumer<Visitable> consumerChain = QueryUtils.getNoOpEntryPoint();
	for (QueryVisitor visitor : visitors.values()) {
		consumerChain = consumerChain.andThen(visitor);
	}
	// Apply consumers to the query tree
	query.visit(consumerChain);
	query.authorize(subject, dataset, visitors);
	// After all authorization checks we can now use the actual subject to invoke the query and do not to bubble down the Userish in methods
	ExecutionMetrics.reportNamespacedIds(visitors.getInstance(NamespacedIdentifiableCollector.class).getIdentifiables(), primaryGroupName);
	ExecutionMetrics.reportQueryClassUsage(query.getClass(), primaryGroupName);
	final Namespace namespace = datasetRegistry.get(dataset.getId());
	final ExecutionManager executionManager = namespace.getExecutionManager();
	// If this is only a re-executing query, try to execute the underlying query instead.
	{
		final Optional<ManagedExecutionId> executionId = visitors.getInstance(QueryUtils.OnlyReusingChecker.class).getOnlyReused();
		final Optional<ManagedExecution<?>> execution = executionId.map(id -> tryReuse(query, id, datasetRegistry, config, executionManager, subject.getUser()));
		if (execution.isPresent()) {
			return execution.get();
		}
	}
	// Execute the query
	return executionManager.runQuery(datasetRegistry, query, subject.getUser(), dataset, config);
}
Use of com.bakdata.conquery.apiv1.query.Query in the project conquery by bakdata.
The class ExecuteForm, method react.
/**
 * Executes every sub-query of the form on this worker, aborting the whole
 * form and reporting the failure on the first sub-query that cannot be planned
 * or executed.
 */
@Override
public void react(Worker worker) throws Exception {
	log.info("Started Form {}", formId);
	// The execution may have been cancelled earlier; clear that flag before running again.
	final QueryExecutor executor = worker.getQueryExecutor();
	executor.unsetQueryCancelled(formId);
	// Run all sub-query plans in order.
	for (Entry<ManagedExecutionId, Query> subQuery : queries.entrySet()) {
		final ShardResult shardResult = createResult(worker, subQuery.getKey());
		final Query plan = subQuery.getValue();
		// Dry-run plan creation once: if it fails here it would fail for every
		// per-core instantiation as well, so report and abort immediately.
		try {
			plan.createQueryPlan(new QueryPlanContext(worker));
		}
		catch (Exception e) {
			final ConqueryError error = asConqueryError(e);
			log.warn("Failed to create query plans for {}.", formId, error);
			executor.sendFailureToManagerNode(shardResult, error);
			return;
		}
		final QueryExecutionContext context = new QueryExecutionContext(formId, executor, worker.getStorage(), worker.getBucketManager());
		// A false return signals failure/cancellation; stop processing further sub-queries.
		if (!executor.execute(plan, context, shardResult)) {
			return;
		}
	}
}
Aggregations