Use of com.bakdata.conquery.apiv1.FullExecutionStatus in project conquery by bakdata.
The class ManagedExecution, method buildStatusFull.
/**
* Renders an extensive status of this query (see {@link FullExecutionStatus}. The rendering can be computation intensive and can produce a large
* object. The use of the full status is only intended if a client requested specific information about this execution.
*/
public FullExecutionStatus buildStatusFull(@NonNull MetaStorage storage, Subject subject, DatasetRegistry datasetRegistry, ConqueryConfig config) {
initExecutable(datasetRegistry, config);
FullExecutionStatus status = new FullExecutionStatus();
setStatusBase(subject, status);
setAdditionalFieldsForStatusWithColumnDescription(storage, subject, status, datasetRegistry);
setAdditionalFieldsForStatusWithSource(subject, status);
setAdditionalFieldsForStatusWithGroups(storage, status);
setAvailableSecondaryIds(status);
status.setProgress(progress);
if (getState().equals(ExecutionState.FAILED) && error != null) {
// Use plain format here to have a uniform serialization.
status.setError(error.asPlain());
}
return status;
}
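Because the Javadoc flags this rendering as computation-intensive, it is meant to be built for one explicitly requested execution rather than for whole listings. A minimal caller sketch, assuming a processor object that exposes getStorage(), getDatasetRegistry() and getConfig(); the method name getSingleStatus and the processor field are hypothetical and not part of ManagedExecution:
// Sketch only: fetch one execution from the MetaStorage and build its full status on demand.
// `processor` is an assumed helper exposing storage, dataset registry and config.
public FullExecutionStatus getSingleStatus(ManagedExecutionId id, Subject subject) {
	final MetaStorage storage = processor.getStorage();
	final ManagedExecution<?> execution = storage.getExecution(id);
	// Expensive full rendering, done only for the execution the client asked about.
	return execution.buildStatusFull(storage, subject, processor.getDatasetRegistry(), processor.getConfig());
}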
Use of com.bakdata.conquery.apiv1.FullExecutionStatus in project conquery by bakdata.
The class AdminResource, method getQueries.
@GET
@Path("/queries")
public FullExecutionStatus[] getQueries(@Auth Subject currentUser, @QueryParam("limit") OptionalLong limit, @QueryParam("since") Optional<String> since) {
final MetaStorage storage = processor.getStorage();
final DatasetRegistry datasetRegistry = processor.getDatasetRegistry();
return storage.getAllExecutions().stream().map(t -> {
try {
return t.buildStatusFull(storage, currentUser, datasetRegistry, processor.getConfig());
} catch (ConqueryError e) {
// Initialization of execution probably failed, so we construct a status based on the overview status
final FullExecutionStatus fullExecutionStatus = new FullExecutionStatus();
t.setStatusBase(currentUser, fullExecutionStatus);
fullExecutionStatus.setStatus(ExecutionState.FAILED);
fullExecutionStatus.setError(e);
return fullExecutionStatus;
}
})
.filter(t -> t.getCreatedAt().toLocalDate().isEqual(since.map(LocalDate::parse).orElse(LocalDate.now())))
.limit(limit.orElse(100))
.toArray(FullExecutionStatus[]::new);
}
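For illustration, the endpoint above could be queried through the same JAX-RS client API that the integration tests below use. This is a sketch only: client and adminBaseUri are assumptions pointing at the admin servlet. Note that since filters to executions created on exactly that date, and limit defaults to 100.
// Sketch only: client-side call against GET /queries; adminBaseUri and client are assumptions.
FullExecutionStatus[] statuses = client.target(adminBaseUri)
		.path("queries")
		.queryParam("limit", 10)
		.queryParam("since", LocalDate.now().toString())
		.request(MediaType.APPLICATION_JSON)
		.get(FullExecutionStatus[].class);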
Use of com.bakdata.conquery.apiv1.FullExecutionStatus in project conquery by bakdata.
The class ManagedExecution, method setAdditionalFieldsForStatusWithSource.
/**
* Sets additional fields of an {@link ExecutionStatus} when a more specific status is requested.
*/
protected void setAdditionalFieldsForStatusWithSource(Subject subject, FullExecutionStatus status) {
QueryDescription query = getSubmitted();
NamespacedIdentifiableCollector namespacesIdCollector = new NamespacedIdentifiableCollector();
query.visit(namespacesIdCollector);
final Set<Concept> concepts = namespacesIdCollector.getIdentifiables()
		.stream()
		.filter(ConceptElement.class::isInstance)
		.map(ConceptElement.class::cast)
		.map(ConceptElement::getConcept)
		.collect(Collectors.toSet());
boolean canExpand = subject.isPermittedAll(concepts, Ability.READ);
status.setCanExpand(canExpand);
status.setQuery(canExpand ? getSubmitted() : null);
}
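The effect on the client side is that the query tree is only present when the subject holds READ on every concept involved. A consumer sketch, assuming Lombok-style accessors isCanExpand() and getQuery() on FullExecutionStatus:
// Sketch only: accessor names assume Lombok-generated getters on FullExecutionStatus.
if (status.isCanExpand()) {
	// Safe to show the submitted query; the subject may read every concept it touches.
	QueryDescription submitted = status.getQuery();
	renderQueryTree(submitted); // hypothetical UI/serialization step
}
else {
	// The query field is intentionally null; show the status without the query tree.
}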
Use of com.bakdata.conquery.apiv1.FullExecutionStatus in project conquery by bakdata.
The class DownloadLinkGeneration, method execute.
@Override
public void execute(StandaloneSupport conquery) throws Exception {
final MetaStorage storage = conquery.getMetaStorage();
final User user = new User("testU", "testU", storage);
final String testJson = In.resource("/tests/query/SIMPLE_TREECONCEPT_QUERY/SIMPLE_TREECONCEPT_Query.test.json").withUTF8().readAll();
final QueryTest test = (QueryTest) JsonIntegrationTest.readJson(conquery.getDataset(), testJson);
storage.updateUser(user);
// Manually import data
ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));
test.importRequiredData(conquery);
// Create execution for download
ManagedQuery exec = new ManagedQuery(test.getQuery(), user, conquery.getDataset());
storage.addExecution(exec);
user.addPermission(DatasetPermission.onInstance(Set.of(Ability.READ), conquery.getDataset().getId()));
{
// Try to generate a download link: should not be possible, because the execution has not been run yet
FullExecutionStatus status = IntegrationUtils.getExecutionStatus(conquery, exec.getId(), user, 200);
assertThat(status.getResultUrls()).isEmpty();
}
{
// Tinker with the state of the execution and try again: still not possible because of missing permissions
exec.setState(ExecutionState.DONE);
FullExecutionStatus status = IntegrationUtils.getExecutionStatus(conquery, exec.getId(), user, 200);
assertThat(status.getResultUrls()).isEmpty();
}
{
// Add permission to download: now it should be possible
user.addPermission(DatasetPermission.onInstance(Set.of(Ability.DOWNLOAD), conquery.getDataset().getId()));
FullExecutionStatus status = IntegrationUtils.getExecutionStatus(conquery, exec.getId(), user, 200);
// This URL is missing the `/api` path part, because we use the standard UriBuilder here
assertThat(status.getResultUrls()).contains(new URL(String.format(
		"%s/datasets/%s/result/%s.csv",
		conquery.defaultApiURIBuilder().toString(),
		conquery.getDataset().getId(),
		exec.getId()
)));
}
}
Use of com.bakdata.conquery.apiv1.FullExecutionStatus in project conquery by bakdata.
The class ReusedQueryTest, method execute.
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
final StandaloneSupport conquery = testConquery.getSupport(name);
final String testJson = In.resource("/tests/query/SECONDARY_ID_MIXED/SECONDARY_IDS_MIXED.test.json").withUTF8().readAll();
final Dataset dataset = conquery.getDataset();
final QueryTest test = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson);
// Manually import data, so we can do our own work.
{
ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));
importSecondaryIds(conquery, test.getContent().getSecondaryIds());
conquery.waitUntilWorkDone();
LoadingUtil.importTables(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
LoadingUtil.importConcepts(conquery, test.getRawConcepts());
conquery.waitUntilWorkDone();
LoadingUtil.importTableContents(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
}
final SecondaryIdQuery query = (SecondaryIdQuery) IntegrationUtils.parseQuery(conquery, test.getRawQuery());
final ManagedExecutionId id = IntegrationUtils.assertQueryResult(conquery, query, 4L, ExecutionState.DONE, conquery.getTestUser(), 201);
assertThat(id).isNotNull();
final MetaStorage metaStorage = conquery.getMetaStorage();
final ManagedQuery execution = (ManagedQuery) metaStorage.getExecution(id);
// Normal reuse
{
final ConceptQuery reused = new ConceptQuery(new CQReusedQuery(execution.getId()));
IntegrationUtils.assertQueryResult(conquery, reused, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Reuse by API
{
final URI reexecuteUri = HierarchyHelper.hierarchicalPath(conquery.defaultApiURIBuilder(), QueryResource.class, "reexecute")
		.buildFromMap(Map.of(
				ResourceConstants.DATASET, conquery.getDataset().getName(),
				ResourceConstants.QUERY, execution.getId().toString()
		));
final FullExecutionStatus status = conquery.getClient()
		.target(reexecuteUri)
		.request(MediaType.APPLICATION_JSON)
		.post(Entity.entity(null, MediaType.APPLICATION_JSON_TYPE))
		.readEntity(FullExecutionStatus.class);
assertThat(status.getStatus()).isIn(ExecutionState.RUNNING, ExecutionState.DONE);
}
// Reuse in SecondaryId
{
final SecondaryIdQuery reused = new SecondaryIdQuery();
reused.setRoot(new CQReusedQuery(execution.getId()));
reused.setSecondaryId(query.getSecondaryId());
IntegrationUtils.assertQueryResult(conquery, reused, 4L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Reuse in SecondaryId, but do exclude
{
final SecondaryIdQuery reused = new SecondaryIdQuery();
final CQAnd root = new CQAnd();
reused.setRoot(root);
final CQReusedQuery reuse = new CQReusedQuery(execution.getId());
reuse.setExcludeFromSecondaryId(true);
// We select only a single event of the query via exact filtering.
final CQConcept cqConcept = new CQConcept();
final ConceptId conceptId = new ConceptId(conquery.getDataset().getId(), "concept");
final Concept<?> concept = conquery.getNamespaceStorage().getConcept(conceptId);
cqConcept.setElements(List.of(concept));
final CQTable cqTable = new CQTable();
cqTable.setConcept(cqConcept);
final CentralRegistry centralRegistry = conquery.getNamespaceStorage().getCentralRegistry();
final Connector connector = centralRegistry.resolve(new ConnectorId(conceptId, "connector1"));
cqTable.setConnector(connector);
cqTable.setFilters(List.of(new FilterValue.CQRealRangeFilter(
		(Filter<Range<BigDecimal>>) centralRegistry.resolve(new FilterId(connector.getId(), "filter")),
		new Range<>(BigDecimal.valueOf(1.01d), BigDecimal.valueOf(1.01d))
)));
cqConcept.setTables(List.of(cqTable));
cqConcept.setExcludeFromSecondaryId(false);
root.setChildren(List.of(reuse, cqConcept));
reused.setSecondaryId(query.getSecondaryId());
IntegrationUtils.assertQueryResult(conquery, reused, 1L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Reuse Multiple times with different query types
{
final SecondaryIdQuery reused1 = new SecondaryIdQuery();
reused1.setRoot(new CQReusedQuery(execution.getId()));
reused1.setSecondaryId(query.getSecondaryId());
final ManagedExecutionId reused1Id = IntegrationUtils.assertQueryResult(conquery, reused1, 4L, ExecutionState.DONE, conquery.getTestUser(), 201);
final ManagedQuery execution1 = (ManagedQuery) metaStorage.getExecution(reused1Id);
{
final SecondaryIdQuery reused2 = new SecondaryIdQuery();
reused2.setRoot(new CQReusedQuery(execution1.getId()));
reused2.setSecondaryId(query.getSecondaryId());
final ManagedExecutionId reused2Id = IntegrationUtils.assertQueryResult(conquery, reused2, 4L, ExecutionState.DONE, conquery.getTestUser(), 201);
final ManagedQuery execution2 = (ManagedQuery) metaStorage.getExecution(reused2Id);
assertThat(reused2Id).as("Query should be reused.").isEqualTo(reused1Id);
// Now we change to ConceptQuery
final ConceptQuery reused3 = new ConceptQuery(new CQReusedQuery(execution2.getId()));
IntegrationUtils.assertQueryResult(conquery, reused3, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
{
final SecondaryIdQuery reusedDiffId = new SecondaryIdQuery();
reusedDiffId.setRoot(new CQReusedQuery(execution1.getId()));
// ignored is a single global value and therefore the same as by-PID
reusedDiffId.setSecondaryId(conquery.getNamespace()
		.getStorage()
		.getSecondaryId(new SecondaryIdDescriptionId(conquery.getDataset().getId(), "ignored")));
final ManagedExecutionId executionId = IntegrationUtils.assertQueryResult(conquery, reusedDiffId, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
assertThat(executionId).as("Query should NOT be reused.").isNotEqualTo(reused1Id);
}
{
// Reuse by another user (create a copy of the actual query)
final SecondaryIdQuery reused = new SecondaryIdQuery();
reused.setRoot(new CQReusedQuery(execution.getId()));
reused.setSecondaryId(query.getSecondaryId());
User shareHolder = new User("shareholder", "ShareHolder", conquery.getMetaStorage());
conquery.getMetaProcessor().addUser(shareHolder);
shareHolder.addPermissions(Set.of(
		dataset.createPermission(Set.of(Ability.READ)),
		execution.createPermission(Set.of(Ability.READ))
));
ManagedExecutionId copyId = IntegrationUtils.assertQueryResult(conquery, reused, 4L, ExecutionState.DONE, shareHolder, 201);
ManagedExecution<?> copy = metaStorage.getExecution(copyId);
// Content-wise, the label and tags should be the same
assertThat(copy).usingRecursiveComparison().comparingOnlyFields("label", "tags").isEqualTo(execution);
// However, the object holding the tags must be different, so the two are not linked here
assertThat(copy.getTags()).isNotSameAs(execution.getTags());
// And the ids must be different
assertThat(copy.getId()).isNotEqualTo(execution.getId());
}
}
}