use of com.bakdata.conquery.apiv1.query.Query in project conquery by bakdata.
the class DatasetDeletionTest method execute.
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
    final StandaloneSupport conquery = testConquery.getSupport(name);
    final MetaStorage storage = conquery.getMetaStorage();
    final Dataset dataset = conquery.getDataset();
    Namespace namespace = conquery.getNamespace();
    final String testJson = In.resource("/tests/query/DELETE_IMPORT_TESTS/SIMPLE_TREECONCEPT_Query.test.json").withUTF8().readAll();
    final QueryTest test = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson);

    // Manually import data, so we can do our own work.
    {
        ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));
        importSecondaryIds(conquery, test.getContent().getSecondaryIds());
        conquery.waitUntilWorkDone();

        LoadingUtil.importTables(conquery, test.getContent().getTables());
        conquery.waitUntilWorkDone();

        LoadingUtil.importConcepts(conquery, test.getRawConcepts());
        conquery.waitUntilWorkDone();

        LoadingUtil.importTableContents(conquery, test.getContent().getTables());
        conquery.waitUntilWorkDone();
    }

    final Query query = IntegrationUtils.parseQuery(conquery, test.getRawQuery());
    final int nImports = namespace.getStorage().getAllImports().size();
log.info("Checking state before deletion");
// Assert state before deletion.
{
// Must contain the import.
assertThat(namespace.getStorage().getCentralRegistry().getOptional(dataset.getId())).isNotEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", value.getInfo().getId()).isNotEmpty();
assertThat(workerStorage.getAllBuckets()).describedAs("Buckets for Worker %s", value.getInfo().getId()).isNotEmpty();
}
}
log.info("Executing query before deletion");
IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
    // Delete the dataset.
    {
        log.info("Issuing deletion of dataset {}", dataset);

        // Deleting a dataset that still has tables with associated connectors is not allowed, so this must throw.
        assertThatThrownBy(() -> conquery.getDatasetsProcessor().deleteDataset(dataset)).isInstanceOf(WebApplicationException.class);

        // TODO use api
        conquery.getNamespace().getStorage().getTables().forEach(tableId -> conquery.getDatasetsProcessor().deleteTable(tableId, true));
        conquery.waitUntilWorkDone();

        // Finally delete the dataset itself.
        conquery.getDatasetsProcessor().deleteDataset(dataset);
        conquery.waitUntilWorkDone();

        assertThat(storage.getCentralRegistry().getOptional(dataset.getId())).isEmpty();
    }
    // Assert state after deletion.
    {
        log.info("Checking state after deletion");

        // The dataset and all of its imports have been deleted, so none should remain.
        assertThat(namespace.getStorage().getAllImports().size()).isEqualTo(0);

        // No import referencing the deleted dataset should be found.
        assertThat(namespace.getStorage().getAllImports())
                .filteredOn(imp -> imp.getId().getTable().getDataset().equals(dataset.getId()))
                .isEmpty();

        for (ShardNode node : conquery.getShardNodes()) {
            for (Worker value : node.getWorkers().getWorkers().values()) {
                if (!value.getInfo().getDataset().equals(dataset.getId())) {
                    continue;
                }

                final ModificationShieldedWorkerStorage workerStorage = value.getStorage();

                // No Bucket referencing the deleted dataset should be found.
                assertThat(workerStorage.getAllBuckets())
                        .describedAs("Buckets for Worker %s", value.getInfo().getId())
                        .filteredOn(bucket -> bucket.getTable().getDataset().getId().equals(dataset.getId()))
                        .isEmpty();

                // No CBlock referencing the deleted dataset may remain.
                assertThat(workerStorage.getAllCBlocks())
                        .describedAs("CBlocks for Worker %s", value.getInfo().getId())
                        .filteredOn(cBlock -> cBlock.getBucket().getTable().getDataset().getId().equals(dataset.getId()))
                        .isEmpty();
            }
        }

        // It is not possible to issue a query against a non-existent dataset, so we assert that parsing the query fails.
        assertThatThrownBy(() -> IntegrationUtils.parseQuery(conquery, test.getRawQuery())).isNotNull();

        IntegrationUtils.assertQueryResult(conquery, query, 0, ExecutionState.FAILED, conquery.getTestUser(), 404);
    }
    // Reload the dataset and assert the state.
    // We have to do some tricks with StandaloneSupport to open it with another Dataset.
    final StandaloneSupport conqueryReimport = testConquery.getSupport(namespace.getDataset().getName());
    {
        // Only re-import the deleted tables and their contents.
        LoadingUtil.importTables(conqueryReimport, test.getContent().getTables());
        assertThat(conqueryReimport.getNamespace().getStorage().getTables()).isNotEmpty();
        conqueryReimport.waitUntilWorkDone();

        LoadingUtil.importTableContents(conqueryReimport, test.getContent().getTables());
        conqueryReimport.waitUntilWorkDone();

        LoadingUtil.importConcepts(conqueryReimport, test.getRawConcepts());
        conqueryReimport.waitUntilWorkDone();

        assertThat(conqueryReimport.getDatasetsProcessor().getDatasetRegistry().get(conqueryReimport.getDataset().getId()))
                .describedAs("Dataset after re-import.")
                .isNotNull();
        assertThat(conqueryReimport.getNamespace().getStorage().getAllImports().size()).isEqualTo(nImports);

        for (ShardNode node : conqueryReimport.getShardNodes()) {
            assertThat(node.getWorkers().getWorkers().values())
                    .filteredOn(w -> w.getInfo().getDataset().equals(conqueryReimport.getDataset().getId()))
                    .describedAs("Workers for node %s", node.getName())
                    .isNotEmpty();
        }

        log.info("Executing query after re-import");
        final Query query2 = IntegrationUtils.parseQuery(conqueryReimport, test.getRawQuery());

        // Issue a query and assert that it has the same content as the first time around.
        IntegrationUtils.assertQueryResult(conqueryReimport, query2, 2L, ExecutionState.DONE, conqueryReimport.getTestUser(), 201);
    }
    // Finally, restart conquery and assert again that the data is correct.
    {
        testConquery.shutdown();

        // restart
        testConquery.beforeAll();

        final StandaloneSupport conqueryRestart = testConquery.openDataset(conqueryReimport.getDataset().getId());

        log.info("Checking state after restart");
        assertThat(conqueryRestart.getNamespace().getStorage().getAllImports().size()).isEqualTo(2);

        for (ShardNode node : conqueryRestart.getShardNodes()) {
            for (Worker value : node.getWorkers().getWorkers().values()) {
                if (!value.getInfo().getDataset().equals(dataset.getId())) {
                    continue;
                }

                final ModificationShieldedWorkerStorage workerStorage = value.getStorage();

                assertThat(workerStorage.getAllBuckets().stream().filter(bucket -> bucket.getTable().getDataset().getId().equals(dataset.getId())))
                        .describedAs("Buckets for Worker %s", value.getInfo().getId())
                        .isNotEmpty();
            }
        }

        log.info("Executing query after restart");
        final Query query3 = IntegrationUtils.parseQuery(conqueryRestart, test.getRawQuery());

        // Issue a query and assert that it has the same content as the first time around.
        IntegrationUtils.assertQueryResult(conqueryRestart, query3, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
    }
}
use of com.bakdata.conquery.apiv1.query.Query in project conquery by bakdata.
the class FilterTest method parseQuery.
private Query parseQuery(StandaloneSupport support) throws JSONException, IOException {
    rawFilterValue.put("filter", support.getDataset().getName() + ".concept.connector.filter");
    FilterValue<?> result = parseSubTree(support, rawFilterValue, Jackson.MAPPER.getTypeFactory().constructType(FilterValue.class));

    CQTable cqTable = new CQTable();
    cqTable.setFilters(Collections.singletonList(result));
    cqTable.setConnector(connector);

    CQConcept cqConcept = new CQConcept();
    cqTable.setConcept(cqConcept);
    cqConcept.setElements(Collections.singletonList(concept));
    cqConcept.setTables(Collections.singletonList(cqTable));

    if (dateRange != null) {
        CQDateRestriction restriction = new CQDateRestriction();
        restriction.setDateRange(dateRange);
        restriction.setChild(cqConcept);
        return new ConceptQuery(restriction);
    }
    return new ConceptQuery(cqConcept);
}
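A Query assembled this way is typically handed straight to the integration harness for execution. The following is a minimal usage sketch, assuming a StandaloneSupport named support and the IntegrationUtils helper seen in the other snippets here; the expected row count (2L) and status code are illustrative, not taken from FilterTest:

    // Hypothetical driver: parse the filter query and execute it through the test harness,
    // expecting ExecutionState.DONE and an HTTP 201 on submission.
    final Query query = parseQuery(support);
    IntegrationUtils.assertQueryResult(support, query, 2L, ExecutionState.DONE, support.getTestUser(), 201);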
use of com.bakdata.conquery.apiv1.query.Query in project conquery by bakdata.
the class ConceptPermissionTest method execute.
@Override
public void execute(StandaloneSupport conquery) throws Exception {
    final MetaStorage storage = conquery.getMetaStorage();
    final Dataset dataset = conquery.getDataset();
    final String testJson = In.resource("/tests/query/SIMPLE_TREECONCEPT_QUERY/SIMPLE_TREECONCEPT_Query.test.json").withUTF8().readAll();
    final QueryTest test = (QueryTest) JsonIntegrationTest.readJson(dataset.getId(), testJson);
    final QueryProcessor processor = new QueryProcessor(conquery.getDatasetRegistry(), storage, conquery.getConfig());
    final User user = new User("testUser", "testUserLabel", storage);

    // Manually import data, so we can do our own work.
    {
        ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));
        importSecondaryIds(conquery, test.getContent().getSecondaryIds());
        conquery.waitUntilWorkDone();

        LoadingUtil.importTables(conquery, test.getContent().getTables());
        conquery.waitUntilWorkDone();

        LoadingUtil.importConcepts(conquery, test.getRawConcepts());
        conquery.waitUntilWorkDone();

        LoadingUtil.importTableContents(conquery, test.getContent().getTables());
        conquery.waitUntilWorkDone();

        storage.addUser(user);
        user.addPermission(DatasetPermission.onInstance(Ability.READ, dataset.getId()));
    }

    // The query cannot be deserialized without the Namespace being set up.
    final Query query = IntegrationUtils.parseQuery(conquery, test.getRawQuery());

    // The lone concept that is used in the test.
    Concept<?> concept = conquery.getNamespace().getStorage().getAllConcepts().iterator().next();

    // Without a permission on the concept, the query must fail.
    IntegrationUtils.assertQueryResult(conquery, query, -1, ExecutionState.FAILED, user, 403);

    // Add the necessary permission.
    {
        final ConqueryPermission permission = concept.createPermission(Ability.READ.asSet());

        log.info("Adding the Permission[{}] to User[{}]", permission, user);

        user.addPermission(permission);
    }

    // Only assert permissions.
    IntegrationUtils.assertQueryResult(conquery, query, -1, ExecutionState.DONE, user, 201);

    conquery.waitUntilWorkDone();

    // Clean up.
    {
        storage.removeUser(user.getId());
    }
}
use of com.bakdata.conquery.apiv1.query.Query in project conquery by bakdata.
the class AbstractQueryEngineTest method executeTest.
@Override
public void executeTest(StandaloneSupport standaloneSupport) throws IOException {
    Query query = getQuery();

    assertThat(standaloneSupport.getValidator().validate(query)).describedAs("Query Validation Errors").isEmpty();

    log.info("{} QUERY INIT", getLabel());

    final User testUser = standaloneSupport.getTestUser();
    final ManagedExecutionId executionId = IntegrationUtils.assertQueryResult(standaloneSupport, query, -1, ExecutionState.DONE, testUser, 201);
    final ManagedQuery execution = (ManagedQuery) standaloneSupport.getMetaStorage().getExecution(executionId);

    // Check that every result line has as many values as there are result infos.
    List<ResultInfo> resultInfos = execution.getResultInfos();
    assertThat(execution.streamResults().flatMap(EntityResult::streamValues))
            .as("Should have same size as result infos")
            .allSatisfy(v -> assertThat(v).hasSameSizeAs(resultInfos));

    // Get the actual response and compare with the expected result.
    final Response csvResponse = standaloneSupport.getClient()
            .target(HierarchyHelper.hierarchicalPath(standaloneSupport.defaultApiURIBuilder(), ResultCsvResource.class, "getAsCsv")
                    .buildFromMap(Map.of(DATASET, standaloneSupport.getDataset().getName(), QUERY, execution.getId().toString())))
            .queryParam("pretty", false)
            .request(AdditionalMediaTypes.CSV)
            .acceptLanguage(Locale.ENGLISH)
            .get();

    List<String> actual = In.stream(((InputStream) csvResponse.getEntity())).readLines();

    ResourceFile expectedCsv = getExpectedCsv();
    List<String> expected = In.stream(expectedCsv.stream()).readLines();

    assertThat(actual).as("Results for %s are not as expected.", this).containsExactlyInAnyOrderElementsOf(expected);

    // Check that getLastResultCount returns the correct size (header line excluded).
    if (execution.streamResults().noneMatch(MultilineEntityResult.class::isInstance)) {
        assertThat(execution.getLastResultCount()).as("Result count for %s is not as expected.", this).isEqualTo(expected.size() - 1);
    }

    log.info("INTEGRATION TEST SUCCESSFUL {} {} on {} rows", getClass().getSimpleName(), this, expected.size());
}
use of com.bakdata.conquery.apiv1.query.Query in project conquery by bakdata.
the class LoadingUtil method importPreviousQueries.
public static void importPreviousQueries(StandaloneSupport support, RequiredData content, User user) throws IOException {
    // Load previous query results if available.
    int id = 1;
    for (ResourceFile queryResults : content.getPreviousQueryResults()) {
        UUID queryId = new UUID(0L, id++);

        final CsvParser parser = support.getConfig().getCsv().withParseHeaders(false).withSkipHeader(false).createParser();
        String[][] data = parser.parseAll(queryResults.stream()).toArray(new String[0][]);

        ConceptQuery q = new ConceptQuery(new CQExternal(Arrays.asList("ID", "DATE_SET"), data));

        ManagedExecution<?> managed = support.getNamespace().getExecutionManager().createQuery(support.getNamespace().getNamespaces(), q, queryId, user, support.getNamespace().getDataset());

        user.addPermission(managed.createPermission(AbilitySets.QUERY_CREATOR));

        if (managed.getState() == ExecutionState.FAILED) {
            fail("Query failed");
        }
    }

    for (JsonNode queryNode : content.getPreviousQueries()) {
        ObjectMapper mapper = new SingletonNamespaceCollection(support.getNamespaceStorage().getCentralRegistry()).injectIntoNew(Jackson.MAPPER);
        mapper = support.getDataset().injectIntoNew(mapper);

        Query query = mapper.readerFor(Query.class).readValue(queryNode);
        UUID queryId = new UUID(0L, id++);

        ManagedExecution<?> managed = support.getNamespace().getExecutionManager().createQuery(support.getNamespace().getNamespaces(), query, queryId, user, support.getNamespace().getDataset());

        user.addPermission(ExecutionPermission.onInstance(AbilitySets.QUERY_CREATOR, managed.getId()));

        if (managed.getState() == ExecutionState.FAILED) {
            fail("Query failed");
        }
    }

    // Wait only if we actually did anything.
    if (!content.getPreviousQueryResults().isEmpty()) {
        support.waitUntilWorkDone();
    }
}
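The CQExternal used above pairs a list of column names with raw row data: the first argument names the columns, the second holds the parsed rows. A minimal sketch of building such an external query from in-memory rows, assuming the two-column ID/DATE_SET layout shown in the snippet (the entity id and date-range literal are illustrative placeholders, not values from the test data):

    // Hypothetical in-memory upload: one entity with one date range.
    String[][] rows = {
            { "entity1", "2020-01-01/2020-12-31" } // illustrative id and date-range literal
    };
    ConceptQuery external = new ConceptQuery(new CQExternal(Arrays.asList("ID", "DATE_SET"), rows));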