Use of com.bakdata.conquery.util.support.StandaloneSupport in project conquery by bakdata.
The class ImportDeletionTest, method execute.
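The test loads a small tree-concept dataset, deletes a single import via the admin API, and verifies on the manager and on every shard worker that the import, its buckets, and its CBlocks are gone. It then re-imports the data under the same name and finally restarts the cluster, asserting that the original query result is restored after the re-import and survives the restart.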
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
final StandaloneSupport conquery = testConquery.getSupport(name);
MetaStorage storage = conquery.getMetaStorage();
final String testJson = In.resource("/tests/query/DELETE_IMPORT_TESTS/SIMPLE_TREECONCEPT_Query.test.json").withUTF8().readAll();
final Dataset dataset = conquery.getDataset();
final Namespace namespace = conquery.getNamespace();
final ImportId importId = ImportId.Parser.INSTANCE.parse(dataset.getName(), "test_table2", "test_table2");
final QueryTest test = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson);
// Manually import data, so we can do our own work.
{
ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));
importSecondaryIds(conquery, test.getContent().getSecondaryIds());
conquery.waitUntilWorkDone();
LoadingUtil.importTables(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
LoadingUtil.importConcepts(conquery, test.getRawConcepts());
conquery.waitUntilWorkDone();
LoadingUtil.importTableContents(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
}
final Query query = IntegrationUtils.parseQuery(conquery, test.getRawQuery());
final int nImports = namespace.getStorage().getAllImports().size();
// State before deletion.
{
log.info("Checking state before deletion");
// Must contain the import.
assertThat(namespace.getStorage().getAllImports()).filteredOn(imp -> imp.getId().equals(importId)).isNotEmpty();
assertThat(namespace.getStorage().getCentralRegistry().getOptional(importId)).isNotEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker worker : node.getWorkers().getWorkers().values()) {
if (!worker.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = worker.getStorage();
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", worker.getInfo().getId()).filteredOn(block -> block.getBucket().getId().getDataset().equals(dataset.getId())).isNotEmpty();
assertThat(workerStorage.getAllBuckets()).filteredOn(bucket -> bucket.getId().getDataset().equals(dataset.getId())).describedAs("Buckets for Worker %s", worker.getInfo().getId()).isNotEmpty();
// Must contain the import.
assertThat(workerStorage.getImport(importId)).isNotNull();
}
}
log.info("Executing query before deletion");
IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Delete the import.
{
log.info("Issuing deletion of import {}", importId);
final URI deleteImportUri = HierarchyHelper.hierarchicalPath(conquery.defaultAdminURIBuilder(), AdminTablesResource.class, "deleteImport").buildFromMap(Map.of(ResourceConstants.DATASET, conquery.getDataset().getId(), ResourceConstants.TABLE, importId.getTable(), ResourceConstants.IMPORT_ID, importId));
final Response delete = conquery.getClient().target(deleteImportUri).request(MediaType.APPLICATION_JSON).delete();
assertThat(delete.getStatusInfo().getFamily()).isEqualTo(Response.Status.Family.SUCCESSFUL);
conquery.waitUntilWorkDone();
}
// State after deletion.
{
log.info("Checking state after deletion");
// We have deleted an import, so there should be one fewer!
assertThat(namespace.getStorage().getAllImports().size()).isEqualTo(nImports - 1);
// The deleted import should not be found.
assertThat(namespace.getStorage().getAllImports()).filteredOn(imp -> imp.getId().equals(importId)).isEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker worker : node.getWorkers().getWorkers().values()) {
if (!worker.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = worker.getStorage();
// No bucket should be found referencing the import.
assertThat(workerStorage.getAllBuckets()).describedAs("Buckets for Worker %s", worker.getInfo().getId()).filteredOn(bucket -> bucket.getImp().getId().equals(importId)).isEmpty();
// No CBlock associated with the import may exist.
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", worker.getInfo().getId()).filteredOn(cBlock -> cBlock.getBucket().getId().getImp().equals(importId)).isEmpty();
// The import itself should not exist anymore.
assertThat(workerStorage.getImport(importId)).describedAs("Import for Worker %s", worker.getInfo().getId()).isNull();
}
}
log.info("Executing query after deletion");
// Issue the same query and assert that it now returns fewer results.
IntegrationUtils.assertQueryResult(conquery, query, 1L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
conquery.waitUntilWorkDone();
// Load more data under the same name into the same table, re-creating only the deleted import/table.
{
// only import the deleted import/table
final RequiredTable import2Table = test.getContent().getTables().stream().filter(table -> table.getName().equalsIgnoreCase(importId.getTable().getTable())).findFirst().orElseThrow();
final ResourceFile csv = import2Table.getCsv();
final String path = csv.getPath();
// copy csv to tmp folder
// Content 2.2 contains an extra entry of a value that hasn't been seen before.
FileUtils.copyInputStreamToFile(In.resource(path.substring(0, path.lastIndexOf('/')) + "/" + "content2.2.csv").asStream(), new File(conquery.getTmpDir(), csv.getName()));
File descriptionFile = new File(conquery.getTmpDir(), import2Table.getName() + ConqueryConstants.EXTENSION_DESCRIPTION);
File preprocessedFile = new File(conquery.getTmpDir(), import2Table.getName() + ConqueryConstants.EXTENSION_PREPROCESSED);
// create import descriptor
TableImportDescriptor desc = new TableImportDescriptor();
desc.setName(import2Table.getName());
desc.setTable(import2Table.getName());
TableInputDescriptor input = new TableInputDescriptor();
{
input.setPrimary(import2Table.getPrimaryColumn().createOutput());
input.setSourceFile(import2Table.getCsv().getName());
input.setOutput(new OutputDescription[import2Table.getColumns().length]);
for (int i = 0; i < import2Table.getColumns().length; i++) {
input.getOutput()[i] = import2Table.getColumns()[i].createOutput();
}
}
desc.setInputs(new TableInputDescriptor[] { input });
Jackson.MAPPER.writeValue(descriptionFile, desc);
// preprocess
conquery.preprocessTmp(conquery.getTmpDir(), List.of(descriptionFile));
// import the preprocessed file (preprocessed output is gzip-compressed)
conquery.getDatasetsProcessor().addImport(conquery.getNamespace(), new GZIPInputStream(new FileInputStream(preprocessedFile)));
conquery.waitUntilWorkDone();
}
// State after reimport.
{
log.info("Checking state after re-import");
assertThat(namespace.getStorage().getAllImports().size()).isEqualTo(nImports);
for (ShardNode node : conquery.getShardNodes()) {
for (Worker worker : node.getWorkers().getWorkers().values()) {
if (!worker.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = worker.getStorage();
assertThat(workerStorage.getAllBuckets()).describedAs("Buckets for Worker %s", worker.getInfo().getId()).filteredOn(bucket -> bucket.getImp().getId().equals(importId)).filteredOn(bucket -> bucket.getId().getDataset().equals(dataset.getId())).isNotEmpty();
}
}
log.info("Executing query after re-import");
// Issue a query and assert that it has the same content as the first time around.
IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Finally, restart conquery and assert again that the data is correct.
{
testConquery.shutdown();
// restart
testConquery.beforeAll();
StandaloneSupport conquery2 = testConquery.openDataset(dataset.getId());
log.info("Checking state after re-start");
{
assertThat(namespace.getStorage().getAllImports().size()).isEqualTo(2);
for (ShardNode node : conquery2.getShardNodes()) {
for (Worker worker : node.getWorkers().getWorkers().values()) {
if (!worker.getInfo().getDataset().equals(dataset.getId()))
continue;
final ModificationShieldedWorkerStorage workerStorage = worker.getStorage();
assertThat(workerStorage.getAllBuckets()).describedAs("Buckets for Worker %s", worker.getInfo().getId()).filteredOn(bucket -> bucket.getId().getDataset().equals(dataset.getId())).filteredOn(bucket -> bucket.getImp().getId().equals(importId)).isNotEmpty();
}
}
log.info("Executing query after re-import");
// Issue a query and assert that it has the same content as the first time around.
IntegrationUtils.assertQueryResult(conquery2, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
}
}
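All four tests in this listing repeat the same nested loop to reach the workers of the dataset under test. A small helper like the following (hypothetical, not part of the conquery code base; assumes the usual java.util imports) would capture that pattern using only the accessors already visible above:

private static List<Worker> workersOf(StandaloneSupport conquery, Dataset dataset) {
	// Collect every worker, on every shard node, that serves the given dataset.
	final List<Worker> workers = new ArrayList<>();
	for (ShardNode node : conquery.getShardNodes()) {
		for (Worker worker : node.getWorkers().getWorkers().values()) {
			if (worker.getInfo().getDataset().equals(dataset.getId())) {
				workers.add(worker);
			}
		}
	}
	return workers;
}

The state checks could then iterate workersOf(conquery, dataset) directly instead of nesting the two loops and the dataset filter each time.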
Use of com.bakdata.conquery.util.support.StandaloneSupport in project conquery by bakdata.
The class TableDeletionTest, method execute.
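The test attempts to delete a whole table. The first DELETE is rejected while a concept still references the table and succeeds only after the concept has been removed; the query then fails until the table and its contents are re-imported, and the state is verified once more after a restart.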
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
final StandaloneSupport conquery = testConquery.getSupport(name);
final MetaStorage storage = conquery.getMetaStorage();
final String testJson = In.resource("/tests/query/DELETE_IMPORT_TESTS/SIMPLE_TREECONCEPT_Query.test.json").withUTF8().readAll();
final Dataset dataset = conquery.getDataset();
final Namespace namespace = conquery.getNamespace();
final TableId tableId = TableId.Parser.INSTANCE.parse(dataset.getName(), "test_table2");
final QueryTest test = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson);
// Manually import data, so we can do our own work.
{
ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));
importSecondaryIds(conquery, test.getContent().getSecondaryIds());
conquery.waitUntilWorkDone();
LoadingUtil.importTables(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
LoadingUtil.importConcepts(conquery, test.getRawConcepts());
conquery.waitUntilWorkDone();
LoadingUtil.importTableContents(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
}
final Query query = IntegrationUtils.parseQuery(conquery, test.getRawQuery());
final int nImports = namespace.getStorage().getAllImports().size();
// State before deletion.
{
log.info("Checking state before deletion");
// Must contain the table.
assertThat(namespace.getStorage().getCentralRegistry().getOptional(tableId)).isNotEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", value.getInfo().getId()).isNotEmpty();
assertThat(workerStorage.getAllBuckets()).describedAs("Buckets for Worker %s", value.getInfo().getId()).isNotEmpty();
}
}
log.info("Executing query before deletion");
IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Delete the table.
{
log.info("Issuing deletion of import {}", tableId);
// Try to delete the table via the API.
// But we do not allow deletion of tables with associated connectors, so this should fail!
final URI deleteTable = HierarchyHelper.hierarchicalPath(conquery.defaultAdminURIBuilder(), AdminTablesResource.class, "remove").buildFromMap(Map.of(ResourceConstants.DATASET, conquery.getDataset().getName(), ResourceConstants.TABLE, tableId.toString()));
final Response failed = conquery.getClient().target(deleteTable).request().delete();
assertThat(failed.getStatusInfo().getFamily()).isEqualTo(Response.Status.Family.CLIENT_ERROR);
conquery.getDatasetsProcessor().deleteConcept(conquery.getNamespace().getStorage().getAllConcepts().iterator().next());
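// Give the concept deletion a moment to propagate through the cluster before polling for quiescence.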
Thread.sleep(100);
conquery.waitUntilWorkDone();
final Response success = conquery.getClient().target(deleteTable).request().delete();
assertThat(success.getStatusInfo().getStatusCode()).isEqualTo(Response.Status.OK.getStatusCode());
Thread.sleep(100);
conquery.waitUntilWorkDone();
}
// State after deletion.
{
log.info("Checking state after deletion");
// We have deleted the table, so its import should be gone and there should be one fewer!
assertThat(namespace.getStorage().getAllImports().size()).isEqualTo(nImports - 1);
// No import of the deleted table should be found.
assertThat(namespace.getStorage().getAllImports()).filteredOn(imp -> imp.getId().getTable().equals(tableId)).isEmpty();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
// No bucket referencing the deleted table should be found.
assertThat(workerStorage.getAllBuckets()).describedAs("Buckets for Worker %s", value.getInfo().getId()).filteredOn(bucket -> bucket.getImp().getTable().getId().equals(tableId)).isEmpty();
// No CBlock associated with the deleted table may exist.
assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", value.getInfo().getId()).filteredOn(cBlock -> cBlock.getBucket().getImp().getTable().getId().equals(tableId)).isEmpty();
}
}
log.info("Executing query after deletion");
// Issue the query and assert that it fails, since its table no longer exists.
IntegrationUtils.assertQueryResult(conquery, query, 0L, ExecutionState.FAILED, conquery.getTestUser(), 400);
}
conquery.waitUntilWorkDone();
// Load the same import into the same table, restoring only the deleted import/table.
{
// only import the deleted import/table
LoadingUtil.importTables(conquery, test.getContent().getTables().stream().filter(table -> table.getName().equalsIgnoreCase(tableId.getTable())).collect(Collectors.toList()));
conquery.waitUntilWorkDone();
LoadingUtil.importTableContents(conquery, test.getContent().getTables().stream().filter(table -> table.getName().equalsIgnoreCase(tableId.getTable())).collect(Collectors.toList()));
conquery.waitUntilWorkDone();
LoadingUtil.importConcepts(conquery, test.getRawConcepts());
conquery.waitUntilWorkDone();
assertThat(namespace.getStorage().getTable(tableId)).describedAs("Table after re-import.").isNotNull();
for (ShardNode node : conquery.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
assertThat(value.getStorage().getCentralRegistry().resolve(tableId)).describedAs("Table in worker storage.").isNotNull();
}
}
}
// Test state after reimport.
{
log.info("Checking state after re-import");
assertThat(namespace.getStorage().getAllImports().size()).isEqualTo(nImports);
for (ShardNode node : conquery.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
assertThat(workerStorage.getAllBuckets().stream().filter(bucket -> bucket.getImp().getTable().getId().equals(tableId))).describedAs("Buckets for Worker %s", value.getInfo().getId()).isNotEmpty();
}
}
log.info("Executing query after re-import");
// Issue a query and assert that it has the same content as the first time around.
IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Finally, restart conquery and assert again that the data is correct.
{
testConquery.shutdown();
// restart
testConquery.beforeAll();
StandaloneSupport conquery2 = testConquery.openDataset(dataset.getId());
log.info("Checking state after re-start");
{
assertThat(namespace.getStorage().getAllImports().size()).isEqualTo(2);
for (ShardNode node : conquery2.getShardNodes()) {
for (Worker value : node.getWorkers().getWorkers().values()) {
if (!value.getInfo().getDataset().equals(dataset.getId())) {
continue;
}
final ModificationShieldedWorkerStorage workerStorage = value.getStorage();
assertThat(workerStorage.getAllBuckets().stream().filter(bucket -> bucket.getImp().getTable().getId().equals(tableId))).describedAs("Buckets for Worker %s", value.getInfo().getId()).isNotEmpty();
}
}
log.info("Executing query after re-import");
// Issue a query and assert that it has the same content as the first time around.
IntegrationUtils.assertQueryResult(conquery2, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
}
}
Use of com.bakdata.conquery.util.support.StandaloneSupport in project conquery by bakdata.
The class RestartTest, method execute.
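The test checks persistence across a full restart: the number of stored executions, the entity id mapping, and the auth constellation (users, roles, and groups with their permissions, including the deleted ones) must all be in the expected state after the cluster is shut down and brought back up.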
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
// read test specification
String testJson = In.resource("/tests/query/RESTART_TEST_DATA/SIMPLE_TREECONCEPT_Query.json").withUTF8().readAll();
Validator validator = Validators.newValidator();
EntityIdMap entityIdMap = IdMapSerialisationTest.createTestPersistentMap();
ManagerNode manager = testConquery.getStandaloneCommand().getManager();
AdminDatasetProcessor adminDatasetProcessor = manager.getAdmin().getAdminDatasetProcessor();
AdminProcessor adminProcessor = manager.getAdmin().getAdminProcessor();
StandaloneSupport conquery = testConquery.getSupport(name);
DatasetId dataset = conquery.getDataset().getId();
ConqueryTestSpec test = JsonIntegrationTest.readJson(dataset, testJson);
ValidatorHelper.failOnError(log, validator.validate(test));
test.importRequiredData(conquery);
test.executeTest(conquery);
final int numberOfExecutions = conquery.getMetaStorage().getAllExecutions().size();
// IDMapping Testing
NamespaceStorage namespaceStorage = conquery.getNamespaceStorage();
namespaceStorage.updateIdMapping(entityIdMap);
final Dataset dataset1 = adminDatasetProcessor.addDataset(TEST_DATASET_1);
final Dataset dataset2 = adminDatasetProcessor.addDataset(TEST_DATASET_2);
final Dataset dataset3 = adminDatasetProcessor.addDataset(TEST_DATASET_3);
final Dataset dataset4 = adminDatasetProcessor.addDataset(TEST_DATASET_4);
final Dataset dataset5 = adminDatasetProcessor.addDataset(TEST_DATASET_5);
final Dataset dataset6 = adminDatasetProcessor.addDataset(TEST_DATASET_6);
MetaStorage storage = conquery.getMetaStorage();
Role role = new Role("role", "ROLE", storage);
Role roleToDelete = new Role("roleDelete", "ROLE_DELETE", storage);
User user = new User("user@test.email", "USER", storage);
User userToDelete = new User("userDelete@test.email", "USER_DELETE", storage);
Group group = new Group("group", "GROUP", storage);
Group groupToDelete = new Group("groupDelete", "GROUP_DELETE", storage);
{
// Auth testing (deletion and permission grant)
// build constellation
// TODO USE APIS
adminProcessor.addUser(user);
adminProcessor.addUser(userToDelete);
adminProcessor.addRole(role);
adminProcessor.addRole(roleToDelete);
adminProcessor.addGroup(group);
adminProcessor.addGroup(groupToDelete);
adminProcessor.addRoleTo(user, role);
adminProcessor.addRoleTo(user, roleToDelete);
adminProcessor.addRoleTo(userToDelete, role);
adminProcessor.addRoleTo(userToDelete, roleToDelete);
adminProcessor.addRoleTo(group, role);
adminProcessor.addRoleTo(group, roleToDelete);
adminProcessor.addRoleTo(groupToDelete, role);
adminProcessor.addRoleTo(groupToDelete, roleToDelete);
adminProcessor.addUserToGroup(group, user);
adminProcessor.addUserToGroup(group, userToDelete);
adminProcessor.addUserToGroup(groupToDelete, user);
adminProcessor.addUserToGroup(groupToDelete, userToDelete);
// Adding Permissions
adminProcessor.createPermission(user, dataset1.createPermission(Ability.READ.asSet()));
adminProcessor.createPermission(userToDelete, dataset2.createPermission(Ability.READ.asSet()));
adminProcessor.createPermission(role, dataset3.createPermission(Ability.READ.asSet()));
adminProcessor.createPermission(roleToDelete, dataset4.createPermission(Ability.READ.asSet()));
adminProcessor.createPermission(group, dataset5.createPermission(Ability.READ.asSet()));
adminProcessor.createPermission(groupToDelete, dataset6.createPermission(Ability.READ.asSet()));
// Delete entities
// TODO use API
adminProcessor.deleteUser(userToDelete);
adminProcessor.deleteRole(roleToDelete);
adminProcessor.deleteGroup(groupToDelete);
}
log.info("Shutting down for restart");
testConquery.shutdown();
log.info("Restarting");
testConquery.beforeAll();
final StandaloneSupport support = testConquery.openDataset(dataset);
log.info("Restart complete");
DatasetRegistry datasetRegistry = support.getDatasetsProcessor().getDatasetRegistry();
assertThat(support.getMetaStorage().getAllExecutions().size()).as("Executions after restart").isEqualTo(numberOfExecutions);
test.executeTest(support);
{
// Actual auth assertions
User userStored = storage.getUser(user.getId());
assertThat(userStored).isEqualTo(user);
assertThat(storage.getRole(role.getId())).isEqualTo(role);
assertThat(storage.getGroup(group.getId())).isEqualTo(group);
assertThat(storage.getUser(userToDelete.getId())).as("deleted user should stay deleted").isNull();
assertThat(storage.getRole(roleToDelete.getId())).as("deleted role should stay deleted").isNull();
assertThat(storage.getGroup(groupToDelete.getId())).as("deleted group should stay deleted").isNull();
assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_1.getId()).getDataset(), Ability.READ)).isTrue();
assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_2.getId()).getDataset(), Ability.READ)).isFalse();
assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_3.getId()).getDataset(), Ability.READ)).isTrue();
assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_4.getId()).getDataset(), Ability.READ)).isFalse();
assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_5.getId()).getDataset(), Ability.READ)).isTrue();
assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_6.getId()).getDataset(), Ability.READ)).isFalse();
}
EntityIdMap entityIdMapAfterRestart = conquery.getNamespaceStorage().getIdMapping();
assertThat(entityIdMapAfterRestart).isEqualTo(entityIdMap);
// We need to reassign the dataset processor because the instance prior to the restart became invalid
adminDatasetProcessor = testConquery.getStandaloneCommand().getManager().getAdmin().getAdminDatasetProcessor();
// Cleanup
adminDatasetProcessor.deleteDataset(dataset1);
adminDatasetProcessor.deleteDataset(dataset2);
adminDatasetProcessor.deleteDataset(dataset3);
adminDatasetProcessor.deleteDataset(dataset4);
adminDatasetProcessor.deleteDataset(dataset5);
adminDatasetProcessor.deleteDataset(dataset6);
}
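Note the restart idiom shared with the deletion tests above: testConquery.shutdown() followed by testConquery.beforeAll() brings the cluster back up, and testConquery.openDataset(...) hands out a fresh StandaloneSupport. References obtained before the restart, such as the AdminDatasetProcessor here, become invalid and must be re-fetched.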
Use of com.bakdata.conquery.util.support.StandaloneSupport in project conquery by bakdata.
The class ReusedQueryTest, method execute.
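The test executes a SecondaryIdQuery once and then reuses the stored execution in several ways: plain reuse inside a ConceptQuery, re-execution via the API, reuse inside another SecondaryIdQuery (with and without excluding it from the secondary id), reuse under a different secondary id, and reuse by another user, which creates a copy of the execution.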
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
final StandaloneSupport conquery = testConquery.getSupport(name);
final String testJson = In.resource("/tests/query/SECONDARY_ID_MIXED/SECONDARY_IDS_MIXED.test.json").withUTF8().readAll();
final Dataset dataset = conquery.getDataset();
final QueryTest test = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson);
// Manually import data, so we can do our own work.
{
ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));
importSecondaryIds(conquery, test.getContent().getSecondaryIds());
conquery.waitUntilWorkDone();
LoadingUtil.importTables(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
LoadingUtil.importConcepts(conquery, test.getRawConcepts());
conquery.waitUntilWorkDone();
LoadingUtil.importTableContents(conquery, test.getContent().getTables());
conquery.waitUntilWorkDone();
}
final SecondaryIdQuery query = (SecondaryIdQuery) IntegrationUtils.parseQuery(conquery, test.getRawQuery());
final ManagedExecutionId id = IntegrationUtils.assertQueryResult(conquery, query, 4L, ExecutionState.DONE, conquery.getTestUser(), 201);
assertThat(id).isNotNull();
final MetaStorage metaStorage = conquery.getMetaStorage();
final ManagedQuery execution = (ManagedQuery) metaStorage.getExecution(id);
// Normal reuse
{
final ConceptQuery reused = new ConceptQuery(new CQReusedQuery(execution.getId()));
IntegrationUtils.assertQueryResult(conquery, reused, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Reuse by API
{
final URI reexecuteUri = HierarchyHelper.hierarchicalPath(conquery.defaultApiURIBuilder(), QueryResource.class, "reexecute").buildFromMap(Map.of(ResourceConstants.DATASET, conquery.getDataset().getName(), ResourceConstants.QUERY, execution.getId().toString()));
final FullExecutionStatus status = conquery.getClient().target(reexecuteUri).request(MediaType.APPLICATION_JSON).post(Entity.entity(null, MediaType.APPLICATION_JSON_TYPE)).readEntity(FullExecutionStatus.class);
assertThat(status.getStatus()).isIn(ExecutionState.RUNNING, ExecutionState.DONE);
}
// Reuse in SecondaryId
{
final SecondaryIdQuery reused = new SecondaryIdQuery();
reused.setRoot(new CQReusedQuery(execution.getId()));
reused.setSecondaryId(query.getSecondaryId());
IntegrationUtils.assertQueryResult(conquery, reused, 4L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Reuse in SecondaryId, but exclude the reused query from the secondary id
{
final SecondaryIdQuery reused = new SecondaryIdQuery();
final CQAnd root = new CQAnd();
reused.setRoot(root);
final CQReusedQuery reuse = new CQReusedQuery(execution.getId());
reuse.setExcludeFromSecondaryId(true);
// We select only a single event of the query by exact filtering.
final CQConcept cqConcept = new CQConcept();
final ConceptId conceptId = new ConceptId(conquery.getDataset().getId(), "concept");
final Concept<?> concept = conquery.getNamespaceStorage().getConcept(conceptId);
cqConcept.setElements(List.of(concept));
final CQTable cqTable = new CQTable();
cqTable.setConcept(cqConcept);
final CentralRegistry centralRegistry = conquery.getNamespaceStorage().getCentralRegistry();
final Connector connector = centralRegistry.resolve(new ConnectorId(conceptId, "connector1"));
cqTable.setConnector(connector);
cqTable.setFilters(List.of(new FilterValue.CQRealRangeFilter((Filter<Range<BigDecimal>>) centralRegistry.resolve(new FilterId(connector.getId(), "filter")), new Range<>(BigDecimal.valueOf(1.01d), BigDecimal.valueOf(1.01d)))));
cqConcept.setTables(List.of(cqTable));
cqConcept.setExcludeFromSecondaryId(false);
root.setChildren(List.of(reuse, cqConcept));
reused.setSecondaryId(query.getSecondaryId());
IntegrationUtils.assertQueryResult(conquery, reused, 1L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
// Reuse multiple times with different query types
{
final SecondaryIdQuery reused1 = new SecondaryIdQuery();
reused1.setRoot(new CQReusedQuery(execution.getId()));
reused1.setSecondaryId(query.getSecondaryId());
final ManagedExecutionId reused1Id = IntegrationUtils.assertQueryResult(conquery, reused1, 4L, ExecutionState.DONE, conquery.getTestUser(), 201);
final ManagedQuery execution1 = (ManagedQuery) metaStorage.getExecution(reused1Id);
{
final SecondaryIdQuery reused2 = new SecondaryIdQuery();
reused2.setRoot(new CQReusedQuery(execution1.getId()));
reused2.setSecondaryId(query.getSecondaryId());
final ManagedExecutionId reused2Id = IntegrationUtils.assertQueryResult(conquery, reused2, 4L, ExecutionState.DONE, conquery.getTestUser(), 201);
final ManagedQuery execution2 = (ManagedQuery) metaStorage.getExecution(reused2Id);
assertThat(reused2Id).as("Query should be reused.").isEqualTo(reused1Id);
// Now we change to ConceptQuery
final ConceptQuery reused3 = new ConceptQuery(new CQReusedQuery(execution2.getId()));
IntegrationUtils.assertQueryResult(conquery, reused3, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}
{
final SecondaryIdQuery reusedDiffId = new SecondaryIdQuery();
reusedDiffId.setRoot(new CQReusedQuery(execution1.getId()));
// The secondary id "ignored" is a single global value and therefore the same as by-PID
reusedDiffId.setSecondaryId(conquery.getNamespace().getStorage().getSecondaryId(new SecondaryIdDescriptionId(conquery.getDataset().getId(), "ignored")));
final ManagedExecutionId executionId = IntegrationUtils.assertQueryResult(conquery, reusedDiffId, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
assertThat(executionId).as("Query should NOT be reused.").isNotEqualTo(reused1Id);
}
{
// Reuse by another user (create a copy of the actual query)
final SecondaryIdQuery reused = new SecondaryIdQuery();
reused.setRoot(new CQReusedQuery(execution.getId()));
reused.setSecondaryId(query.getSecondaryId());
User shareHolder = new User("shareholder", "ShareHolder", conquery.getMetaStorage());
conquery.getMetaProcessor().addUser(shareHolder);
shareHolder.addPermissions(Set.of(dataset.createPermission(Set.of(Ability.READ)), execution.createPermission(Set.of(Ability.READ))));
ManagedExecutionId copyId = IntegrationUtils.assertQueryResult(conquery, reused, 4L, ExecutionState.DONE, shareHolder, 201);
ManagedExecution<?> copy = metaStorage.getExecution(copyId);
// Content-wise, the label and tags should be the same
assertThat(copy).usingRecursiveComparison().comparingOnlyFields("label", "tags").isEqualTo(execution);
// However, the object holding the tags must be different, so the two are not linked here
assertThat(copy.getTags()).isNotSameAs(execution.getTags());
// And the ids must be different
assertThat(copy.getId()).isNotSameAs(execution.getId());
}
}
}
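Stripped of the individual assertions, every example above drives StandaloneSupport through the same skeleton. A minimal sketch, using only harness calls that appear in the tests above (resource path and expected values borrowed from the first example):

@Override
public void execute(String name, TestConquery testConquery) throws Exception {
	// Attach to the standalone cluster (manager node plus shard nodes) started for this test.
	final StandaloneSupport conquery = testConquery.getSupport(name);

	// Read and validate the test specification, then import its tables, concepts and contents.
	final String testJson = In.resource("/tests/query/DELETE_IMPORT_TESTS/SIMPLE_TREECONCEPT_Query.test.json").withUTF8().readAll();
	final QueryTest test = (QueryTest) JsonIntegrationTest.readJson(conquery.getDataset(), testJson);
	ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));
	LoadingUtil.importTables(conquery, test.getContent().getTables());
	LoadingUtil.importConcepts(conquery, test.getRawConcepts());
	LoadingUtil.importTableContents(conquery, test.getContent().getTables());
	// Cluster work is asynchronous; always wait for quiescence before asserting.
	conquery.waitUntilWorkDone();

	// Run a query and assert its result count, final execution state, and HTTP status code.
	final Query query = IntegrationUtils.parseQuery(conquery, test.getRawQuery());
	IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
}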