Usage example of com.bakdata.conquery.models.worker.DatasetRegistry in the conquery project by bakdata:
the getResult method of the ResultCsvProcessor class.
/**
 * Streams the result of a single-table execution to the client as a CSV download.
 *
 * <p>Authorization: the subject must hold READ and DOWNLOAD on the execution's dataset,
 * READ on the execution itself, and DOWNLOAD on every dataset the query referenced.
 *
 * @param subject      the requesting subject, used for authorization and MDC logging
 * @param dataset      the dataset the execution belongs to
 * @param exec         the managed execution whose results are rendered
 * @param userAgent    the client's User-Agent header, used to pick a charset
 * @param queryCharset an explicit charset requested via query parameter (may override the User-Agent heuristic)
 * @param pretty       whether values are printed in a human-readable form
 * @return a streaming attachment response with media type {@code text/csv}
 */
public <E extends ManagedExecution<?> & SingleTableResult> Response getResult(Subject subject, Dataset dataset, E exec, String userAgent, String queryCharset, boolean pretty) {
	final Namespace namespace = datasetRegistry.get(dataset.getId());

	ConqueryMDC.setLocation(subject.getName());
	log.info("Downloading results for {} on dataset {}", exec, dataset);

	// Dataset-level read/download rights plus read access on the execution itself.
	subject.authorize(namespace.getDataset(), Ability.READ);
	subject.authorize(namespace.getDataset(), Ability.DOWNLOAD);
	subject.authorize(exec, Ability.READ);

	// Check if subject is permitted to download on all datasets that were referenced by the query
	authorizeDownloadDatasets(subject, exec);

	final IdPrinter idPrinter = config.getFrontend().getQueryUpload().getIdPrinter(subject, exec, namespace);

	// Get the locale extracted by the LocaleFilter
	final Locale locale = I18n.LOCALE.get();
	final PrintSettings printSettings = new PrintSettings(pretty, locale, datasetRegistry, config, idPrinter::createId);
	final Charset charset = determineCharset(userAgent, queryCharset);

	// Render lazily into the response stream; nothing is materialized up front.
	final StreamingOutput output = outputStream -> {
		try (BufferedWriter bufferedWriter = new BufferedWriter(new OutputStreamWriter(outputStream, charset))) {
			final CsvRenderer csvRenderer = new CsvRenderer(config.getCsv().createWriter(bufferedWriter), printSettings);
			csvRenderer.toCSV(config.getFrontend().getQueryUpload().getIdResultInfos(), exec.getResultInfos(), exec.streamResults());
		}
		catch (EofException e) {
			// Client closed the connection mid-stream; not an error on our side.
			log.info("User canceled download");
		}
		catch (Exception e) {
			throw new WebApplicationException("Failed to load result", e);
		}
	};

	return makeResponseWithFileName(output, exec.getLabelWithoutAutoLabelSuffix(), "csv", new MediaType("text", "csv", charset.toString()), ResultUtil.ContentDispositionOption.ATTACHMENT);
}
Usage example of com.bakdata.conquery.models.worker.DatasetRegistry in the conquery project by bakdata:
the createSupport method of the TestConquery class.
/**
 * Creates a {@link StandaloneSupport} for the given dataset, backed by its own
 * temporary working directory, and blocks until all shard nodes have registered
 * a worker for the namespace.
 *
 * @param datasetId the dataset the support operates on
 * @param name      suffix used to derive the support's tmp subdirectory
 * @return the ready-to-use support, also tracked in {@code openSupports}
 */
private synchronized StandaloneSupport createSupport(DatasetId datasetId, String name) {
	final DatasetRegistry registry = standaloneCommand.getManager().getDatasetRegistry();
	final Namespace namespace = registry.get(datasetId);

	assertThat(registry.getShardNodes()).hasSize(2);

	// Each support gets its own tmp subdirectory; an existing one is reused.
	final File supportDir = new File(tmpDir, "tmp_" + name);
	if (supportDir.exists()) {
		log.info("Reusing existing folder {} for Support", supportDir.getPath());
	}
	else if (!supportDir.mkdir()) {
		throw new IllegalStateException("Could not create directory for Support");
	}

	// Clone the config so per-support changes don't leak into the shared one.
	final ConqueryConfig localCfg = Cloner.clone(config, Map.of(Validator.class, standaloneCommand.getManager().getEnvironment().getValidator()), IntegrationTests.MAPPER);

	final StandaloneSupport support = new StandaloneSupport(
			this,
			namespace,
			namespace.getStorage().getDataset(),
			supportDir,
			localCfg,
			standaloneCommand.getManager().getAdmin().getAdminProcessor(),
			standaloneCommand.getManager().getAdmin().getAdminDatasetProcessor(),
			// Getting the User from AuthorizationConfig
			testUser);

	// Wait (up to 5s, polling every 5ms) until every shard node has a worker for this namespace.
	Wait.builder()
		.total(Duration.ofSeconds(5))
		.stepTime(Duration.ofMillis(5))
		.build()
		.until(() -> namespace.getWorkers().size() == namespace.getNamespaces().getShardNodes().size());

	support.waitUntilWorkDone();
	openSupports.add(support);
	return support;
}
Usage example of com.bakdata.conquery.models.worker.DatasetRegistry in the conquery project by bakdata:
the consume method of the FilteringConceptManipulator class.
/**
 * Applies the configured select and table allow/block-lists to the given concept.
 *
 * <p>Selects: the block-list takes precedence over the allow-list (only one is applied);
 * if no select survives filtering, the configured defaults are resolved and set instead.
 * Tables: block-listed connectors are removed, non-allow-listed connectors are removed,
 * and allow-listed tables get their optional {@link TableManipulator} applied.
 *
 * @param concept    the concept to filter in place
 * @param namespaces registry used to resolve default selects and passed on to table manipulators
 * @throws IllegalStateException if filtering leaves the concept without any table
 */
public void consume(CQConcept concept, DatasetRegistry namespaces) {
	final List<Select> selects = concept.getSelects();

	// Block-list wins over allow-list: only one of the two filters is applied.
	if (!selectBlockList.isEmpty()) {
		selects.removeIf(s -> selectBlockList.contains(s.getId()));
	}
	else if (!selectAllowList.isEmpty()) {
		selects.removeIf(s -> !selectAllowList.contains(s.getId()));
	}

	// Add default selects if none is present anymore
	if (selects.isEmpty()) {
		concept.setSelects(selectDefault.stream().map(namespaces::resolve).collect(Collectors.toList()));
	}

	// Handle tables
	final List<CQTable> tables = concept.getTables();
	final Iterator<CQTable> it = tables.iterator();
	while (it.hasNext()) {
		final CQTable table = it.next();

		if (tableBlockList.contains(table.getConnector().getId())) {
			it.remove();
			// BUGFIX: previously execution fell through to the allow-list check after this
			// remove(). A block-listed connector missing from the allow-list then triggered a
			// second remove() on the same element (IllegalStateException), and a block-listed
			// but allow-listed connector had its manipulator applied to an already-removed table.
			continue;
		}

		if (!tableAllowList.containsKey(table.getConnector().getId())) {
			it.remove();
		}
		else {
			// If table is allowlisted apply a table manipulator if one exists
			final TableManipulator tableMan = tableAllowList.get(table.getConnector().getId());
			if (tableMan != null) {
				tableMan.consume(table, namespaces);
			}
		}
	}

	if (tables.isEmpty()) {
		throw new IllegalStateException(String.format("After filtering the tables of concept %s, no table was left in the concept. ConceptManipulator: %s", concept, this.toString()));
	}
}
Usage example of com.bakdata.conquery.models.worker.DatasetRegistry in the conquery project by bakdata:
the execute method of the RestartTest class.
/**
 * Integration test verifying that state survives a full shutdown/restart cycle:
 * executions, the entity id-mapping, and the auth store (users, roles, groups and
 * their permissions — including that deletions stay deleted) must all be identical
 * after the cluster is brought back up.
 */
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
// read test specification
String testJson = In.resource("/tests/query/RESTART_TEST_DATA/SIMPLE_TREECONCEPT_Query.json").withUTF8().readAll();
Validator validator = Validators.newValidator();
EntityIdMap entityIdMap = IdMapSerialisationTest.createTestPersistentMap();
ManagerNode manager = testConquery.getStandaloneCommand().getManager();
AdminDatasetProcessor adminDatasetProcessor = manager.getAdmin().getAdminDatasetProcessor();
AdminProcessor adminProcessor = manager.getAdmin().getAdminProcessor();
StandaloneSupport conquery = testConquery.getSupport(name);
DatasetId dataset = conquery.getDataset().getId();
ConqueryTestSpec test = JsonIntegrationTest.readJson(dataset, testJson);
ValidatorHelper.failOnError(log, validator.validate(test));
test.importRequiredData(conquery);
// Run the query once before the restart; the execution count is compared afterwards.
test.executeTest(conquery);
final int numberOfExecutions = conquery.getMetaStorage().getAllExecutions().size();
// IDMapping Testing
NamespaceStorage namespaceStorage = conquery.getNamespaceStorage();
namespaceStorage.updateIdMapping(entityIdMap);
// Six datasets: one per principal kind (user/role/group) x (kept/deleted) for permission checks below.
final Dataset dataset1 = adminDatasetProcessor.addDataset(TEST_DATASET_1);
final Dataset dataset2 = adminDatasetProcessor.addDataset(TEST_DATASET_2);
final Dataset dataset3 = adminDatasetProcessor.addDataset(TEST_DATASET_3);
final Dataset dataset4 = adminDatasetProcessor.addDataset(TEST_DATASET_4);
final Dataset dataset5 = adminDatasetProcessor.addDataset(TEST_DATASET_5);
final Dataset dataset6 = adminDatasetProcessor.addDataset(TEST_DATASET_6);
MetaStorage storage = conquery.getMetaStorage();
// For each principal kind, create one instance that stays and one that gets deleted pre-restart.
Role role = new Role("role", "ROLE", storage);
Role roleToDelete = new Role("roleDelete", "ROLE_DELETE", storage);
User user = new User("user@test.email", "USER", storage);
User userToDelete = new User("userDelete@test.email", "USER_DELETE", storage);
Group group = new Group("group", "GROUP", storage);
Group groupToDelete = new Group("groupDelete", "GROUP_DELETE", storage);
{
// Auth testing (deletion and permission grant)
// build constellation
// TODO USE APIS
adminProcessor.addUser(user);
adminProcessor.addUser(userToDelete);
adminProcessor.addRole(role);
adminProcessor.addRole(roleToDelete);
adminProcessor.addGroup(group);
adminProcessor.addGroup(groupToDelete);
// Cross-wire every user/group with both roles so deletions exercise membership cleanup.
adminProcessor.addRoleTo(user, role);
adminProcessor.addRoleTo(user, roleToDelete);
adminProcessor.addRoleTo(userToDelete, role);
adminProcessor.addRoleTo(userToDelete, roleToDelete);
adminProcessor.addRoleTo(group, role);
adminProcessor.addRoleTo(group, roleToDelete);
adminProcessor.addRoleTo(groupToDelete, role);
adminProcessor.addRoleTo(groupToDelete, roleToDelete);
adminProcessor.addUserToGroup(group, user);
adminProcessor.addUserToGroup(group, userToDelete);
adminProcessor.addUserToGroup(groupToDelete, user);
adminProcessor.addUserToGroup(groupToDelete, userToDelete);
// Adding Permissions
// One dataset-READ permission per principal; grants on deleted principals must vanish.
adminProcessor.createPermission(user, dataset1.createPermission(Ability.READ.asSet()));
adminProcessor.createPermission(userToDelete, dataset2.createPermission(Ability.READ.asSet()));
adminProcessor.createPermission(role, dataset3.createPermission(Ability.READ.asSet()));
adminProcessor.createPermission(roleToDelete, dataset4.createPermission(Ability.READ.asSet()));
adminProcessor.createPermission(group, dataset5.createPermission(Ability.READ.asSet()));
adminProcessor.createPermission(groupToDelete, dataset6.createPermission(Ability.READ.asSet()));
// Delete entities
// TODO use API
adminProcessor.deleteUser(userToDelete);
adminProcessor.deleteRole(roleToDelete);
adminProcessor.deleteGroup(groupToDelete);
}
// Full shutdown and cold restart — everything below must come back from persisted storage.
log.info("Shutting down for restart");
testConquery.shutdown();
log.info("Restarting");
testConquery.beforeAll();
final StandaloneSupport support = testConquery.openDataset(dataset);
log.info("Restart complete");
DatasetRegistry datasetRegistry = support.getDatasetsProcessor().getDatasetRegistry();
assertThat(support.getMetaStorage().getAllExecutions().size()).as("Executions after restart").isEqualTo(numberOfExecutions);
// The query must still be executable against the restarted cluster.
test.executeTest(support);
{
// Auth actual tests
User userStored = storage.getUser(user.getId());
assertThat(userStored).isEqualTo(user);
assertThat(storage.getRole(role.getId())).isEqualTo(role);
assertThat(storage.getGroup(group.getId())).isEqualTo(group);
assertThat(storage.getUser(userToDelete.getId())).as("deleted user should stay deleted").isNull();
assertThat(storage.getRole(roleToDelete.getId())).as("deleted role should stay deleted").isNull();
assertThat(storage.getGroup(groupToDelete.getId())).as("deleted group should stay deleted").isNull();
// Permission resolution: grants reaching the user via surviving principals (direct, role,
// group) hold; grants that came only via deleted principals must be gone.
assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_1.getId()).getDataset(), Ability.READ)).isTrue();
assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_2.getId()).getDataset(), Ability.READ)).isFalse();
assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_3.getId()).getDataset(), Ability.READ)).isTrue();
assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_4.getId()).getDataset(), Ability.READ)).isFalse();
assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_5.getId()).getDataset(), Ability.READ)).isTrue();
assertThat(userStored.isPermitted(datasetRegistry.get(TEST_DATASET_6.getId()).getDataset(), Ability.READ)).isFalse();
}
// The id-mapping written before the restart must round-trip unchanged.
EntityIdMap entityIdMapAfterRestart = conquery.getNamespaceStorage().getIdMapping();
assertThat(entityIdMapAfterRestart).isEqualTo(entityIdMap);
// We need to reassign the dataset processor because the instance prio to the restart became invalid
adminDatasetProcessor = testConquery.getStandaloneCommand().getManager().getAdmin().getAdminDatasetProcessor();
// Cleanup
adminDatasetProcessor.deleteDataset(dataset1);
adminDatasetProcessor.deleteDataset(dataset2);
adminDatasetProcessor.deleteDataset(dataset3);
adminDatasetProcessor.deleteDataset(dataset4);
adminDatasetProcessor.deleteDataset(dataset5);
adminDatasetProcessor.deleteDataset(dataset6);
}
Aggregations