Usage of com.bakdata.conquery.models.worker.DatasetRegistry in the conquery project by bakdata:
class ResultExcelProcessor, method getExcelResult.
/**
 * Renders the single-table result of the given execution as an Excel (xlsx) download.
 *
 * @param subject   the authenticated subject requesting the download
 * @param exec      the execution whose result is rendered (must produce a single table)
 * @param datasetId the dataset the execution belongs to
 * @param pretty    whether values are formatted for human readability
 * @return a streaming attachment response carrying the xlsx file
 */
public <E extends ManagedExecution<?> & SingleTableResult> Response getExcelResult(Subject subject, E exec, DatasetId datasetId, boolean pretty) {
	// Tag subsequent log output with the requesting subject.
	ConqueryMDC.setLocation(subject.getName());

	final Namespace namespace = datasetRegistry.get(datasetId);
	final Dataset dataset = namespace.getDataset();

	// The subject needs read+download on the dataset and read on the execution itself.
	subject.authorize(dataset, Ability.READ);
	subject.authorize(dataset, Ability.DOWNLOAD);
	subject.authorize(exec, Ability.READ);

	final IdPrinter idPrinter = config.getFrontend().getQueryUpload().getIdPrinter(subject, exec, namespace);
	// Locale is assumed to have been set up-front (presumably by a request filter) — TODO confirm.
	final Locale locale = I18n.LOCALE.get();
	final PrintSettings printSettings = new PrintSettings(pretty, locale, datasetRegistry, config, idPrinter::createId);

	final ExcelRenderer renderer = new ExcelRenderer(config.getExcel(), printSettings);
	// Defer rendering until the response body is actually consumed.
	final StreamingOutput streamingOutput = output -> renderer.renderToStream(config.getFrontend().getQueryUpload().getIdResultInfos(), (ManagedExecution<?> & SingleTableResult) exec, output);

	return makeResponseWithFileName(streamingOutput, exec.getLabelWithoutAutoLabelSuffix(), "xlsx", MEDIA_TYPE, ResultUtil.ContentDispositionOption.ATTACHMENT);
}
Usage of com.bakdata.conquery.models.worker.DatasetRegistry in the conquery project by bakdata:
class CopyUserTest, method testUserCopy.
@Test
void testUserCopy() {
	final DatasetRegistry registry = new DatasetRegistry(0);
	final MetaStorage storage = new NonPersistentStoreFactory().createMetaStorage();
	registry.setMetaStorage(storage);

	// A role granting READ on dataset0.
	final Role role = new Role("role", "role", storage);
	storage.addRole(role);
	role.addPermission(DatasetPermission.onInstance(Ability.READ, new DatasetId("dataset0")));

	// A group granting READ on dataset1.
	final Group group = new Group("group", "group", storage);
	storage.addGroup(group);
	group.addPermission(DatasetPermission.onInstance(Ability.READ, new DatasetId("dataset1")));

	// The original user holds the role and is a member of the group.
	final User originUser = new User("user", "user", storage);
	storage.addUser(originUser);
	originUser.addRole(role);
	group.addMember(originUser);

	// Perform the flat copy under test.
	final User copy = AuthorizationController.flatCopyUser(originUser, "copytest", storage);

	// The copy must be a distinct user ...
	assertThat(copy).usingRecursiveComparison().isNotEqualTo(originUser);
	// ... must not retain any role/group memberships ...
	assertThat(group.containsMember(copy)).isFalse();
	assertThat(copy.getRoles()).isEmpty();
	// ... but must carry all of the original's effective permissions as direct (flattened) permissions.
	assertThat(copy.getPermissions()).containsExactlyInAnyOrderElementsOf(originUser.getEffectivePermissions());
}
Usage of com.bakdata.conquery.models.worker.DatasetRegistry in the conquery project by bakdata:
class AdminResource, method getQueries.
/**
 * Lists execution statuses for the admin overview.
 *
 * @param currentUser the authenticated subject
 * @param limit       maximum number of results to return (defaults to 100)
 * @param since       optional ISO-8601 date; only executions created on that day are returned
 *                    (defaults to today)
 * @return the matching executions as full status objects
 */
@GET
@Path("/queries")
public FullExecutionStatus[] getQueries(@Auth Subject currentUser, @QueryParam("limit") OptionalLong limit, @QueryParam("since") Optional<String> since) {
	final MetaStorage storage = processor.getStorage();
	final DatasetRegistry datasetRegistry = processor.getDatasetRegistry();

	// Parse the filter date ONCE instead of re-parsing the `since` string for every
	// execution inside the stream filter (the original re-evaluated it per element).
	final LocalDate sinceDay = since.map(LocalDate::parse).orElse(LocalDate.now());

	return storage.getAllExecutions().stream()
				  .map(execution -> {
					  try {
						  return execution.buildStatusFull(storage, currentUser, datasetRegistry, processor.getConfig());
					  }
					  catch (ConqueryError e) {
						  // Initialization of execution probably failed, so we construct a status based on the overview status
						  final FullExecutionStatus fullExecutionStatus = new FullExecutionStatus();
						  execution.setStatusBase(currentUser, fullExecutionStatus);
						  fullExecutionStatus.setStatus(ExecutionState.FAILED);
						  fullExecutionStatus.setError(e);
						  return fullExecutionStatus;
					  }
				  })
				  .filter(status -> status.getCreatedAt().toLocalDate().isEqual(sinceDay))
				  .limit(limit.orElse(100))
				  .toArray(FullExecutionStatus[]::new);
}
Usage of com.bakdata.conquery.models.worker.DatasetRegistry in the conquery project by bakdata:
class ManagerNode, method run.
/**
 * Dropwizard entry point: wires up storage, authorization, admin tooling and REST resources.
 * The initialization order below is significant — the registry and storage must exist before
 * they are injected into the object mapper, and the AdminServlet must exist before the
 * authentication realms are initialized.
 *
 * @param config      the fully parsed application configuration
 * @param environment the Dropwizard environment to register components with
 * @throws InterruptedException if startup is interrupted
 */
public void run(ConqueryConfig config, Environment environment) throws InterruptedException {
this.environment = environment;
validator = environment.getValidator();
client = new JerseyClientBuilder(environment).using(config.getJerseyClient()).build(getName());
// Instantiate DatasetRegistry and MetaStorage so they are ready for injection into the object mapper (API + Storage)
// The validator is already injected at this point see Conquery.java
datasetRegistry = new DatasetRegistry(config.getCluster().getEntityBucketSize());
storage = new MetaStorage(datasetRegistry);
// Make both available for deserialization of stored/transferred objects.
datasetRegistry.injectInto(environment.getObjectMapper());
storage.injectInto(environment.getObjectMapper());
jobManager = new JobManager("ManagerNode", config.isFailOnError());
formScanner = new FormScanner();
this.config = config;
config.initialize(this);
// Initialization of internationalization
I18n.init();
RESTServer.configure(config, environment.jersey().getResourceConfig());
maintenanceService = environment.lifecycle().scheduledExecutorService("Maintenance Service").build();
environment.lifecycle().manage(this);
// Load persisted state before authorization is set up, since realms may reference it.
loadNamespaces();
loadMetaStorage();
authController = new AuthorizationController(storage, config.getAuthorizationRealms());
environment.lifecycle().manage(authController);
// Separate unprotected auth servlets for the admin and API sides.
unprotectedAuthAdmin = AuthServlet.generalSetup(environment.metrics(), config, environment.admin(), environment.getObjectMapper());
unprotectedAuthApi = AuthServlet.generalSetup(environment.metrics(), config, environment.servlets(), environment.getObjectMapper());
// Create AdminServlet first to make it available to the realms
admin = new AdminServlet(this);
authController.externalInit(this, config.getAuthenticationRealms());
// Register default components for the admin interface
admin.register(this);
log.info("Registering ResourcesProvider");
// Discover and register all ResourcesProvider implementations; a single failing
// provider is logged but does not abort startup.
for (Class<? extends ResourcesProvider> resourceProvider : CPSTypeIdResolver.listImplementations(ResourcesProvider.class)) {
try {
ResourcesProvider provider = resourceProvider.getConstructor().newInstance();
provider.registerResources(this);
providers.add(provider);
} catch (Exception e) {
log.error("Failed to register Resource {}", resourceProvider, e);
}
}
// Initial form scan; unlike resource registration, a failure here is fatal.
try {
formScanner.execute(null, null);
} catch (Exception e) {
Throwables.throwIfUnchecked(e);
throw new RuntimeException(e);
}
// Admin maintenance tasks, reachable via the Dropwizard task endpoint.
environment.admin().addTask(formScanner);
environment.admin().addTask(new QueryCleanupTask(storage, Duration.of(config.getQueries().getOldQueriesTime().getQuantity(), config.getQueries().getOldQueriesTime().getUnit().toChronoUnit())));
environment.admin().addTask(new PermissionCleanupTask(storage));
environment.admin().addTask(new ClearFilterSourceSearch());
environment.admin().addTask(new ReportConsistencyTask(datasetRegistry));
// Hook graceful shutdown into the server lifecycle.
ShutdownTask shutdown = new ShutdownTask();
environment.admin().addTask(shutdown);
environment.lifecycle().addServerLifecycleListener(shutdown);
}
Usage of com.bakdata.conquery.models.worker.DatasetRegistry in the conquery project by bakdata:
class ResultArrowProcessor, method getArrowResult.
/**
 * Renders the single-table result of the given execution in Arrow format (file or stream,
 * depending on the supplied writer factory) as a download.
 *
 * @param writerProducer factory creating an {@link ArrowWriter} for a given output stream and schema
 * @param subject        the authenticated subject requesting the download
 * @param exec           the execution whose result is rendered
 * @param dataset        the dataset the execution belongs to
 * @param datasetRegistry registry used to resolve the dataset's namespace
 * @param pretty         whether values are formatted for human readability
 * @param fileExtension  file extension used for the download file name
 * @param mediaType      content type of the response
 * @param config         application configuration
 * @return a streaming attachment response, or 422 if the execution is not a single-table result
 */
public static <E extends ManagedExecution<?> & SingleTableResult> Response getArrowResult(Function<OutputStream, Function<VectorSchemaRoot, ArrowWriter>> writerProducer, Subject subject, E exec, Dataset dataset, DatasetRegistry datasetRegistry, boolean pretty, String fileExtension, MediaType mediaType, ConqueryConfig config) {
	final Namespace namespace = datasetRegistry.get(dataset.getId());

	// Tag subsequent log output with the requesting subject.
	ConqueryMDC.setLocation(subject.getName());
	log.info("Downloading results for {} on dataset {}", exec, dataset);

	// The subject needs read+download on the dataset and read on the execution itself.
	subject.authorize(dataset, Ability.READ);
	subject.authorize(dataset, Ability.DOWNLOAD);
	subject.authorize(exec, Ability.READ);

	// Check if subject is permitted to download on all datasets that were referenced by the query
	authorizeDownloadDatasets(subject, exec);

	// Only plain queries and single-subquery forms yield a single table.
	final boolean singleTable = exec instanceof ManagedQuery || (exec instanceof ManagedForm && ((ManagedForm) exec).getSubQueries().size() == 1);
	if (!singleTable) {
		return Response.status(HttpStatus.SC_UNPROCESSABLE_ENTITY, "Execution result is not a single Table").build();
	}

	final IdPrinter idPrinter = config.getFrontend().getQueryUpload().getIdPrinter(subject, exec, namespace);
	// Get the locale extracted by the LocaleFilter
	final Locale locale = I18n.LOCALE.get();
	final PrintSettings printSettings = new PrintSettings(pretty, locale, datasetRegistry, config, idPrinter::createId);

	// Collect ResultInfos for id columns and result columns
	final List<ResultInfo> idInfos = config.getFrontend().getQueryUpload().getIdResultInfos();
	final List<ResultInfo> execInfos = exec.getResultInfos();

	// Defer rendering until the response body is actually consumed.
	final StreamingOutput streamingOutput = output -> renderToStream(writerProducer.apply(output), printSettings, config.getArrow().getBatchSize(), idInfos, execInfos, exec.streamResults());

	return makeResponseWithFileName(streamingOutput, exec.getLabelWithoutAutoLabelSuffix(), fileExtension, mediaType, ResultUtil.ContentDispositionOption.ATTACHMENT);
}
Aggregations