Use of com.bakdata.conquery.models.worker.Namespace in project conquery by bakdata.
The class AdminDatasetProcessor, method deleteDataset:
/**
 * Delete the dataset if it is empty; otherwise reject with 409 CONFLICT.
 */
public synchronized void deleteDataset(Dataset dataset) {
    final Namespace namespace = datasetRegistry.get(dataset.getId());
    if (!namespace.getStorage().getTables().isEmpty()) {
        throw new WebApplicationException(
                String.format("Cannot delete dataset `%s`, because it still has tables: `%s`",
                              dataset.getId(),
                              namespace.getStorage().getTables().stream()
                                       .map(Table::getId)
                                       .map(Objects::toString)
                                       .collect(Collectors.joining(","))),
                Response.Status.CONFLICT);
    }
    datasetRegistry.removeNamespace(dataset.getId());
}
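For context, a hypothetical caller sketch (the tryDeleteDataset helper is an illustration, not part of conquery) showing how the 409 CONFLICT raised above can be handled:

import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response;

// Hypothetical helper, not part of conquery: attempt the deletion and report
// whether it succeeded instead of propagating the CONFLICT.
static boolean tryDeleteDataset(AdminDatasetProcessor processor, Dataset dataset) {
    try {
        processor.deleteDataset(dataset);
        return true;
    }
    catch (WebApplicationException e) {
        // deleteDataset signals a non-empty dataset with 409 CONFLICT.
        if (e.getResponse().getStatus() == Response.Status.CONFLICT.getStatusCode()) {
            return false;
        }
        throw e;
    }
}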
Use of com.bakdata.conquery.models.worker.Namespace in project conquery by bakdata.
The class ManagerNode, method loadNamespaces:
public void loadNamespaces() {
    final Collection<NamespaceStorage> storages = config.getStorage().loadNamespaceStorages();
    final ObjectWriter objectWriter = config.configureObjectMapper(Jackson.copyMapperAndInjectables(Jackson.BINARY_MAPPER))
                                            .writerWithView(InternalOnly.class);
    for (NamespaceStorage namespaceStorage : storages) {
        Namespace ns = new Namespace(namespaceStorage, config.isFailOnError(), objectWriter);
        datasetRegistry.add(ns);
    }
}
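The writerWithView(InternalOnly.class) call restricts which fields the resulting ObjectWriter serializes. A minimal, self-contained sketch of the same Jackson @JsonView mechanism (the Views and Message classes here are illustrative, not conquery's):

import com.fasterxml.jackson.annotation.JsonView;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ViewDemo {
    // Illustrative view markers, mirroring the role of InternalOnly above.
    static class Views {
        static class Public {}
        static class Internal extends Public {}
    }

    static class Message {
        @JsonView(Views.Public.class)
        public String subject = "hello";
        @JsonView(Views.Internal.class)
        public String internalId = "42";
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // Public view: internalId is filtered out -> {"subject":"hello"}
        System.out.println(mapper.writerWithView(Views.Public.class).writeValueAsString(new Message()));
        // Internal view (extends Public): both fields are serialized.
        System.out.println(mapper.writerWithView(Views.Internal.class).writeValueAsString(new Message()));
    }
}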
Use of com.bakdata.conquery.models.worker.Namespace in project conquery by bakdata.
The class ResultArrowProcessor, method getArrowResult:
public static <E extends ManagedExecution<?> & SingleTableResult> Response getArrowResult(
        Function<OutputStream, Function<VectorSchemaRoot, ArrowWriter>> writerProducer,
        Subject subject,
        E exec,
        Dataset dataset,
        DatasetRegistry datasetRegistry,
        boolean pretty,
        String fileExtension,
        MediaType mediaType,
        ConqueryConfig config) {

    final Namespace namespace = datasetRegistry.get(dataset.getId());
    ConqueryMDC.setLocation(subject.getName());
    log.info("Downloading results for {} on dataset {}", exec, dataset);

    subject.authorize(dataset, Ability.READ);
    subject.authorize(dataset, Ability.DOWNLOAD);
    subject.authorize(exec, Ability.READ);
    // Check if the subject is permitted to download on all datasets that were referenced by the query.
    authorizeDownloadDatasets(subject, exec);

    if (!(exec instanceof ManagedQuery || (exec instanceof ManagedForm && ((ManagedForm) exec).getSubQueries().size() == 1))) {
        return Response.status(HttpStatus.SC_UNPROCESSABLE_ENTITY, "Execution result is not a single Table").build();
    }

    IdPrinter idPrinter = config.getFrontend().getQueryUpload().getIdPrinter(subject, exec, namespace);
    // Get the locale extracted by the LocaleFilter.
    final Locale locale = I18n.LOCALE.get();
    PrintSettings settings = new PrintSettings(pretty, locale, datasetRegistry, config, idPrinter::createId);

    // Collect ResultInfos for id columns and result columns.
    final List<ResultInfo> resultInfosId = config.getFrontend().getQueryUpload().getIdResultInfos();
    final List<ResultInfo> resultInfosExec = exec.getResultInfos();

    StreamingOutput out = output -> renderToStream(
            writerProducer.apply(output),
            settings,
            config.getArrow().getBatchSize(),
            resultInfosId,
            resultInfosExec,
            exec.streamResults());

    return makeResponseWithFileName(out, exec.getLabelWithoutAutoLabelSuffix(), fileExtension, mediaType, ResultUtil.ContentDispositionOption.ATTACHMENT);
}
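The curried writerProducer parameter lets callers pick the concrete Arrow output format. A plausible sketch of supplying it for the Arrow IPC streaming format (the factory class is an assumption for illustration; only the lambda shape is dictated by the signature above):

import java.io.OutputStream;
import java.util.function.Function;

import org.apache.arrow.vector.VectorSchemaRoot;
import org.apache.arrow.vector.ipc.ArrowStreamWriter;
import org.apache.arrow.vector.ipc.ArrowWriter;

// Hypothetical factory: bind the response OutputStream first, then the
// VectorSchemaRoot once the result schema is known.
class ArrowWriterFactories {
    static final Function<OutputStream, Function<VectorSchemaRoot, ArrowWriter>> STREAM_FORMAT =
            out -> root -> new ArrowStreamWriter(root, /* no dictionaries */ null, out);
}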
Use of com.bakdata.conquery.models.worker.Namespace in project conquery by bakdata.
The class ResultCsvProcessor, method getResult:
public <E extends ManagedExecution<?> & SingleTableResult> Response getResult(
        Subject subject,
        Dataset dataset,
        E exec,
        String userAgent,
        String queryCharset,
        boolean pretty) {

    final Namespace namespace = datasetRegistry.get(dataset.getId());
    ConqueryMDC.setLocation(subject.getName());
    log.info("Downloading results for {} on dataset {}", exec, dataset);

    subject.authorize(namespace.getDataset(), Ability.READ);
    subject.authorize(namespace.getDataset(), Ability.DOWNLOAD);
    subject.authorize(exec, Ability.READ);
    // Check if subject is permitted to download on all datasets that were referenced by the query.
    authorizeDownloadDatasets(subject, exec);

    IdPrinter idPrinter = config.getFrontend().getQueryUpload().getIdPrinter(subject, exec, namespace);
    // Get the locale extracted by the LocaleFilter.
    final Locale locale = I18n.LOCALE.get();
    PrintSettings settings = new PrintSettings(pretty, locale, datasetRegistry, config, idPrinter::createId);

    Charset charset = determineCharset(userAgent, queryCharset);

    StreamingOutput out = os -> {
        try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(os, charset))) {
            CsvRenderer renderer = new CsvRenderer(config.getCsv().createWriter(writer), settings);
            renderer.toCSV(config.getFrontend().getQueryUpload().getIdResultInfos(), exec.getResultInfos(), exec.streamResults());
        }
        catch (EofException e) {
            log.info("User canceled download");
        }
        catch (Exception e) {
            throw new WebApplicationException("Failed to load result", e);
        }
    };

    return makeResponseWithFileName(out, exec.getLabelWithoutAutoLabelSuffix(), "csv", new MediaType("text", "csv", charset.toString()), ResultUtil.ContentDispositionOption.ATTACHMENT);
}
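determineCharset is not shown in this excerpt. A plausible stand-in, under the assumption that an explicit queryCharset parameter wins and the User-Agent only drives a fallback (the heuristic below is an assumption for illustration, not the actual implementation):

import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;

// Hypothetical stand-in for determineCharset.
static Charset determineCharsetSketch(String userAgent, String queryCharset) {
    if (queryCharset != null) {
        // An explicitly requested charset takes precedence.
        return Charset.forName(queryCharset);
    }
    // Older Excel/Windows clients often expect Windows-1252 rather than UTF-8.
    if (userAgent != null && userAgent.contains("Windows")) {
        return Charset.forName("windows-1252");
    }
    return StandardCharsets.UTF_8;
}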
Use of com.bakdata.conquery.models.worker.Namespace in project conquery by bakdata.
The class ConceptUpdateAndDeletionTest, method execute:
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
    StandaloneSupport conquery = testConquery.getSupport(name);

    // Read two JSONs with different trees.
    final String testJson = In.resource("/tests/query/UPDATE_CONCEPT_TESTS/SIMPLE_TREECONCEPT_Query.json").withUTF8().readAll();
    final String testJson2 = In.resource("/tests/query/UPDATE_CONCEPT_TESTS/SIMPLE_TREECONCEPT_2_Query.json").withUTF8().readAll();

    final Dataset dataset = conquery.getDataset();
    final Namespace namespace = conquery.getNamespace();
    final ConceptId conceptId = ConceptId.Parser.INSTANCE.parse(dataset.getName(), "test_tree");
    final Concept<?> concept;

    final QueryTest test = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson);
    final QueryTest test2 = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson2);

    // Manually import data, so we can do our own work.
    {
        ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));
        importSecondaryIds(conquery, test.getContent().getSecondaryIds());
        conquery.waitUntilWorkDone();

        LoadingUtil.importTables(conquery, test.getContent().getTables());
        conquery.waitUntilWorkDone();

        LoadingUtil.importConcepts(conquery, test.getRawConcepts());
        conquery.waitUntilWorkDone();
        assertThat(namespace.getStorage().getConcept(conceptId)).isNotNull();

        LoadingUtil.importTableContents(conquery, test.getContent().getTables());
        conquery.waitUntilWorkDone();
    }

    final Query query = IntegrationUtils.parseQuery(conquery, test.getRawQuery());

    // State before update.
    {
        log.info("Checking state before update");

        // Must contain the concept.
        assertThat(namespace.getStorage().getAllConcepts())
                .filteredOn(con -> con.getId().equals(conceptId))
                .isNotEmpty();
        assertThat(namespace.getStorage().getCentralRegistry().getOptional(conceptId)).isNotEmpty();

        for (ShardNode node : conquery.getShardNodes()) {
            for (Worker value : node.getWorkers().getWorkers().values()) {
                if (!value.getInfo().getDataset().equals(dataset.getId())) {
                    continue;
                }
                final ModificationShieldedWorkerStorage workerStorage = value.getStorage();

                assertThat(workerStorage.getCentralRegistry().getOptional(conceptId)).isNotEmpty();
                assertThat(workerStorage.getAllCBlocks())
                        .describedAs("CBlocks for Worker %s", value.getInfo().getId())
                        .filteredOn(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId))
                        .isNotEmpty();
            }
        }

        log.info("Executing query before update");
        IntegrationUtils.assertQueryResult(conquery, query, 1L, ExecutionState.DONE, conquery.getTestUser(), 201);
        conquery.waitUntilWorkDone();
        log.info("Query before update executed");
    }

    // Load a different concept with the same id (its children differ: "C1" matches more rows than "A1" did).
    // The update first deletes the old concept and then adds the new one, so concept deletion is also covered here.
    {
        log.info("Executing update");
        LoadingUtil.updateConcepts(conquery, test2.getRawConcepts(), Response.Status.Family.SUCCESSFUL);
        conquery.waitUntilWorkDone();
        log.info("Update executed");
    }

    // Check state after update.
    {
        log.info("Checking state after update");

        // Must still contain the concept.
        assertThat(namespace.getStorage().getAllConcepts())
                .filteredOn(con -> con.getId().equals(conceptId))
                .isNotEmpty();
        assertThat(namespace.getStorage().getCentralRegistry().getOptional(conceptId)).isNotEmpty();

        for (ShardNode node : conquery.getShardNodes()) {
            for (Worker value : node.getWorkers().getWorkers().values()) {
                if (!value.getInfo().getDataset().equals(dataset.getId())) {
                    continue;
                }
                final ModificationShieldedWorkerStorage workerStorage = value.getStorage();

                assertThat(workerStorage.getCentralRegistry().getOptional(conceptId)).isNotEmpty();
                assertThat(workerStorage.getAllCBlocks())
                        .describedAs("CBlocks for Worker %s", value.getInfo().getId())
                        .filteredOn(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId))
                        .isNotEmpty();
            }
        }

        log.info("Executing query after update");
        // Assert that the query now matches 2 results instead of 1.
        IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
        conquery.waitUntilWorkDone();
        log.info("Query after update executed");
    }

    // Restart conquery and assert again that the data is correct.
    {
        testConquery.shutdown();
        testConquery.beforeAll();
        conquery = testConquery.openDataset(dataset.getId());

        log.info("Checking state after restart");
        {
            // Must contain the concept.
            assertThat(conquery.getNamespace().getStorage().getAllConcepts())
                    .filteredOn(con -> con.getId().equals(conceptId))
                    .isNotEmpty();
            assertThat(conquery.getNamespace().getStorage().getCentralRegistry().getOptional(conceptId)).isNotEmpty();

            for (ShardNode node : conquery.getShardNodes()) {
                for (Worker value : node.getWorkers().getWorkers().values()) {
                    if (!value.getInfo().getDataset().equals(dataset.getId())) {
                        continue;
                    }
                    final ModificationShieldedWorkerStorage workerStorage = value.getStorage();

                    assertThat(workerStorage.getCentralRegistry().getOptional(conceptId)).isNotEmpty();
                    assertThat(workerStorage.getAllCBlocks())
                            .describedAs("CBlocks for Worker %s", value.getInfo().getId())
                            .filteredOn(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId))
                            .isNotEmpty();
                }
            }

            log.info("Executing query after restart");
            // Re-assert state.
            IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
            conquery.waitUntilWorkDone();
        }
    }

    // Delete the concept.
    {
        log.info("Issuing deletion of concept {}", conceptId);
        concept = Objects.requireNonNull(conquery.getNamespace().getStorage().getConcept(conceptId));
        conquery.getDatasetsProcessor().deleteConcept(concept);
        conquery.waitUntilWorkDone();
    }

    // Check state after deletion.
    {
        log.info("Checking state after deletion");

        // We've deleted the concept, so it and its associated CBlocks should be gone.
        assertThat(conquery.getNamespace().getStorage().getAllConcepts())
                .filteredOn(con -> con.getId().equals(conceptId))
                .isEmpty();
        assertThat(conquery.getNamespace().getStorage().getCentralRegistry().getOptional(conceptId)).isEmpty();
        assertThat(conquery.getShardNodes().stream()
                           .flatMap(node -> node.getWorkers().getWorkers().values().stream())
                           .filter(worker -> worker.getInfo().getDataset().equals(dataset.getId()))
                           .map(Worker::getStorage))
                .noneMatch(workerStorage -> workerStorage.getConcept(conceptId) != null)
                .noneMatch(workerStorage -> workerStorage.getAllCBlocks().stream()
                                                         .anyMatch(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId)));

        log.info("Executing query after deletion (EXPECTING AN EXCEPTION IN THE LOGS!)");
        // Issue the query and assert that it fails.
        IntegrationUtils.assertQueryResult(conquery, query, 0L, ExecutionState.FAILED, conquery.getTestUser(), 400);
    }

    // Restart conquery and assert again that the state after deletion was maintained.
    {
        testConquery.shutdown();
        testConquery.beforeAll();
        conquery = testConquery.openDataset(dataset.getId());

        // Check state after restart.
        log.info("Checking state after restart");

        // We've deleted the concept, so it and its associated CBlocks should be gone.
        assertThat(conquery.getNamespace().getStorage().getAllConcepts())
                .filteredOn(con -> con.getId().equals(conceptId))
                .isEmpty();
        assertThat(conquery.getNamespace().getStorage().getCentralRegistry().getOptional(conceptId)).isEmpty();
        assertThat(conquery.getShardNodes().stream()
                           .flatMap(node -> node.getWorkers().getWorkers().values().stream())
                           .filter(worker -> worker.getInfo().getDataset().equals(dataset.getId()))
                           .map(Worker::getStorage))
                .noneMatch(workerStorage -> workerStorage.getConcept(conceptId) != null)
                .noneMatch(workerStorage -> workerStorage.getAllCBlocks().stream()
                                                         .anyMatch(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId)));

        log.info("Executing query after restart (EXPECTING AN EXCEPTION IN THE LOGS!)");
        // Issue the query and assert that it fails.
        IntegrationUtils.assertQueryResult(conquery, query, 0L, ExecutionState.FAILED, conquery.getTestUser(), 400);
    }
}
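The per-worker assertion loop appears three times in this test. A hypothetical extraction of it as a helper, purely a refactoring sketch of the code above using the same AssertJ calls:

import static org.assertj.core.api.Assertions.assertThat;

// Hypothetical helper: assert that every worker of the given dataset still
// knows the concept and holds CBlocks for its connectors.
private static void assertWorkersKnowConcept(StandaloneSupport conquery, Dataset dataset, ConceptId conceptId) {
    for (ShardNode node : conquery.getShardNodes()) {
        for (Worker worker : node.getWorkers().getWorkers().values()) {
            if (!worker.getInfo().getDataset().equals(dataset.getId())) {
                continue; // worker belongs to a different dataset
            }
            ModificationShieldedWorkerStorage storage = worker.getStorage();
            assertThat(storage.getCentralRegistry().getOptional(conceptId)).isNotEmpty();
            assertThat(storage.getAllCBlocks())
                    .describedAs("CBlocks for Worker %s", worker.getInfo().getId())
                    .filteredOn(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId))
                    .isNotEmpty();
        }
    }
}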