Usage example of com.bakdata.conquery.models.datasets.concepts.Concept in the conquery project by bakdata:
class LoadingUtil, method importConcepts.
/**
 * Parses the given raw concept JSON and registers every resulting concept
 * with the support's dataset via the datasets processor.
 *
 * @param support     the running test environment providing dataset and processor
 * @param rawConcepts raw JSON array of concept definitions to parse
 * @throws JSONException if a concept definition is not valid JSON
 * @throws IOException   if reading the JSON fails
 */
public static void importConcepts(StandaloneSupport support, ArrayNode rawConcepts) throws JSONException, IOException {
	final List<Concept<?>> parsedConcepts = ConqueryTestSpec.parseSubTreeList(
			support, rawConcepts, Concept.class, parsed -> parsed.setDataset(support.getDataset())
	);

	final Dataset targetDataset = support.getDataset();

	for (final Concept<?> parsed : parsedConcepts) {
		support.getDatasetsProcessor().addConcept(targetDataset, parsed);
	}
}
Usage example of com.bakdata.conquery.models.datasets.concepts.Concept in the conquery project by bakdata:
class CQConcept, method createQueryPlan.
@Override
public QPNode createQueryPlan(QueryPlanContext context, ConceptQueryPlan plan) {
	// Aggregators declared on the concept itself; they are repeated for every table below
	// and finally linked to the merged out-node (for Exists) at the end of this method.
	final List<Aggregator<?>> conceptAggregators = createAggregators(plan, selects);

	final List<QPNode> tableNodes = new ArrayList<>();
	for (CQTable table : tables) {
		final List<FilterNode<?>> filters = table.getFilters().stream()
												 .map(FilterValue::createNode)
												 .collect(Collectors.toList());

		// add filter to children; start from a copy of the concept-level aggregators.
		final List<Aggregator<?>> aggregators = new ArrayList<>(conceptAggregators);

		final List<Aggregator<?>> connectorAggregators = createAggregators(plan, table.getSelects());

		// Exists aggregators hold a reference to their parent FiltersNode so they need to be treated separately.
		// They also don't need aggregation as they simply imitate their reference.
		final List<ExistsAggregator> existsAggregators = connectorAggregators.stream()
																			 .filter(ExistsAggregator.class::isInstance)
																			 .map(ExistsAggregator.class::cast)
																			 .collect(Collectors.toList());

		aggregators.addAll(connectorAggregators);
		// Strips Exists aggregators of both levels; connector-level ones are re-linked below,
		// concept-level ones are linked to the out-node after the loop.
		aggregators.removeIf(ExistsAggregator.class::isInstance);

		final List<Aggregator<CDateSet>> eventDateUnionAggregators =
				aggregateEventDates
				? List.of(new EventDateUnionAggregator(Set.of(table.getConnector().getTable())))
				: Collections.emptyList();

		aggregators.addAll(eventDateUnionAggregators);

		final QPNode filtersNode = getConcept().createConceptQuery(context, filters, aggregators, eventDateUnionAggregators);

		// Link up the ExistsAggregators to the node
		existsAggregators.forEach(agg -> agg.setReference(filtersNode));

		// Select if matching secondaryId available
		final boolean hasSelectedSecondaryId = Arrays.stream(table.getConnector().getTable().getColumns())
													 .map(Column::getSecondaryId)
													 .filter(Objects::nonNull)
													 .anyMatch(o -> Objects.equals(context.getSelectedSecondaryId(), o));

		final Column validityDateColumn = selectValidityDateColumn(table);

		final ConceptNode node = new ConceptNode(
				// TODO Don't set validity node, when no validity column exists. See workaround for this and remove it: https://github.com/bakdata/conquery/pull/1362
				new ValidityDateNode(validityDateColumn, filtersNode),
				elements,
				table,
				// if the node is excluded, don't pass it into the Node.
				!excludeFromSecondaryId && hasSelectedSecondaryId ? context.getSelectedSecondaryId() : null
		);

		tableNodes.add(node);
	}

	// We always merge on concept level
	final QPNode outNode = OrNode.of(tableNodes, aggregateEventDates ? DateAggregationAction.MERGE : DateAggregationAction.BLOCK);

	// Link concept-level Exists-select to outer node.
	conceptAggregators.stream()
					  .filter(ExistsAggregator.class::isInstance)
					  .map(ExistsAggregator.class::cast)
					  .forEach(agg -> agg.setReference(outNode));

	return outNode;
}
Usage example of com.bakdata.conquery.models.datasets.concepts.Concept in the conquery project by bakdata:
class AdminDatasetProcessor, method deleteTable.
/**
 * Deletes a table, unless concepts still depend on it and deletion is not forced.
 * When deletion proceeds, dependent concepts are deleted first (only relevant when forced),
 * then all imports of the table, then the table itself, and finally the removal is broadcast
 * to all shard nodes of the namespace.
 *
 * @param table the table to delete
 * @param force delete the table even if concepts still depend on it
 * @return the ids of all concepts depending on the table (empty if there were none)
 */
public synchronized List<ConceptId> deleteTable(Table table, boolean force) {
final Namespace namespace = datasetRegistry.get(table.getDataset().getId());
// Concepts whose connectors read from this table.
final List<Concept<?>> dependentConcepts = namespace.getStorage().getAllConcepts().stream().flatMap(c -> c.getConnectors().stream()).filter(con -> con.getTable().equals(table)).map(Connector::getConcept).collect(Collectors.toList());
if (force || dependentConcepts.isEmpty()) {
for (Concept<?> concept : dependentConcepts) {
deleteConcept(concept);
}
namespace.getStorage().getAllImports().stream().filter(imp -> imp.getTable().equals(table)).forEach(this::deleteImport);
namespace.getStorage().removeTable(table.getId());
namespace.sendToAll(new RemoveTable(table));
}
return dependentConcepts.stream().map(Concept::getId).collect(Collectors.toList());
}
Usage example of com.bakdata.conquery.models.datasets.concepts.Concept in the conquery project by bakdata:
class NamespacedStorage, method decorateConceptStore.
/**
 * Wires lifecycle hooks onto the concept store: adding a concept registers all of its
 * sub-components (selects, connectors, filters, validity dates, tree children) with the
 * central registry; removing a concept deregisters them again.
 */
private void decorateConceptStore(IdentifiableStore<Concept<?>> store) {
	store.onAdd(addedConcept -> {
		// A concept may only be stored into the dataset it was built for.
		if (addedConcept.getDataset() != null && !addedConcept.getDataset().equals(dataset.get())) {
			throw new IllegalStateException("Concept is not for this dataset.");
		}

		addedConcept.setDataset(dataset.get());
		addedConcept.initElements();

		addedConcept.getSelects().forEach(centralRegistry::register);

		for (Connector connector : addedConcept.getConnectors()) {
			centralRegistry.register(connector);
			connector.collectAllFilters().forEach(centralRegistry::register);
			connector.getSelects().forEach(centralRegistry::register);
			connector.getValidityDates().forEach(centralRegistry::register);
		}

		// add imports of table
		if (isRegisterImports()) {
			for (Import imp : getAllImports()) {
				for (Connector connector : addedConcept.getConnectors()) {
					if (connector.getTable().equals(imp.getTable())) {
						connector.addImport(imp);
					}
				}
			}
		}

		if (addedConcept instanceof TreeConcept) {
			((TreeConcept) addedConcept).getAllChildren().values().forEach(centralRegistry::register);
		}
	})
		 .onRemove(removedConcept -> {
			 removedConcept.getSelects().forEach(centralRegistry::remove);

			 // see #146 remove from Dataset.concepts
			 for (Connector connector : removedConcept.getConnectors()) {
				 connector.getSelects().forEach(centralRegistry::remove);
				 connector.collectAllFilters().forEach(centralRegistry::remove);
				 connector.getValidityDates().forEach(centralRegistry::remove);
				 centralRegistry.remove(connector);
			 }

			 if (removedConcept instanceof TreeConcept) {
				 ((TreeConcept) removedConcept).getAllChildren().values().forEach(centralRegistry::remove);
			 }
		 });
}
Usage example of com.bakdata.conquery.models.datasets.concepts.Concept in the conquery project by bakdata:
class ConceptUpdateAndDeletionTest, method execute.
/**
 * Integration test flow: imports a concept, verifies its presence on the manager and all
 * workers, updates it (delete + re-add under the same id), re-verifies, restarts the
 * cluster, deletes the concept, and finally asserts that the deletion survives another
 * restart. Query results are used to confirm which concept version is active.
 */
@Override
public void execute(String name, TestConquery testConquery) throws Exception {
	StandaloneSupport conquery = testConquery.getSupport(name);

	// Read two JSONs with different Trees
	final String testJson = In.resource("/tests/query/UPDATE_CONCEPT_TESTS/SIMPLE_TREECONCEPT_Query.json").withUTF8().readAll();
	final String testJson2 = In.resource("/tests/query/UPDATE_CONCEPT_TESTS/SIMPLE_TREECONCEPT_2_Query.json").withUTF8().readAll();

	final Dataset dataset = conquery.getDataset();
	final Namespace namespace = conquery.getNamespace();

	final ConceptId conceptId = ConceptId.Parser.INSTANCE.parse(dataset.getName(), "test_tree");
	final Concept<?> concept;

	final QueryTest test = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson);
	final QueryTest test2 = (QueryTest) JsonIntegrationTest.readJson(dataset, testJson2);

	// Manually import data, so we can do our own work.
	{
		ValidatorHelper.failOnError(log, conquery.getValidator().validate(test));

		importSecondaryIds(conquery, test.getContent().getSecondaryIds());
		conquery.waitUntilWorkDone();

		LoadingUtil.importTables(conquery, test.getContent().getTables());
		conquery.waitUntilWorkDone();

		LoadingUtil.importConcepts(conquery, test.getRawConcepts());
		conquery.waitUntilWorkDone();
		assertThat(namespace.getStorage().getConcept(conceptId)).isNotNull();

		LoadingUtil.importTableContents(conquery, test.getContent().getTables());
		conquery.waitUntilWorkDone();
	}

	final Query query = IntegrationUtils.parseQuery(conquery, test.getRawQuery());

	// State before update.
	{
		log.info("Checking state before update");

		// Must contain the concept.
		assertThat(namespace.getStorage().getAllConcepts()).filteredOn(con -> con.getId().equals(conceptId)).isNotEmpty();
		assertThat(namespace.getStorage().getCentralRegistry().getOptional(conceptId)).isNotEmpty();

		for (ShardNode node : conquery.getShardNodes()) {
			for (Worker value : node.getWorkers().getWorkers().values()) {
				if (!value.getInfo().getDataset().equals(dataset.getId())) {
					continue;
				}
				final ModificationShieldedWorkerStorage workerStorage = value.getStorage();

				assertThat(workerStorage.getCentralRegistry().getOptional(conceptId)).isNotEmpty();
				assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", value.getInfo().getId()).filteredOn(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId)).isNotEmpty();
			}
		}

		log.info("Executing query before update");
		IntegrationUtils.assertQueryResult(conquery, query, 1L, ExecutionState.DONE, conquery.getTestUser(), 201);
		conquery.waitUntilWorkDone();
		log.info("Query before update executed");
	}

	// Load a different concept with the same id (it has different children "C1" that are more than "A1")
	// To perform the update, the old concept will be deleted first and the new concept will be added. That means the deletion of concept is also covered here
	{
		log.info("Executing update");
		LoadingUtil.updateConcepts(conquery, test2.getRawConcepts(), Response.Status.Family.SUCCESSFUL);
		conquery.waitUntilWorkDone();
		log.info("Update executed");
	}

	// Check state after update.
	{
		log.info("Checking state after update");

		// Must contain the concept now.
		assertThat(namespace.getStorage().getAllConcepts()).filteredOn(con -> con.getId().equals(conceptId)).isNotEmpty();
		assertThat(namespace.getStorage().getCentralRegistry().getOptional(conceptId)).isNotEmpty();

		for (ShardNode node : conquery.getShardNodes()) {
			for (Worker value : node.getWorkers().getWorkers().values()) {
				if (!value.getInfo().getDataset().equals(dataset.getId())) {
					continue;
				}
				final ModificationShieldedWorkerStorage workerStorage = value.getStorage();

				assertThat(workerStorage.getCentralRegistry().getOptional(conceptId)).isNotEmpty();
				assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", value.getInfo().getId()).filteredOn(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId)).isNotEmpty();
			}
		}

		log.info("Executing query after update");
		// Assert that it now contains 2 instead of 1.
		IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
		conquery.waitUntilWorkDone();
		log.info("Query after update executed");
	}

	// new Conquery generated after restarting
	// StandaloneSupport conquery;
	// Restart conquery and assert again, that the data is correct.
	{
		testConquery.shutdown();
		// restart
		testConquery.beforeAll();
		conquery = testConquery.openDataset(dataset.getId());

		log.info("Checking state after re-start");
		{
			// Must contain the concept.
			assertThat(conquery.getNamespace().getStorage().getAllConcepts()).filteredOn(con -> con.getId().equals(conceptId)).isNotEmpty();
			assertThat(conquery.getNamespace().getStorage().getCentralRegistry().getOptional(conceptId)).isNotEmpty();

			for (ShardNode node : conquery.getShardNodes()) {
				for (Worker value : node.getWorkers().getWorkers().values()) {
					if (!value.getInfo().getDataset().equals(dataset.getId())) {
						continue;
					}
					final ModificationShieldedWorkerStorage workerStorage = value.getStorage();

					assertThat(workerStorage.getCentralRegistry().getOptional(conceptId)).isNotEmpty();
					assertThat(workerStorage.getAllCBlocks()).describedAs("CBlocks for Worker %s", value.getInfo().getId()).filteredOn(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId)).isNotEmpty();
				}
			}

			log.info("Executing query after restart.");
			// Re-assert state.
			IntegrationUtils.assertQueryResult(conquery, query, 2L, ExecutionState.DONE, conquery.getTestUser(), 201);
			conquery.waitUntilWorkDone();
		}
	}

	// Delete the Concept.
	{
		log.info("Issuing deletion of concept {}", conceptId);

		concept = Objects.requireNonNull(conquery.getNamespace().getStorage().getConcept(conceptId));
		conquery.getDatasetsProcessor().deleteConcept(concept);
		conquery.waitUntilWorkDone();
	}

	// Check state after deletion.
	{
		log.info("Checking state after deletion");

		// We've deleted the concept so it and its associated cblock should be gone.
		assertThat(conquery.getNamespace().getStorage().getAllConcepts()).filteredOn(con -> con.getId().equals(conceptId)).isEmpty();
		assertThat(conquery.getNamespace().getStorage().getCentralRegistry().getOptional(conceptId)).isEmpty();
		assertThat(conquery.getShardNodes().stream().flatMap(node -> node.getWorkers().getWorkers().values().stream()).filter(worker -> worker.getInfo().getDataset().equals(dataset.getId())).map(Worker::getStorage)).noneMatch(workerStorage -> workerStorage.getConcept(conceptId) != null).noneMatch(workerStorage -> workerStorage.getAllCBlocks().stream().anyMatch(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId)));

		log.info("Executing query after deletion (EXPECTING AN EXCEPTION IN THE LOGS!)");
		// Issue a query and assert that it is failing.
		IntegrationUtils.assertQueryResult(conquery, query, 0L, ExecutionState.FAILED, conquery.getTestUser(), 400);
	}

	// Restart conquery and assert again, that the state after deletion was maintained.
	{
		{
			testConquery.shutdown();
			// restart
			testConquery.beforeAll();
			conquery = testConquery.openDataset(dataset.getId());
		}

		// Check state after restart.
		{
			log.info("Checking state after restart");

			// We've deleted the concept so it and its associated cblock should be gone.
			assertThat(conquery.getNamespace().getStorage().getAllConcepts()).filteredOn(con -> con.getId().equals(conceptId)).isEmpty();
			assertThat(conquery.getNamespace().getStorage().getCentralRegistry().getOptional(conceptId)).isEmpty();
			assertThat(conquery.getShardNodes().stream().flatMap(node -> node.getWorkers().getWorkers().values().stream()).filter(worker -> worker.getInfo().getDataset().equals(dataset.getId())).map(Worker::getStorage)).noneMatch(workerStorage -> workerStorage.getConcept(conceptId) != null).noneMatch(workerStorage -> workerStorage.getAllCBlocks().stream().anyMatch(cBlock -> cBlock.getConnector().getConcept().getId().equals(conceptId)));

			log.info("Executing query after restart (EXPECTING AN EXCEPTION IN THE LOGS!)");
			// Issue a query and assert that it is failing.
			IntegrationUtils.assertQueryResult(conquery, query, 0L, ExecutionState.FAILED, conquery.getTestUser(), 400);
		}
	}
}
Aggregations