use of com.bakdata.conquery.apiv1.query.QueryDescription in project conquery by bakdata.
the class ManagedQuery method makeDefaultLabel.
/**
 * Creates a default label based on the submitted {@link QueryDescription}.
 * The label notes whether the description contained a {@link CQExternal},
 * {@link CQReusedQuery} or {@link CQConcept}, in this order.
 * In case of one or more {@link CQConcept}s, the distinct labels of the concepts
 * are concatenated until a length of {@value #MAX_CONCEPT_LABEL_CONCAT_LENGTH} is reached.
 * All further labels are dropped.
 */
@Override
protected String makeDefaultLabel(PrintSettings cfg) {
	final StringBuilder sb = new StringBuilder();
	final Map<Class<? extends Visitable>, List<Visitable>> sortedContents =
			Visitable.stream(query).collect(Collectors.groupingBy(Visitable::getClass));

	int sbStartSize = sb.length();

	// Check for CQExternal
	List<Visitable> externals = sortedContents.getOrDefault(CQExternal.class, Collections.emptyList());
	if (!externals.isEmpty()) {
		if (sb.length() > 0) {
			sb.append(" ");
		}
		sb.append(C10N.get(CQElementC10n.class, I18n.LOCALE.get()).external());
	}

	// Check for CQReusedQuery
	if (sortedContents.containsKey(CQReusedQuery.class)) {
		if (sb.length() > 0) {
			sb.append(" ");
		}
		sb.append(C10N.get(CQElementC10n.class, I18n.LOCALE.get()).reused());
	}

	// Check for CQConcept
	if (sortedContents.containsKey(CQConcept.class)) {
		if (sb.length() > 0) {
			sb.append(" ");
		}
		// Track the length of the text we are appending for concepts.
		final AtomicInteger length = new AtomicInteger();
		sortedContents.get(CQConcept.class)
					  .stream()
					  .map(CQConcept.class::cast)
					  .map(c -> makeLabelWithRootAndChild(c, cfg))
					  .filter(Predicate.not(Strings::isNullOrEmpty))
					  .distinct()
					  .takeWhile(elem -> length.addAndGet(elem.length()) < MAX_CONCEPT_LABEL_CONCAT_LENGTH)
					  .forEach(label -> sb.append(label).append(" "));

		// The last concept label appends a trailing space that we don't want.
		if (sb.length() > 0) {
			sb.deleteCharAt(sb.length() - 1);
		}

		// If not all concepts could be included in the name, point that out.
		if (length.get() > MAX_CONCEPT_LABEL_CONCAT_LENGTH) {
			sb.append(" ").append(C10N.get(CQElementC10n.class, I18n.LOCALE.get()).furtherConcepts());
		}
	}

	// Fall back to the id if nothing could be extracted from the query description.
	if (sbStartSize == sb.length()) {
		sb.append(getId().getExecution());
	}

	return sb.toString();
}
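The interesting part of makeDefaultLabel is how it caps the concatenated concept labels: an AtomicInteger accumulates the running length inside takeWhile, so the stream stops consuming labels once the cap would be exceeded, after blanks and duplicates have been filtered out. The following is a minimal, self-contained sketch of that pattern, not conquery code: it uses the JDK's String::isEmpty in place of Guava's Strings::isNullOrEmpty, and a hypothetical cap of 30 characters stands in for MAX_CONCEPT_LABEL_CONCAT_LENGTH.

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
import java.util.stream.Collectors;

public class LabelConcatSketch {
	// Hypothetical limit, standing in for MAX_CONCEPT_LABEL_CONCAT_LENGTH.
	private static final int MAX_LENGTH = 30;

	public static void main(String[] args) {
		List<String> labels = List.of("Diagnosis", "Diagnosis", "", "Medication", "Procedure", "Lab Value");

		// Track the combined length of the labels appended so far; takeWhile
		// stops the stream once the cap is exceeded, dropping all further labels.
		AtomicInteger length = new AtomicInteger();
		String result = labels.stream()
							  .filter(Predicate.not(String::isEmpty))
							  .distinct()
							  .takeWhile(label -> length.addAndGet(label.length()) < MAX_LENGTH)
							  .collect(Collectors.joining(" "));

		// Prints "Diagnosis Medication Procedure"; "Lab Value" pushes the total past 30 and is dropped.
		System.out.println(result);
	}
}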
use of com.bakdata.conquery.apiv1.query.QueryDescription in project conquery by bakdata.
the class QueryProcessor method postQuery.
/**
* Creates a query for all datasets, then submits it for execution on the
* intended dataset.
*/
public ManagedExecution<?> postQuery(Dataset dataset, QueryDescription query, Subject subject) {
	log.info("Query posted on Dataset[{}] by User[{}].", dataset.getId(), subject.getId());

	// This map works as long as we have query visitors that are not configured in any way.
	// Adding a visitor twice would replace the previous one, but both would have yielded the same result.
	// In the future, a better data structure might be desired that also regards similar QueryVisitors of different configurations.
	ClassToInstanceMap<QueryVisitor> visitors = MutableClassToInstanceMap.create();
	query.addVisitors(visitors);

	// Initialize checks that need to traverse the query tree
	visitors.putInstance(QueryUtils.OnlyReusingChecker.class, new QueryUtils.OnlyReusingChecker());
	visitors.putInstance(NamespacedIdentifiableCollector.class, new NamespacedIdentifiableCollector());

	final String primaryGroupName = AuthorizationHelper.getPrimaryGroup(subject, storage).map(Group::getName).orElse("none");
	visitors.putInstance(ExecutionMetrics.QueryMetricsReporter.class, new ExecutionMetrics.QueryMetricsReporter(primaryGroupName));

	// Chain all consumers
	Consumer<Visitable> consumerChain = QueryUtils.getNoOpEntryPoint();
	for (QueryVisitor visitor : visitors.values()) {
		consumerChain = consumerChain.andThen(visitor);
	}

	// Apply the consumer chain to the query tree
	query.visit(consumerChain);

	query.authorize(subject, dataset, visitors);
	// After all authorization checks, we can use the actual subject to invoke the query
	// and do not need to pass the Userish down through the methods below.

	ExecutionMetrics.reportNamespacedIds(visitors.getInstance(NamespacedIdentifiableCollector.class).getIdentifiables(), primaryGroupName);
	ExecutionMetrics.reportQueryClassUsage(query.getClass(), primaryGroupName);

	final Namespace namespace = datasetRegistry.get(dataset.getId());
	final ExecutionManager executionManager = namespace.getExecutionManager();

	// If this is only a re-executing query, try to execute the underlying query instead.
	{
		final Optional<ManagedExecutionId> executionId = visitors.getInstance(QueryUtils.OnlyReusingChecker.class).getOnlyReused();
		final Optional<ManagedExecution<?>> execution =
				executionId.map(id -> tryReuse(query, id, datasetRegistry, config, executionManager, subject.getUser()));

		if (execution.isPresent()) {
			return execution.get();
		}
	}

	// Execute the query
	return executionManager.runQuery(datasetRegistry, query, subject.getUser(), dataset, config);
}
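A notable detail of postQuery is how the visitors are applied: rather than traversing the query tree once per visitor, all visitors are folded into a single Consumer via andThen, so one traversal feeds every check. Below is a minimal sketch of that chaining pattern, with String nodes and two hypothetical visitors standing in for Visitable and the QueryVisitor instances.

import java.util.List;
import java.util.function.Consumer;

public class VisitorChainSketch {
	public static void main(String[] args) {
		// Hypothetical visitors; in postQuery these are QueryVisitor instances
		// collected in a ClassToInstanceMap.
		List<Consumer<String>> visitors = List.of(
				node -> System.out.println("checker saw: " + node),
				node -> System.out.println("collector saw: " + node)
		);

		// Start from a no-op and chain every visitor behind it, mirroring
		// QueryUtils.getNoOpEntryPoint() followed by andThen(...).
		Consumer<String> chain = node -> {};
		for (Consumer<String> visitor : visitors) {
			chain = chain.andThen(visitor);
		}

		// A single traversal of the query tree now feeds every visitor.
		chain.accept("CQConcept");
	}
}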
use of com.bakdata.conquery.apiv1.query.QueryDescription in project conquery by bakdata.
the class SerializingStoreDumpTest method testCorruptKeyDump.
/**
* Tests if entries with corrupted keys are dumped.
*/
@Test
public void testCorruptKeyDump() throws IOException {
	// Set the dump directory to this test's temp-dir
	config.setUnreadableDataDumpDirectory(tmpDir);

	{
		// Open a store and insert a valid key-value pair (UserId & User)
		SerializingStore<UserId, User> store = createSerializedStore(config, env, Validators.newValidator(), USER_STORE_ID);
		store.add(new UserId("testU1"), user);
	}

	{
		// Open that store again with a different config to insert a corrupt entry
		// (String & QueryDescription)
		SerializingStore<String, QueryDescription> store =
				createSerializedStore(config, env, Validators.newValidator(), new StoreInfo<>(USER_STORE_ID.getName(), String.class, QueryDescription.class));
		store.add("not a valid conquery Id", cQuery);
	}

	{
		// Reopen the store with the initial config and iterate over all entries
		// (this triggers the dump or removal of invalid entries)
		SerializingStore<UserId, User> store = createSerializedStore(config, env, Validators.newValidator(), USER_STORE_ID);

		IterationStatistic expectedResult = new IterationStatistic();
		expectedResult.setTotalProcessed(2);
		expectedResult.setFailedKeys(1);
		expectedResult.setFailedValues(0);

		// Iterate (do nothing with the entries themselves)
		IterationStatistic result = store.forEach((k, v, s) -> {});
		assertThat(result).isEqualTo(expectedResult);
	}

	// Test if the correct number of dump files was generated
	Condition<File> dumpFileCond = new Condition<>(f -> f.getName().endsWith(SerializingStore.DUMP_FILE_EXTENTION), "dump file");
	assertThat(tmpDir.listFiles()).areExactly(1, dumpFileCond);

	// Test if the dump is correct
	File dumpFile = getDumpFile(dumpFileCond);
	assertThat((QueryDescription) Jackson.MAPPER.readerFor(QueryDescription.class).readValue(dumpFile)).isEqualTo(cQuery);
}
use of com.bakdata.conquery.apiv1.query.QueryDescription in project conquery by bakdata.
the class SerializingStoreDumpTest method testCorruptValueDump.
/**
* Tests if entries with corrupted values are dumped.
*/
@Test
public void testCorruptValueDump() throws IOException {
	// Set the dump directory to this test's temp-dir
	config.setUnreadableDataDumpDirectory(tmpDir);

	{
		// Open a store and insert a valid key-value pair (UserId & User)
		SerializingStore<UserId, User> store = createSerializedStore(config, env, Validators.newValidator(), USER_STORE_ID);
		store.add(user.getId(), user);
	}

	{
		// Open that store again with a different config to insert a corrupt entry
		// (UserId & QueryDescription)
		SerializingStore<UserId, QueryDescription> store =
				createSerializedStore(config, env, Validators.newValidator(), new StoreInfo<>(USER_STORE_ID.getName(), UserId.class, QueryDescription.class));
		store.add(new UserId("testU2"), cQuery);
	}

	{
		// Reopen the store with the initial config and iterate over all entries
		// (this triggers the dump or removal of invalid entries)
		SerializingStore<UserId, User> store = createSerializedStore(config, env, Validators.newValidator(), USER_STORE_ID);

		IterationStatistic expectedResult = new IterationStatistic();
		expectedResult.setTotalProcessed(2);
		expectedResult.setFailedKeys(0);
		expectedResult.setFailedValues(1);

		// Iterate (do nothing with the entries themselves)
		IterationStatistic result = store.forEach((k, v, s) -> {});
		assertThat(result).isEqualTo(expectedResult);
	}

	// Test if the correct number of dump files was generated
	Condition<File> dumpFileCond = new Condition<>(f -> f.getName().endsWith(SerializingStore.DUMP_FILE_EXTENTION), "dump file");
	assertThat(tmpDir.listFiles()).areExactly(1, dumpFileCond);

	// Test if the dump is correct
	File dumpFile = getDumpFile(dumpFileCond);
	assertThat((QueryDescription) Jackson.MAPPER.readerFor(QueryDescription.class).readValue(dumpFile)).isEqualTo(cQuery);
}
use of com.bakdata.conquery.apiv1.query.QueryDescription in project conquery by bakdata.
the class SerializingStoreDumpTest method testCorruptionRemoval.
/**
 * Tests if corrupted entries are removed from the store if it is configured to do so.
 * The dump itself is not tested.
 */
@Test
public void testCorruptionRemoval() {
	log.info("This test will throw some warnings from the SerializingStore.");

	// Configure the store to remove corrupt entries
	config.setRemoveUnreadableFromStore(true);

	{
		// Open a store and insert a valid key-value pair (UserId & User)
		SerializingStore<UserId, User> store = createSerializedStore(config, env, Validators.newValidator(), USER_STORE_ID);
		store.add(new UserId("testU1"), user);
	}

	{
		// Insert two corrupt entries: one with a corrupt key and one with a corrupt value
		{
			SerializingStore<String, QueryDescription> store =
					createSerializedStore(config, env, Validators.newValidator(), new StoreInfo<>(USER_STORE_ID.getName(), String.class, QueryDescription.class));
			store.add("not a valid conquery Id", cQuery);
		}
		{
			SerializingStore<UserId, QueryDescription> store =
					createSerializedStore(config, env, Validators.newValidator(), new StoreInfo<>(USER_STORE_ID.getName(), UserId.class, QueryDescription.class));
			store.add(new UserId("testU2"), cQuery);
		}
	}

	{
		// Reopen the store with the correct configuration and iterate over all entries
		// (this triggers the dump or removal of invalid entries)
		SerializingStore<UserId, User> store = createSerializedStore(config, env, Validators.newValidator(), USER_STORE_ID);

		IterationStatistic expectedResult = new IterationStatistic();
		expectedResult.setTotalProcessed(3);
		expectedResult.setFailedKeys(1);
		expectedResult.setFailedValues(1);

		// Iterate (do nothing with the entries themselves)
		IterationStatistic result = store.forEach((k, v, s) -> {});
		assertThat(result).isEqualTo(expectedResult);
	}

	{
		// Reopen again to check that the corrupted entries were removed in the previous pass
		SerializingStore<UserId, User> store = createSerializedStore(config, env, Validators.newValidator(), USER_STORE_ID);

		IterationStatistic expectedResult = new IterationStatistic();
		expectedResult.setTotalProcessed(1);
		expectedResult.setFailedKeys(0);
		expectedResult.setFailedValues(0);

		// Iterate (do nothing with the entries themselves)
		IterationStatistic result = store.forEach((k, v, s) -> {});
		assertThat(result).isEqualTo(expectedResult);
	}
}
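Taken together, these tests exercise the two recovery policies of SerializingStore: dumping unreadable entries to a directory (setUnreadableDataDumpDirectory) or deleting them during iteration (setRemoveUnreadableFromStore). The sketch below is not conquery's API; it illustrates the dump-or-remove decision with a plain in-memory map, where integer parsing stands in for Jackson deserialization.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class CorruptionHandlingSketch {
	public static void main(String[] args) throws IOException {
		Map<String, String> rawStore = new HashMap<>();
		rawStore.put("u1", "42");           // valid: parses as an int
		rawStore.put("u2", "not a number"); // corrupt: fails to parse

		boolean removeUnreadable = false;                  // mirrors setRemoveUnreadableFromStore(true)
		Path dumpDir = Files.createTempDirectory("dumps"); // mirrors setUnreadableDataDumpDirectory

		int processed = 0, failedValues = 0;
		Iterator<Map.Entry<String, String>> it = rawStore.entrySet().iterator();
		while (it.hasNext()) {
			Map.Entry<String, String> entry = it.next();
			processed++;
			try {
				Integer.parseInt(entry.getValue()); // stand-in for deserializing the stored value
			}
			catch (NumberFormatException e) {
				failedValues++;
				if (removeUnreadable) {
					// Removal mode: drop the unreadable entry from the store.
					it.remove();
				}
				else {
					// Dump mode: write the unreadable raw value to the dump directory.
					Files.writeString(dumpDir.resolve(entry.getKey() + ".dump"), entry.getValue());
				}
			}
		}
		// Prints "processed=2 failedValues=1", matching the counting done by IterationStatistic.
		System.out.printf("processed=%d failedValues=%d%n", processed, failedValues);
	}
}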