Usage of com.bakdata.conquery.models.events.Bucket in the project conquery (by bakdata): class SecondaryIdQueryPlan, method executeQueriesWithoutSecondaryId.
/**
 * Runs every child plan over all events of {@code currentTable}, which has no
 * secondary-id column: each event is fed to ALL children in {@code childPerKey}
 * rather than being routed to a single key's plan.
 *
 * @param ctx          execution context used to resolve the entity's buckets.
 * @param entity       the entity whose events are evaluated.
 * @param currentTable the table (without a secondary-id column) to scan.
 */
private void executeQueriesWithoutSecondaryId(QueryExecutionContext ctx, Entity entity, Table currentTable) {
	nextTable(ctx, currentTable);

	// Loop-invariant: resolve the entity id once instead of once per bucket.
	final int entityId = entity.getId();

	final List<Bucket> tableBuckets = ctx.getBucketManager().getEntityBucketsForTable(entity, currentTable);

	for (Bucket bucket : tableBuckets) {
		nextBlock(bucket);

		// Skip buckets that don't hold this entity or are irrelevant to the plan.
		if (!bucket.containsEntity(entityId) || !isOfInterest(bucket)) {
			continue;
		}

		final int start = bucket.getEntityStart(entityId);
		final int end = bucket.getEntityEnd(entityId);

		for (int event = start; event < end; event++) {
			// No secondary id to partition by: every child sees every event.
			for (ConceptQueryPlan child : childPerKey.values()) {
				child.nextEvent(bucket, event);
			}
		}
	}
}
Usage of com.bakdata.conquery.models.events.Bucket in the project conquery (by bakdata): class TableExportQueryPlan, method execute.
/**
 * Exports raw table rows for the given entity, restricted to entities matched
 * by {@code subPlan} and (per table) to events whose validity date intersects
 * {@code dateRange}. Column values are written into positions assigned by
 * {@code positions}; the validity date always goes to position 0.
 *
 * @param ctx    execution context providing the entity's buckets per table.
 * @param entity the entity whose rows are exported.
 * @return one row per exported event, or empty if the sub-plan does not match
 *         or no tables are configured.
 */
@Override
public Optional<MultilineEntityResult> execute(QueryExecutionContext ctx, Entity entity) {
	Optional<? extends EntityResult> result = subPlan.execute(ctx, entity);

	if (result.isEmpty() || tables.isEmpty()) {
		return Optional.empty();
	}

	final List<Object[]> results = new ArrayList<>();

	// Row width = highest assigned column position + 1.
	final int totalColumns = positions.values().stream().mapToInt(i -> i).max().getAsInt() + 1;

	// Loop-invariants hoisted out of the per-event loops: the entity id and the
	// query date-range set (previously rebuilt via CDateSet.create on EVERY event).
	final int entityId = entity.getId();
	final CDateSet queryDateRange = CDateSet.create(dateRange);

	for (TableExportDescription exportDescription : tables) {
		for (Bucket bucket : ctx.getEntityBucketsForTable(entity, exportDescription.getTable())) {

			if (!bucket.containsEntity(entityId)) {
				continue;
			}

			final int start = bucket.getEntityStart(entityId);
			final int end = bucket.getEntityEnd(entityId);

			for (int event = start; event < end; event++) {

				// Export Full-table if it has no validity date.
				if (exportDescription.getValidityDateColumn() != null
					&& !bucket.eventIsContainedIn(event, exportDescription.getValidityDateColumn(), queryDateRange)) {
					continue;
				}

				final Object[] entry = new Object[totalColumns];

				for (Column column : exportDescription.getTable().getColumns()) {
					if (!bucket.has(event, column)) {
						continue;
					}

					if (column.equals(exportDescription.getValidityDateColumn())) {
						// Validity date is pinned to position 0 of every row.
						entry[0] = List.of(bucket.getAsDateRange(event, column));
					}
					else {
						entry[positions.get(column)] = bucket.createScriptValue(event, column);
					}
				}

				results.add(entry);
			}
		}
	}

	return Optional.of(new MultilineEntityResult(entity.getId(), results));
}
Usage of com.bakdata.conquery.models.events.Bucket in the project conquery (by bakdata): class ImportJob, method sendBuckets.
/**
 * Builds the {@link Bucket} for every bucket-id in {@code buckets2LocalEntities}
 * and sends it to the worker responsible for that bucket, recording which
 * worker received which bucket.
 *
 * @return the set of bucket-ids assigned to each worker.
 * @throws JsonProcessingException if serializing a bucket message fails.
 */
private Map<WorkerId, Set<BucketId>> sendBuckets(Map<Integer, Integer> starts, Map<Integer, Integer> lengths, DictionaryMapping primaryMapping, Import imp, Map<Integer, List<Integer>> buckets2LocalEntities, ColumnStore[] storesSorted) throws JsonProcessingException {

	final Map<WorkerId, Set<BucketId>> assignments = new HashMap<>();
	final ProgressReporter progress = getProgressReporter().subJob(buckets2LocalEntities.size());

	for (Map.Entry<Integer, List<Integer>> entry : buckets2LocalEntities.entrySet()) {
		final Integer bucketId = entry.getKey();

		final WorkerInformation worker =
				Objects.requireNonNull(namespace.getResponsibleWorkerForBucket(bucketId), () -> "No responsible worker for Bucket#" + bucketId);

		// Back-pressure: don't build the bucket until the worker can take it.
		awaitFreeJobQueue(worker);

		final Bucket bucket = selectBucket(starts, lengths, storesSorted, primaryMapping, imp, bucketId, entry.getValue());

		assignments.computeIfAbsent(worker.getId(), id -> new HashSet<>())
				   .add(bucket.getId());

		log.trace("Sending Bucket[{}] to {}", bucket.getId(), worker.getId());
		worker.send(ImportBucket.forBucket(bucket));

		progress.report(1);
	}

	progress.done();
	return assignments;
}
Usage of com.bakdata.conquery.models.events.Bucket in the project conquery (by bakdata): class ConceptQueryPlan, method execute.
/**
 * Evaluates this plan for a single entity: one synthetic pass over the
 * ALL_IDS table, then a pass over every event of every relevant bucket of
 * each required table.
 *
 * @param ctx    execution context; its date aggregator may be overridden here.
 * @param entity the entity to evaluate.
 * @return the single-line result if the entity is contained, otherwise empty.
 */
@Override
public Optional<SinglelineEntityResult> execute(QueryExecutionContext ctx, Entity entity) {

	// Only override if none has been set from a higher level
	ctx = QueryUtils.determineDateAggregatorForContext(ctx, this::getValidityDateAggregator);

	if (!isOfInterest(entity)) {
		return Optional.empty();
	}

	// Always do one go-round with ALL_IDS_TABLE.
	nextTable(ctx, ctx.getStorage().getDataset().getAllIdsTable());
	nextBlock(EmptyBucket.getInstance());
	nextEvent(EmptyBucket.getInstance(), 0);

	// Loop-invariant: resolve the entity id once.
	final int entityId = entity.getId();

	for (Table currentTable : requiredTables.get()) {

		// ALL_IDS was already handled above.
		if (Dataset.isAllIdsTable(currentTable)) {
			continue;
		}

		nextTable(ctx, currentTable);

		final List<Bucket> tableBuckets = ctx.getBucketManager().getEntityBucketsForTable(entity, currentTable);

		// Fix: log the bucket COUNT, not the whole list.
		log.trace("Table[{}] has {} buckets for Entity[{}]", currentTable, tableBuckets.size(), entity);

		for (Bucket bucket : tableBuckets) {

			if (!isOfInterest(bucket)) {
				continue;
			}

			nextBlock(bucket);

			final int start = bucket.getEntityStart(entityId);
			final int end = bucket.getEntityEnd(entityId);

			for (int event = start; event < end; event++) {
				nextEvent(bucket, event);
			}
		}
	}

	if (isContained()) {
		return Optional.of(createResult());
	}

	return Optional.empty();
}
Usage of com.bakdata.conquery.models.events.Bucket in the project conquery (by bakdata): class SecondaryIdQueryPlan, method executeQueriesWithSecondaryId.
/**
 * Runs the query over the table containing the secondary-id column, routing
 * each event to the child plan keyed by that event's secondary-id value
 * (children are created lazily per distinct value).
 *
 * @param ctx               execution context; a phase with the active secondary id is derived from it.
 * @param entity            the entity whose events are evaluated.
 * @param secondaryIdColumnId the column carrying the secondary-id values.
 */
private void executeQueriesWithSecondaryId(QueryExecutionContext ctx, Entity entity, Column secondaryIdColumnId) {

	final QueryExecutionContext ctxWithPhase = ctx.withActiveSecondaryId(getSecondaryId());
	final Table currentTable = secondaryIdColumnId.getTable();

	nextTable(ctxWithPhase, currentTable);

	// Loop-invariant: resolve the entity id once instead of once per bucket.
	final int entityId = entity.getId();

	final List<Bucket> tableBuckets = ctx.getBucketManager().getEntityBucketsForTable(entity, currentTable);

	for (Bucket bucket : tableBuckets) {
		nextBlock(bucket);

		// Single guard, consistent with executeQueriesWithoutSecondaryId.
		if (!bucket.containsEntity(entityId) || !isOfInterest(bucket)) {
			continue;
		}

		final int start = bucket.getEntityStart(entityId);
		final int end = bucket.getEntityEnd(entityId);

		for (int event = start; event < end; event++) {
			// we ignore events with no value in the secondaryIdColumn
			if (!bucket.has(event, secondaryIdColumnId)) {
				continue;
			}

			final String key = ((String) bucket.createScriptValue(event, secondaryIdColumnId));

			// Lazily create one child plan per distinct secondary-id value.
			final ConceptQueryPlan plan = childPerKey.computeIfAbsent(key, k -> createChild(ctxWithPhase, bucket));
			plan.nextEvent(bucket, event);
		}
	}
}
Aggregations