use of com.bakdata.conquery.models.query.results.EntityResult in project conquery by bakdata.
the class TableExportQueryPlan method execute.
@Override
public Optional<MultilineEntityResult> execute(QueryExecutionContext ctx, Entity entity) {
    Optional<? extends EntityResult> result = subPlan.execute(ctx, entity);
    if (result.isEmpty() || tables.isEmpty()) {
        return Optional.empty();
    }
    List<Object[]> results = new ArrayList<>();
    final int totalColumns = positions.values().stream().mapToInt(i -> i).max().getAsInt() + 1;
    for (TableExportDescription exportDescription : tables) {
        for (Bucket bucket : ctx.getEntityBucketsForTable(entity, exportDescription.getTable())) {
            int entityId = entity.getId();
            if (!bucket.containsEntity(entityId)) {
                continue;
            }
            int start = bucket.getEntityStart(entityId);
            int end = bucket.getEntityEnd(entityId);
            for (int event = start; event < end; event++) {
                // Export the full table if it has no validity date; otherwise skip events outside the queried date range.
                if (exportDescription.getValidityDateColumn() != null && !bucket.eventIsContainedIn(event, exportDescription.getValidityDateColumn(), CDateSet.create(dateRange))) {
                    continue;
                }
                Object[] entry = new Object[totalColumns];
                for (Column column : exportDescription.getTable().getColumns()) {
                    if (!bucket.has(event, column)) {
                        continue;
                    }
                    if (column.equals(exportDescription.getValidityDateColumn())) {
                        entry[0] = List.of(bucket.getAsDateRange(event, column));
                    } else {
                        entry[positions.get(column)] = bucket.createScriptValue(event, column);
                    }
                }
                results.add(entry);
            }
        }
    }
    return Optional.of(new MultilineEntityResult(entity.getId(), results));
}
use of com.bakdata.conquery.models.query.results.EntityResult in project conquery by bakdata.
the class ExcelRenderer method writeBody.
private int writeBody(SXSSFSheet sheet, List<ResultInfo> infos, Stream<EntityResult> resultLines) {
    // Row 0 is the header; the data starts at row 1.
    final AtomicInteger currentRow = new AtomicInteger(1);
    final int writtenLines = resultLines.mapToInt(l -> this.writeRowsForEntity(infos, l, currentRow, cfg, sheet)).sum();
    // The result was shorter than the number of rows to track, so we auto-size here explicitly.
    if (writtenLines < config.getLastRowToAutosize()) {
        setColumnWidthsAndUntrack(sheet);
    }
    return writtenLines;
}
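setColumnWidthsAndUntrack is not shown on this page. Below is a hypothetical sketch of what such a helper could look like with Apache POI's SXSSF auto-sizing API; the extra columnCount parameter and the body are assumptions, not conquery's actual implementation. SXSSF keeps only a sliding window of rows in memory, so width statistics are collected while tracked rows are written, and auto-sizing is capped via getLastRowToAutosize() because it is expensive.

// Hypothetical sketch, assuming Apache POI's SXSSFSheet auto-sizing API.
private void setColumnWidthsAndUntrack(SXSSFSheet sheet, int columnCount) {
    for (int column = 0; column < columnCount; column++) {
        // Uses the width statistics gathered for the tracked columns so far.
        sheet.autoSizeColumn(column);
    }
    // Stop collecting width statistics for rows written after this point.
    sheet.untrackAllColumnsForAutoSizing();
}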
use of com.bakdata.conquery.models.query.results.EntityResult in project conquery by bakdata.
the class CsvResultGenerationTest method generateExpectedCSV.
private String generateExpectedCSV(List<EntityResult> results, List<ResultInfo> resultInfos, PrintSettings settings) {
    List<String> expected = new ArrayList<>();
    expected.add(
            ResultTestUtil.ID_FIELDS.stream().map(info -> info.defaultColumnName(settings)).collect(Collectors.joining(","))
            + ","
            + getResultTypes().stream().map(ResultType::typeInfo).collect(Collectors.joining(","))
            + "\n");
    results.stream().map(EntityResult.class::cast).forEach(res -> {
        for (Object[] line : res.listResultLines()) {
            StringJoiner valueJoiner = new StringJoiner(",");
            valueJoiner.add(String.valueOf(res.getEntityId()));
            valueJoiner.add(String.valueOf(res.getEntityId()));
            for (int lIdx = 0; lIdx < line.length; lIdx++) {
                Object val = line[lIdx];
                if (val == null) {
                    valueJoiner.add("");
                    continue;
                }
                ResultInfo info = resultInfos.get(lIdx);
                final String printVal = info.getType().printNullable(settings, val);
                valueJoiner.add(printVal.contains(String.valueOf(CONFIG.getCsv().getDelimeter())) ? "\"" + printVal + "\"" : printVal);
            }
            expected.add(valueJoiner + "\n");
        }
    });
    return expected.stream().collect(Collectors.joining());
}
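The quoting above follows a minimal rule: a printed value is wrapped in double quotes only if it contains the configured delimiter. As a standalone illustration (hypothetical helper, not part of the test; full CSV escaping would also double embedded quotes):

// Illustrative helper mirroring the expected-CSV quoting rule above.
private static String quoteIfNeeded(String printVal, String delimiter) {
    return printVal.contains(delimiter) ? "\"" + printVal + "\"" : printVal;
}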
use of com.bakdata.conquery.models.query.results.EntityResult in project conquery by bakdata.
the class CsvResultGenerationTest method writeAndRead.
@Test
void writeAndRead() throws IOException {
    // Prepare the input data
    PrintSettings printSettings = new PrintSettings(
            true,
            Locale.GERMAN,
            null,
            CONFIG,
            (cer) -> EntityPrintId.from(Integer.toString(cer.getEntityId()), Integer.toString(cer.getEntityId())),
            (selectInfo) -> selectInfo.getSelect().getLabel());
    // The shard nodes send Object[]; since Jackson is used for deserialization, nested collections always arrive as lists because they are not further specialized.
    List<EntityResult> results = getTestEntityResults();
    ManagedQuery mquery = getTestQuery();
    // First we write to the buffer, then we read it back and compare it against the expected CSV.
    StringWriter writer = new StringWriter();
    CsvRenderer renderer = new CsvRenderer(CONFIG.getCsv().createWriter(writer), printSettings);
    renderer.toCSV(ResultTestUtil.ID_FIELDS, mquery.getResultInfos(), mquery.streamResults());
    String computed = writer.toString();
    String expected = generateExpectedCSV(results, mquery.getResultInfos(), printSettings);
    log.info("Wrote and then read this CSV data: {}", computed);
    assertThat(computed).isNotEmpty();
    assertThat(computed).isEqualTo(expected);
}
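Since the assertion only compares strings, a round-trip check could optionally be added. A hypothetical sketch, assuming conquery's CSV writer is backed by univocity-parsers (the parser setup below is an assumption and not part of the original test):

// Hypothetical round-trip check: parse the produced CSV back and make sure it is well-formed.
CsvParserSettings parserSettings = new CsvParserSettings();
parserSettings.getFormat().setDelimiter(CONFIG.getCsv().getDelimeter());
List<String[]> rows = new CsvParser(parserSettings).parseAll(new StringReader(computed));
assertThat(rows).hasSizeGreaterThan(1); // header plus at least one result line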
use of com.bakdata.conquery.models.query.results.EntityResult in project conquery by bakdata.
the class ArrowRenderer method write.
public static void write(ArrowWriter writer, VectorSchemaRoot root, RowConsumer[] idWriter, RowConsumer[] valueWriter, PrintIdMapper idMapper, Stream<EntityResult> results, int batchSize) throws IOException {
    Preconditions.checkArgument(batchSize > 0, "Batch size needs to be larger than 0.");
    // TODO add time metric for writing
    log.trace("Starting result write");
    writer.start();
    int batchCount = 0;
    int batchLineCount = 0;
    Iterator<EntityResult> resultIterator = results.iterator();
    while (resultIterator.hasNext()) {
        EntityResult cer = resultIterator.next();
        for (Object[] line : cer.listResultLines()) {
            if (line.length != valueWriter.length) {
                throw new IllegalStateException("The number of value writers and values in a result line differs. Writers: " + valueWriter.length + " Line: " + line.length);
            }
            for (RowConsumer rowConsumer : idWriter) {
                // Write id information
                rowConsumer.accept(batchLineCount, idMapper.map(cer).getExternalId());
            }
            for (RowConsumer rowConsumer : valueWriter) {
                // Write values
                rowConsumer.accept(batchLineCount, line);
            }
            batchLineCount++;
            if (batchLineCount >= batchSize) {
                root.setRowCount(batchLineCount);
                writer.writeBatch();
                root.clear();
                batchLineCount = 0;
                // Count the completed batch so the summary log below is accurate.
                batchCount++;
            }
        }
    }
    // Flush the remaining partial batch, if any.
    if (batchLineCount > 0) {
        root.setRowCount(batchLineCount);
        writer.writeBatch();
        root.clear();
        batchCount++;
    }
    log.trace("Wrote {} batches of size {} (last batch might be smaller)", batchCount, batchSize);
    writer.end();
}
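A hypothetical caller sketch, writing the batches to an in-memory Arrow IPC stream. The names root, idWriter, valueWriter, idMapper and results are assumed to have been prepared elsewhere (in conquery they are derived from the query's result schema), and passing null as the DictionaryProvider simply disables dictionary encoding.

// Hypothetical caller: serialize results into an in-memory Arrow IPC stream.
ByteArrayOutputStream out = new ByteArrayOutputStream();
try (ArrowStreamWriter writer = new ArrowStreamWriter(root, null, Channels.newChannel(out))) {
    // write() drives writer.start()/writeBatch()/end(); batchSize here is an arbitrary example value.
    ArrowRenderer.write(writer, root, idWriter, valueWriter, idMapper, results, 1000);
}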