Use of com.bakdata.conquery.models.query.resultinfo.UniqueNamer in project conquery by bakdata.
From the class ArrowRenderer, method renderToStream:
public static void renderToStream(Function<VectorSchemaRoot, ArrowWriter> writerProducer, PrintSettings printSettings, int batchSize, List<ResultInfo> idHeaders, List<ResultInfo> resultInfo, Stream<EntityResult> results) throws IOException {
    // Combine id and value fields into one vector to build a schema
    final UniqueNamer uniqNamer = new UniqueNamer(printSettings);
    final List<Field> idFields = generateFields(idHeaders, uniqNamer);
    List<Field> fields = new ArrayList<>(idFields);
    fields.addAll(generateFields(resultInfo, uniqNamer));
    VectorSchemaRoot root = VectorSchemaRoot.create(new Schema(fields, null), ROOT_ALLOCATOR);

    // Build separate pipelines for id and value, as they have different sources but the same target
    RowConsumer[] idWriters = generateWriterPipeline(root, 0, idHeaders.size(), printSettings, null);
    RowConsumer[] valueWriter = generateWriterPipeline(root, idHeaders.size(), resultInfo.size(), printSettings, resultInfo);

    // Write the data
    try (ArrowWriter writer = writerProducer.apply(root)) {
        write(writer, root, idWriters, valueWriter, printSettings.getIdMapper(), results, batchSize);
    }
}
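The writer itself is supplied by the caller through writerProducer, so the same method can target a file, an HTTP response, or any other stream. Below is a minimal invocation sketch using Arrow's ArrowStreamWriter (which implements ArrowWriter); the helper name writeArrow and the surrounding values out, printSettings, idHeaders, resultInfos, and results are hypothetical placeholders, not part of conquery's API:

import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
import java.util.stream.Stream;
import org.apache.arrow.vector.ipc.ArrowStreamWriter;

// Hypothetical helper: renders query results as an Arrow IPC stream to `out`.
// ArrowStreamWriter implements ArrowWriter, so the lambda satisfies the
// Function<VectorSchemaRoot, ArrowWriter> parameter of renderToStream.
static void writeArrow(OutputStream out, PrintSettings printSettings,
                       List<ResultInfo> idHeaders, List<ResultInfo> resultInfos,
                       Stream<EntityResult> results) throws IOException {
    ArrowRenderer.renderToStream(
            root -> new ArrowStreamWriter(root, null, out), // no dictionary provider
            printSettings,
            1000, // batchSize: rows buffered per Arrow record batch
            idHeaders,
            resultInfos,
            results);
}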
Use of com.bakdata.conquery.models.query.resultinfo.UniqueNamer in project conquery by bakdata.
From the class CsvLineStreamRenderer, method toStream:
public Stream<String> toStream(List<ResultInfo> idHeaders, List<ResultInfo> infos, Stream<EntityResult> resultStream) {
    final UniqueNamer uniqNamer = new UniqueNamer(cfg);

    // Write the deduplicated header row first, then append the CSV body
    Stream.concat(idHeaders.stream(), infos.stream()).map(uniqNamer::getUniqueName).forEach(writer::addValue);
    return Stream.concat(Stream.of(writer.writeValuesToString()), createCSVBody(cfg, infos, resultStream));
}
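Both renderers lean on UniqueNamer to keep column labels distinct when several ResultInfos would print under the same name. A minimal standalone sketch of that deduplication pattern, assuming a simple counter-suffix scheme (an illustration only, not conquery's actual implementation):

import java.util.HashMap;
import java.util.Map;

// Sketch of the idea behind UniqueNamer: hand out each label once and
// suffix repeats with a running counter so column names stay unique.
final class UniqueNamerSketch {
    private final Map<String, Integer> seen = new HashMap<>();

    String getUniqueName(String label) {
        int count = seen.merge(label, 1, Integer::sum);
        return count == 1 ? label : label + "_" + (count - 1);
    }

    public static void main(String[] args) {
        UniqueNamerSketch namer = new UniqueNamerSketch();
        System.out.println(namer.getUniqueName("value")); // value
        System.out.println(namer.getUniqueName("value")); // value_1
        System.out.println(namer.getUniqueName("date"));  // date
    }
}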