Use of org.apache.druid.segment.generator.DataGenerator in project druid by druid-io.
The class ScanBenchmark, method setup().
/**
 * Set up everything common for benchmarking both the incremental-index and the queryable-index.
 */
@Setup
public void setup() {
  log.info("SETUP CALLED AT " + System.currentTimeMillis());
  ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
  setupQueries();

  String[] schemaQuery = schemaAndQuery.split("\\.");
  String schemaName = schemaQuery[0];
  String queryName = schemaQuery[1];

  schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
  queryBuilder = SCHEMA_QUERY_MAP.get(schemaName).get(queryName);
  queryBuilder.limit(limit);
  query = queryBuilder.build();

  generator = new DataGenerator(
      schemaInfo.getColumnSchemas(),
      System.currentTimeMillis(),
      schemaInfo.getDataInterval(),
      rowsPerSegment
  );

  final ScanQueryConfig config = new ScanQueryConfig().setLegacy(false);
  factory = new ScanQueryRunnerFactory(
      new ScanQueryQueryToolChest(config, DefaultGenericQueryMetricsFactory.instance()),
      new ScanQueryEngine(),
      new ScanQueryConfig()
  );
}
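A minimal sketch, assuming the generator configured above is later drained into an incremental index before the scan-query benchmark methods run. DataGenerator.addToIndex(...) is the same call used in IndexMergeBenchmark below; makeIncIndex() is a hypothetical stand-in for whatever incremental-index factory the benchmark class provides.

// Hypothetical helper, not part of ScanBenchmark's setup() above.
private IncrementalIndex makeSegmentData() {
  IncrementalIndex incIndex = makeIncIndex();       // assumed helper on the benchmark class
  generator.addToIndex(incIndex, rowsPerSegment);   // fill one segment's worth of generated rows
  return incIndex;
}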
Use of org.apache.druid.segment.generator.DataGenerator in project druid by druid-io.
The class IndexMergeBenchmark, method setup().
@Setup
public void setup() throws IOException {
  log.info("SETUP CALLED AT " + System.currentTimeMillis());

  indexMergerV9 = new IndexMergerV9(JSON_MAPPER, INDEX_IO, getSegmentWriteOutMediumFactory(factoryType));
  ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());

  indexesToMerge = new ArrayList<>();
  schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schema);

  for (int i = 0; i < numSegments; i++) {
    DataGenerator gen = new DataGenerator(
        schemaInfo.getColumnSchemas(),
        RNG_SEED + i,
        schemaInfo.getDataInterval(),
        rowsPerSegment
    );

    IncrementalIndex incIndex = makeIncIndex();
    gen.addToIndex(incIndex, rowsPerSegment);

    tmpDir = FileUtils.createTempDir();
    log.info("Using temp dir: " + tmpDir.getAbsolutePath());

    File indexFile = indexMergerV9.persist(incIndex, tmpDir, new IndexSpec(), null);
    QueryableIndex qIndex = INDEX_IO.loadIndex(indexFile);
    indexesToMerge.add(qIndex);
  }
}
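A minimal sketch of the merge step that setup() prepares indexesToMerge for. The mergeQueryableIndex(...) signature differs across Druid versions, and the rollup flag and getAggsArray() accessor are assumptions here, so treat this as an illustration of the pattern rather than the benchmark's actual body.

// Hypothetical benchmark body, not shown in the snippet above.
@Benchmark
public void mergeV9(Blackhole blackhole) throws Exception {
  File outDir = FileUtils.createTempDir();
  File mergedFile = indexMergerV9.mergeQueryableIndex(
      indexesToMerge,               // QueryableIndex list built in setup()
      true,                         // rollup flag (assumed; the real benchmark may parameterize it)
      schemaInfo.getAggsArray(),    // assumed accessor for the schema's aggregator factories
      outDir,
      new IndexSpec(),
      null                          // SegmentWriteOutMediumFactory, as in persist(...) above
  );
  blackhole.consume(mergedFile);
}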
Use of org.apache.druid.segment.generator.DataGenerator in project druid by druid-io.
The class TimeseriesBenchmark, method setup().
/**
 * Set up everything common for benchmarking both the incremental-index and the queryable-index.
 */
@Setup
public void setup() {
  log.info("SETUP CALLED AT " + System.currentTimeMillis());
  ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
  setupQueries();

  String[] schemaQuery = schemaAndQuery.split("\\.");
  String schemaName = schemaQuery[0];
  String queryName = schemaQuery[1];

  schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
  query = SCHEMA_QUERY_MAP.get(schemaName).get(queryName);

  generator = new DataGenerator(
      schemaInfo.getColumnSchemas(),
      RNG_SEED,
      schemaInfo.getDataInterval(),
      rowsPerSegment
  );

  factory = new TimeseriesQueryRunnerFactory(
      new TimeseriesQueryQueryToolChest(),
      new TimeseriesQueryEngine(),
      QueryBenchmarkUtil.NOOP_QUERYWATCHER
  );
}
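A minimal sketch of how the factory and query built in setup() are typically combined to execute the timeseries query over a single segment. The Segment argument and the exact QueryRunner.run(...) overload vary by Druid version, so this is an illustration rather than the benchmark's actual code.

// Hypothetical helper, not part of TimeseriesBenchmark's setup() above.
private List<Result<TimeseriesResultValue>> runQueryOnSegment(Segment segment) {
  QueryRunner<Result<TimeseriesResultValue>> runner = factory.createRunner(segment);
  // QueryPlus.wrap(...) packages the query built in setup(); toList() materializes the lazy Sequence.
  return runner.run(QueryPlus.wrap(query)).toList();
}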
Use of org.apache.druid.segment.generator.DataGenerator in project druid by druid-io.
The class TopNBenchmark, method setup().
/**
 * Set up everything common for benchmarking both the incremental-index and the queryable-index.
 */
@Setup
public void setup() {
  log.info("SETUP CALLED AT " + System.currentTimeMillis());
  ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
  setupQueries();

  String[] schemaQuery = schemaAndQuery.split("\\.");
  String schemaName = schemaQuery[0];
  String queryName = schemaQuery[1];

  schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
  queryBuilder = SCHEMA_QUERY_MAP.get(schemaName).get(queryName);
  queryBuilder.threshold(threshold);
  query = queryBuilder.build();

  generator = new DataGenerator(
      schemaInfo.getColumnSchemas(),
      RNG_SEED,
      schemaInfo.getDataInterval(),
      rowsPerSegment
  );
  factory = new TopNQueryRunnerFactory(
      new StupidPool<>(
          "TopNBenchmark-compute-bufferPool",
          new OffheapBufferGenerator("compute", 250000000),
          0,
          Integer.MAX_VALUE
      ),
      new TopNQueryQueryToolChest(new TopNQueryConfig()),
      QueryBenchmarkUtil.NOOP_QUERYWATCHER
  );
}
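An illustration of how the compute-buffer pool constructed above hands out off-heap buffers to the TopN engine. StupidPool and ResourceHolder are real Druid types and the names and sizes mirror the factory call in setup(), but this standalone usage is an assumption added for clarity, not code from TopNBenchmark.

// Illustration only, not part of the benchmark class.
StupidPool<ByteBuffer> computePool = new StupidPool<>(
    "TopNBenchmark-compute-bufferPool",
    new OffheapBufferGenerator("compute", 250_000_000),  // ~250 MB direct buffer per lease
    0,                                                    // no buffers pre-allocated
    Integer.MAX_VALUE                                     // effectively unbounded cache of returned buffers
);
try (ResourceHolder<ByteBuffer> holder = computePool.take()) {
  ByteBuffer computeBuffer = holder.get();  // the TopN engine aggregates into this direct buffer
  computeBuffer.clear();                    // returned to the pool when the holder is closed
}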
Use of org.apache.druid.segment.generator.DataGenerator in project druid by druid-io.
The class IndexIngestionBenchmark, method setup().
@Setup
public void setup() throws JsonProcessingException {
  ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
  schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schema);

  // Creates an AppendableIndexSpec that corresponds to the indexType parametrization.
  // It is used in {@code makeIncIndex()} to instantiate an incremental-index of the specified type.
  appendableIndexSpec = IncrementalIndexCreator.parseIndexType(indexType);

  DataGenerator gen = new DataGenerator(
      schemaInfo.getColumnSchemas(),
      RNG_SEED,
      schemaInfo.getDataInterval().getStartMillis(),
      IndexPersistBenchmark.getValuesPerTimestamp(rollupOpportunity),
      1000.0
  );
  rows = gen.toList(rowsPerSegment);
}
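A minimal sketch of the ingestion step that setup() prepares the generated rows for. makeIncIndex() is assumed to build an incremental index from appendableIndexSpec, and the return type of IncrementalIndex.add(...) varies by Druid version, so the result is simply fed to the JMH Blackhole.

// Hypothetical benchmark body, not shown in the snippet above.
@Benchmark
public void addRows(Blackhole blackhole) throws Exception {
  IncrementalIndex incIndex = makeIncIndex();   // assumed helper using appendableIndexSpec
  for (InputRow row : rows) {
    blackhole.consume(incIndex.add(row));       // ingest one generated row
  }
}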