Use of com.linkedin.pinot.core.operator.BaseOperator in project pinot by LinkedIn.
The class HllIndexCreationTest, method testColumnStatsWithStarTree.
@Test
public void testColumnStatsWithStarTree() throws Exception {
  SegmentWithHllIndexCreateHelper helper = null;
  boolean hasException = false;
  int maxDocLength = 10000;
  try {
    LOGGER.debug("================ With StarTree ================");
    helper = new SegmentWithHllIndexCreateHelper("withStarTree",
        getClass().getClassLoader().getResource(AVRO_DATA), timeColumnName, timeUnit, "starTreeSegment");
    SegmentIndexCreationDriver driver = helper.build(true, hllConfig);
    LOGGER.debug("================ Cardinality ================");
    for (String name : helper.getSchema().getColumnNames()) {
      LOGGER.debug("* " + name + ": " + driver.getColumnStatisticsCollector(name).getCardinality());
    }
    LOGGER.debug("Loading ...");
    IndexSegment indexSegment = Loaders.IndexSegment.load(helper.getSegmentDirectory(), ReadMode.mmap);
    // Select every document in the segment.
    int[] docIdSet = new int[maxDocLength];
    for (int i = 0; i < maxDocLength; i++) {
      docIdSet[i] = i;
    }
    // Each column's data source is exposed as a BaseOperator and handed to the DataFetcher.
    Map<String, BaseOperator> dataSourceMap = new HashMap<>();
    for (String column : indexSegment.getColumnNames()) {
      dataSourceMap.put(column, indexSegment.getDataSource(column));
    }
    DataBlockCache blockCache = new DataBlockCache(new DataFetcher(dataSourceMap));
    blockCache.initNewBlock(docIdSet, 0, maxDocLength);
    String[] strings = blockCache.getStringValueArrayForColumn("column1_hll");
    Assert.assertEquals(strings.length, maxDocLength);
    double[] doubles = blockCache.getDoubleValueArrayForColumn("column1");
    Assert.assertEquals(doubles.length, maxDocLength);
  } catch (Exception e) {
    hasException = true;
    LOGGER.error(e.getMessage(), e);
  } finally {
    if (helper != null) {
      helper.cleanTempDir();
    }
    Assert.assertFalse(hasException);
  }
}
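The same wiring recurs in every example on this page: each column's DataSource obtained from the IndexSegment is a BaseOperator, the per-column operators are collected into a map, and that map feeds a DataFetcher (optionally wrapped in a DataBlockCache). A minimal sketch of that step in isolation, assuming a segment already loaded through Loaders.IndexSegment.load; the helper name buildDataSourceMap is illustrative and not part of Pinot:

  // Sketch only: gather every column's data source (a BaseOperator) so a DataFetcher can read through them.
  private static Map<String, BaseOperator> buildDataSourceMap(IndexSegment indexSegment) {
    Map<String, BaseOperator> dataSourceMap = new HashMap<>();
    for (String column : indexSegment.getColumnNames()) {
      dataSourceMap.put(column, indexSegment.getDataSource(column));
    }
    return dataSourceMap;
  }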
Use of com.linkedin.pinot.core.operator.BaseOperator in project pinot by LinkedIn.
The class DataFetcherTest, method setup.
@BeforeClass
private void setup() throws Exception {
  GenericRow[] segmentData = new GenericRow[NUM_ROWS];
  // Generate random dimension and metric values.
  for (int i = 0; i < NUM_ROWS; i++) {
    double randomDouble = _random.nextDouble();
    String randomDoubleString = String.valueOf(randomDouble);
    _dimensionValues[i] = randomDoubleString;
    _intMetricValues[i] = (int) randomDouble;
    _longMetricValues[i] = (long) randomDouble;
    _floatMetricValues[i] = (float) randomDouble;
    _doubleMetricValues[i] = randomDouble;
    HashMap<String, Object> map = new HashMap<>();
    map.put(DIMENSION_NAME, _dimensionValues[i]);
    map.put(INT_METRIC_NAME, _intMetricValues[i]);
    map.put(LONG_METRIC_NAME, _longMetricValues[i]);
    map.put(FLOAT_METRIC_NAME, _floatMetricValues[i]);
    map.put(DOUBLE_METRIC_NAME, _doubleMetricValues[i]);
    map.put(NO_DICT_INT_METRIC_NAME, _intMetricValues[i]);
    map.put(NO_DICT_LONG_METRIC_NAME, _longMetricValues[i]);
    map.put(NO_DICT_FLOAT_METRIC_NAME, _floatMetricValues[i]);
    map.put(NO_DICT_DOUBLE_METRIC_NAME, _doubleMetricValues[i]);
    GenericRow genericRow = new GenericRow();
    genericRow.init(map);
    segmentData[i] = genericRow;
  }
  // Create an index segment with the random dimension and metric values.
  final Schema schema = new Schema();
  schema.addField(new DimensionFieldSpec(DIMENSION_NAME, FieldSpec.DataType.STRING, true));
  schema.addField(new MetricFieldSpec(INT_METRIC_NAME, FieldSpec.DataType.INT));
  schema.addField(new MetricFieldSpec(LONG_METRIC_NAME, FieldSpec.DataType.LONG));
  schema.addField(new MetricFieldSpec(FLOAT_METRIC_NAME, FieldSpec.DataType.FLOAT));
  schema.addField(new MetricFieldSpec(DOUBLE_METRIC_NAME, FieldSpec.DataType.DOUBLE));
  schema.addField(new MetricFieldSpec(NO_DICT_INT_METRIC_NAME, FieldSpec.DataType.INT));
  schema.addField(new MetricFieldSpec(NO_DICT_LONG_METRIC_NAME, FieldSpec.DataType.LONG));
  schema.addField(new MetricFieldSpec(NO_DICT_FLOAT_METRIC_NAME, FieldSpec.DataType.FLOAT));
  schema.addField(new MetricFieldSpec(NO_DICT_DOUBLE_METRIC_NAME, FieldSpec.DataType.DOUBLE));
  SegmentGeneratorConfig config = new SegmentGeneratorConfig(schema);
  FileUtils.deleteQuietly(new File(INDEX_DIR_PATH));
  config.setOutDir(INDEX_DIR_PATH);
  config.setSegmentName(SEGMENT_NAME);
  config.setRawIndexCreationColumns(
      Arrays.asList(NO_DICT_INT_METRIC_NAME, NO_DICT_LONG_METRIC_NAME, NO_DICT_FLOAT_METRIC_NAME, NO_DICT_DOUBLE_METRIC_NAME));
  SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
  driver.init(config, new TestDataRecordReader(schema, segmentData));
  driver.build();
  IndexSegment indexSegment = Loaders.IndexSegment.load(new File(INDEX_DIR_PATH, SEGMENT_NAME), ReadMode.heap);
  Map<String, BaseOperator> dataSourceMap = new HashMap<>();
  for (String column : indexSegment.getColumnNames()) {
    dataSourceMap.put(column, indexSegment.getDataSource(column));
  }
  // Get a data fetcher for the index segment.
  _dataFetcher = new DataFetcher(dataSourceMap);
}
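The setup only builds the DataFetcher; a check against the generated values would typically read values back through a DataBlockCache, as in the HLL test above. A minimal sketch of such a check, assuming the fields shown in the setup (NUM_ROWS, DOUBLE_METRIC_NAME, _doubleMetricValues, _dataFetcher); the test name and its use of DataBlockCache here are illustrative, not one of the class's actual tests:

  // Illustrative only: fetches the double metric through a DataBlockCache and compares it
  // with the values generated in setup(). Not an actual test method of DataFetcherTest.
  @Test
  public void testFetchDoubleValuesSketch() {
    int[] docIds = new int[NUM_ROWS];
    for (int i = 0; i < NUM_ROWS; i++) {
      docIds[i] = i;
    }
    DataBlockCache blockCache = new DataBlockCache(_dataFetcher);
    blockCache.initNewBlock(docIds, 0, NUM_ROWS);
    double[] doubleValues = blockCache.getDoubleValueArrayForColumn(DOUBLE_METRIC_NAME);
    for (int i = 0; i < NUM_ROWS; i++) {
      Assert.assertEquals(doubleValues[i], _doubleMetricValues[i], 1e-9);
    }
  }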
Use of com.linkedin.pinot.core.operator.BaseOperator in project pinot by LinkedIn.
The class DefaultGroupKeyGeneratorTest, method setup.
@BeforeClass
private void setup() throws Exception {
  GenericRow[] segmentData = new GenericRow[NUM_ROWS];
  int value = _random.nextInt(MAX_STEP_LENGTH);
  // Generate random values for the segment.
  for (int i = 0; i < UNIQUE_ROWS; i++) {
    Map<String, Object> map = new HashMap<>();
    for (String singleValueColumn : SINGLE_VALUE_COLUMNS) {
      map.put(singleValueColumn, value);
      value += 1 + _random.nextInt(MAX_STEP_LENGTH);
    }
    for (String multiValueColumn : MULTI_VALUE_COLUMNS) {
      int numMultiValues = 1 + _random.nextInt(MAX_NUM_MULTI_VALUES);
      Integer[] values = new Integer[numMultiValues];
      for (int k = 0; k < numMultiValues; k++) {
        values[k] = value;
        value += 1 + _random.nextInt(MAX_STEP_LENGTH);
      }
      map.put(multiValueColumn, values);
    }
    GenericRow genericRow = new GenericRow();
    genericRow.init(map);
    segmentData[i] = genericRow;
  }
  for (int i = UNIQUE_ROWS; i < NUM_ROWS; i += UNIQUE_ROWS) {
    System.arraycopy(segmentData, 0, segmentData, i, UNIQUE_ROWS);
  }
  // Create an index segment with the random values.
  Schema schema = new Schema();
  for (String singleValueColumn : SINGLE_VALUE_COLUMNS) {
    DimensionFieldSpec dimensionFieldSpec = new DimensionFieldSpec(singleValueColumn, FieldSpec.DataType.INT, true);
    schema.addField(dimensionFieldSpec);
  }
  for (String multiValueColumn : MULTI_VALUE_COLUMNS) {
    DimensionFieldSpec dimensionFieldSpec = new DimensionFieldSpec(multiValueColumn, FieldSpec.DataType.INT, false);
    schema.addField(dimensionFieldSpec);
  }
  SegmentGeneratorConfig config = new SegmentGeneratorConfig(schema);
  FileUtils.deleteQuietly(new File(INDEX_DIR_PATH));
  config.setOutDir(INDEX_DIR_PATH);
  config.setSegmentName(SEGMENT_NAME);
  SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
  driver.init(config, new TestDataRecordReader(schema, segmentData));
  driver.build();
  IndexSegment indexSegment = Loaders.IndexSegment.load(new File(INDEX_DIR_PATH, SEGMENT_NAME), ReadMode.heap);
  // Get a data fetcher for the index segment.
  Map<String, BaseOperator> dataSourceMap = new HashMap<>();
  Map<String, Block> blockMap = new HashMap<>();
  for (String column : indexSegment.getColumnNames()) {
    DataSource dataSource = indexSegment.getDataSource(column);
    dataSourceMap.put(column, dataSource);
    blockMap.put(column, dataSource.getNextBlock());
  }
  // Generate a random test doc id set.
  int num1 = _random.nextInt(50);
  int num2 = num1 + 1 + _random.nextInt(50);
  for (int i = 0; i < 20; i += 2) {
    _testDocIdSet[i] = num1 + 50 * i;
    _testDocIdSet[i + 1] = num2 + 50 * i;
  }
  DataFetcher dataFetcher = new DataFetcher(dataSourceMap);
  DocIdSetBlock docIdSetBlock = new DocIdSetBlock(_testDocIdSet, _testDocIdSet.length);
  ProjectionBlock projectionBlock = new ProjectionBlock(blockMap, new DataBlockCache(dataFetcher), docIdSetBlock);
  _transformBlock = new TransformBlock(projectionBlock, new HashMap<String, BlockValSet>());
}
Use of com.linkedin.pinot.core.operator.BaseOperator in project pinot by LinkedIn.
The class ProjectionPlanNode, method run.
@Override
public Operator run() {
  long start = System.currentTimeMillis();
  if (_projectionOperator == null) {
    Map<String, BaseOperator> dataSourceMap = new HashMap<>();
    BReusableFilteredDocIdSetOperator docIdSetOperator = (BReusableFilteredDocIdSetOperator) _docIdSetPlanNode.run();
    for (String column : _dataSourcePlanNodeMap.keySet()) {
      ColumnarDataSourcePlanNode columnarDataSourcePlanNode = _dataSourcePlanNodeMap.get(column);
      BaseOperator operator = columnarDataSourcePlanNode.run();
      dataSourceMap.put(column, operator);
    }
    _projectionOperator = new MProjectionOperator(dataSourceMap, docIdSetOperator);
  }
  long end = System.currentTimeMillis();
  LOGGER.debug("Time taken in ProjectionPlanNode: " + (end - start) + "ms");
  return _projectionOperator;
}
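Note that run() builds the MProjectionOperator only on the first call and returns the cached instance afterwards. A stripped-down sketch of that lazy-initialization pattern, with buildProjectionOperator standing in for the per-column wiring above (the helper name is illustrative, not part of Pinot):

  // Sketch of the caching pattern used above, isolated from the surrounding plan-node code.
  private MProjectionOperator _projectionOperator;

  public Operator run() {
    if (_projectionOperator == null) {
      // The expensive wiring of per-column operators happens only once.
      _projectionOperator = buildProjectionOperator();
    }
    return _projectionOperator;
  }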
Use of com.linkedin.pinot.core.operator.BaseOperator in project pinot by LinkedIn.
The class DefaultAggregationExecutorTest, method testAggregation.
/**
 * Runs 'sum', 'min' & 'max' aggregation functions on the DefaultAggregationExecutor.
 * Asserts that the aggregation results returned by the executor are as expected.
 */
@Test
public void testAggregation() {
  Map<String, BaseOperator> dataSourceMap = new HashMap<>();
  for (String column : _indexSegment.getColumnNames()) {
    dataSourceMap.put(column, _indexSegment.getDataSource(column));
  }
  // Wire a projection over the whole segment: match every document, project all columns.
  int totalRawDocs = _indexSegment.getSegmentMetadata().getTotalRawDocs();
  MatchEntireSegmentOperator matchEntireSegmentOperator = new MatchEntireSegmentOperator(totalRawDocs);
  BReusableFilteredDocIdSetOperator docIdSetOperator =
      new BReusableFilteredDocIdSetOperator(matchEntireSegmentOperator, totalRawDocs, 10000);
  MProjectionOperator projectionOperator = new MProjectionOperator(dataSourceMap, docIdSetOperator);
  TransformExpressionOperator transformOperator =
      new TransformExpressionOperator(projectionOperator, Collections.<TransformExpressionTree>emptyList());
  TransformBlock transformBlock = (TransformBlock) transformOperator.nextBlock();
  // Build one aggregation function context per aggregation in the query.
  int numAggFuncs = _aggregationInfoList.size();
  AggregationFunctionContext[] aggrFuncContextArray = new AggregationFunctionContext[numAggFuncs];
  AggregationFunctionInitializer aggFuncInitializer =
      new AggregationFunctionInitializer(_indexSegment.getSegmentMetadata());
  for (int i = 0; i < numAggFuncs; i++) {
    AggregationInfo aggregationInfo = _aggregationInfoList.get(i);
    aggrFuncContextArray[i] = AggregationFunctionContext.instantiate(aggregationInfo);
    aggrFuncContextArray[i].getAggregationFunction().accept(aggFuncInitializer);
  }
  AggregationExecutor aggregationExecutor = new DefaultAggregationExecutor(aggrFuncContextArray);
  aggregationExecutor.init();
  aggregationExecutor.aggregate(transformBlock);
  aggregationExecutor.finish();
  List<Object> result = aggregationExecutor.getResult();
  for (int i = 0; i < result.size(); i++) {
    double actual = (double) result.get(i);
    double expected = computeAggregation(AGGREGATION_FUNCTIONS[i], _inputData[i]);
    Assert.assertEquals(actual, expected,
        "Aggregation mis-match for function " + AGGREGATION_FUNCTIONS[i] + ", Expected: " + expected + " Actual: " + actual);
  }
}
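The expected values come from a computeAggregation helper that this page does not show. A minimal sketch of what such a helper could look like for the 'sum', 'min' and 'max' functions named in the javadoc, assuming _inputData holds each aggregation column's raw values as a double[]; the implementation below is illustrative, not the class's actual helper:

  // Illustrative reference implementation; the real helper in the test class is not shown on this page.
  private double computeAggregation(String aggregationFunction, double[] values) {
    double sum = 0.0;
    double min = Double.POSITIVE_INFINITY;
    double max = Double.NEGATIVE_INFINITY;
    for (double value : values) {
      sum += value;
      min = Math.min(min, value);
      max = Math.max(max, value);
    }
    switch (aggregationFunction) {
      case "sum":
        return sum;
      case "min":
        return min;
      case "max":
        return max;
      default:
        throw new IllegalArgumentException("Unsupported aggregation function: " + aggregationFunction);
    }
  }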