Use of com.linkedin.pinot.core.indexsegment.IndexSegment in project pinot by linkedin.
The class DataFetcherTest, method setup.
@BeforeClass
private void setup() throws Exception {
  GenericRow[] segmentData = new GenericRow[NUM_ROWS];
  // Generate random dimension and metric values.
  for (int i = 0; i < NUM_ROWS; i++) {
    double randomDouble = _random.nextDouble();
    String randomDoubleString = String.valueOf(randomDouble);
    _dimensionValues[i] = randomDoubleString;
    _intMetricValues[i] = (int) randomDouble;
    _longMetricValues[i] = (long) randomDouble;
    _floatMetricValues[i] = (float) randomDouble;
    _doubleMetricValues[i] = randomDouble;
    HashMap<String, Object> map = new HashMap<>();
    map.put(DIMENSION_NAME, _dimensionValues[i]);
    map.put(INT_METRIC_NAME, _intMetricValues[i]);
    map.put(LONG_METRIC_NAME, _longMetricValues[i]);
    map.put(FLOAT_METRIC_NAME, _floatMetricValues[i]);
    map.put(DOUBLE_METRIC_NAME, _doubleMetricValues[i]);
    map.put(NO_DICT_INT_METRIC_NAME, _intMetricValues[i]);
    map.put(NO_DICT_LONG_METRIC_NAME, _longMetricValues[i]);
    map.put(NO_DICT_FLOAT_METRIC_NAME, _floatMetricValues[i]);
    map.put(NO_DICT_DOUBLE_METRIC_NAME, _doubleMetricValues[i]);
    GenericRow genericRow = new GenericRow();
    genericRow.init(map);
    segmentData[i] = genericRow;
  }
  // Create an index segment with the random dimension and metric values.
  final Schema schema = new Schema();
  schema.addField(new DimensionFieldSpec(DIMENSION_NAME, FieldSpec.DataType.STRING, true));
  schema.addField(new MetricFieldSpec(INT_METRIC_NAME, FieldSpec.DataType.INT));
  schema.addField(new MetricFieldSpec(LONG_METRIC_NAME, FieldSpec.DataType.LONG));
  schema.addField(new MetricFieldSpec(FLOAT_METRIC_NAME, FieldSpec.DataType.FLOAT));
  schema.addField(new MetricFieldSpec(DOUBLE_METRIC_NAME, FieldSpec.DataType.DOUBLE));
  schema.addField(new MetricFieldSpec(NO_DICT_INT_METRIC_NAME, FieldSpec.DataType.INT));
  schema.addField(new MetricFieldSpec(NO_DICT_LONG_METRIC_NAME, FieldSpec.DataType.LONG));
  schema.addField(new MetricFieldSpec(NO_DICT_FLOAT_METRIC_NAME, FieldSpec.DataType.FLOAT));
  schema.addField(new MetricFieldSpec(NO_DICT_DOUBLE_METRIC_NAME, FieldSpec.DataType.DOUBLE));
  // Build the segment, creating raw (no-dictionary) indexes for the NO_DICT columns.
  SegmentGeneratorConfig config = new SegmentGeneratorConfig(schema);
  FileUtils.deleteQuietly(new File(INDEX_DIR_PATH));
  config.setOutDir(INDEX_DIR_PATH);
  config.setSegmentName(SEGMENT_NAME);
  config.setRawIndexCreationColumns(
      Arrays.asList(NO_DICT_INT_METRIC_NAME, NO_DICT_LONG_METRIC_NAME, NO_DICT_FLOAT_METRIC_NAME,
          NO_DICT_DOUBLE_METRIC_NAME));
  SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
  driver.init(config, new TestDataRecordReader(schema, segmentData));
  driver.build();
  // Load the segment on-heap and build a column -> data source map.
  IndexSegment indexSegment = Loaders.IndexSegment.load(new File(INDEX_DIR_PATH, SEGMENT_NAME), ReadMode.heap);
  Map<String, BaseOperator> dataSourceMap = new HashMap<>();
  for (String column : indexSegment.getColumnNames()) {
    dataSourceMap.put(column, indexSegment.getDataSource(column));
  }
  // Get a data fetcher for the index segment.
  _dataFetcher = new DataFetcher(dataSourceMap);
}
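A typical test built on this fixture picks a set of doc ids and checks the fetched values against the arrays generated above. The sketch below is illustrative only: the fetchIntValues name and signature are assumptions, not confirmed DataFetcher API.

// Hedged sketch: verify fetched values match the generated data.
// NOTE: fetchIntValues(...) and its signature are assumed for illustration.
@Test
public void testFetchIntValues() {
  int[] docIds = { 0, 1, 5, NUM_ROWS - 1 };
  int[] fetched = new int[docIds.length];
  _dataFetcher.fetchIntValues(INT_METRIC_NAME, docIds, docIds.length, fetched);
  for (int i = 0; i < docIds.length; i++) {
    Assert.assertEquals(fetched[i], _intMetricValues[docIds[i]]);
  }
}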
Use of com.linkedin.pinot.core.indexsegment.IndexSegment in project pinot by linkedin.
The class StarTreeOnHeapToOffHeapConverter, method execute.
@Override
public boolean execute() throws Exception {
  File indexDir = new File(_segmentDir);
  long start = System.currentTimeMillis();
  LOGGER.info("Loading segment {}", indexDir.getName());
  IndexSegment segment = Loaders.IndexSegment.load(indexDir, ReadMode.heap);
  long end = System.currentTimeMillis();
  LOGGER.info("Loaded segment {} in {} ms", indexDir.getName(), (end - start));
  start = end;
  StarTreeInterf starTreeOnHeap = segment.getStarTree();
  // Convert the on-heap star tree to the off-heap format, writing it to a temp file.
  File starTreeOffHeapFile = new File(TMP_DIR, (V1Constants.STAR_TREE_INDEX_FILE + System.currentTimeMillis()));
  StarTreeSerDe.writeTreeOffHeapFormat(starTreeOnHeap, starTreeOffHeapFile);
  // Copy all the indexes into the output directory.
  File outputDir = new File(_outputDir);
  FileUtils.deleteQuietly(outputDir);
  FileUtils.copyDirectory(indexDir, outputDir);
  // Replace the copied on-heap star tree file with the off-heap one.
  FileUtils.deleteQuietly(new File(_outputDir, V1Constants.STAR_TREE_INDEX_FILE));
  FileUtils.moveFile(starTreeOffHeapFile, new File(_outputDir, V1Constants.STAR_TREE_INDEX_FILE));
  end = System.currentTimeMillis();
  LOGGER.info("Converted segment in {} ms", (end - start));
  return true;
}
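A caller of execute() might sanity-check the conversion by reloading the output directory; a minimal sketch using only calls that appear in this listing (treating a null star tree as failure is an assumption about the expected invariant):

// Minimal sketch: reload the converted segment and confirm the star tree survived.
// (Assumes a non-null star tree is the expected post-conversion invariant.)
IndexSegment converted = Loaders.IndexSegment.load(new File(_outputDir), ReadMode.heap);
if (converted.getStarTree() == null) {
  throw new IllegalStateException("Converted segment has no star tree");
}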
Use of com.linkedin.pinot.core.indexsegment.IndexSegment in project pinot by linkedin.
The class SegmentInfoProvider, method readOneSegment.
/**
 * Read the metadata of the given segment file and collect:
 * - Unique metric columns
 * - Unique single-value dimension columns
 * - Unique values for each single-value dimension column
 *
 * @param segmentFile segment file (or directory).
 * @param uniqueMetrics buffer for unique metric columns.
 * @param uniqueSingleValueDimensions buffer for unique single-value dimension columns.
 * @param singleValueDimensionValuesMap buffer mapping single-value dimension columns to their unique values.
 * @throws Exception
 */
private void readOneSegment(File segmentFile, Set<String> uniqueMetrics, Set<String> uniqueSingleValueDimensions,
    Map<String, Set<Object>> singleValueDimensionValuesMap) throws Exception {
  // Get the segment directory from the segment file (decompress if necessary).
  File segmentDir;
  File tmpDir = null;
  if (segmentFile.isFile()) {
    tmpDir = File.createTempFile(SEGMENT_INFO_PROVIDER, null, new File(TMP_DIR));
    FileUtils.deleteQuietly(tmpDir);
    tmpDir.mkdir();
    TarGzCompressionUtils.unTar(segmentFile, tmpDir);
    segmentDir = tmpDir.listFiles()[0];
  } else {
    segmentDir = segmentFile;
  }
  IndexSegment indexSegment = Loaders.IndexSegment.load(segmentDir, ReadMode.heap);
  Schema schema = indexSegment.getSegmentMetadata().getSchema();
  // Add the time column if it exists.
  String timeColumn = schema.getTimeColumnName();
  if (timeColumn != null) {
    uniqueSingleValueDimensions.add(timeColumn);
    loadValuesForSingleValueDimension(indexSegment, singleValueDimensionValuesMap, timeColumn);
  }
  // Add all metric columns.
  uniqueMetrics.addAll(schema.getMetricNames());
  // Add all single-value dimension columns.
  for (DimensionFieldSpec fieldSpec : schema.getDimensionFieldSpecs()) {
    if (!fieldSpec.isSingleValueField()) {
      continue;
    }
    String column = fieldSpec.getName();
    uniqueSingleValueDimensions.add(column);
    loadValuesForSingleValueDimension(indexSegment, singleValueDimensionValuesMap, column);
  }
  // Clean up the temp directory if the segment was decompressed.
  if (tmpDir != null) {
    FileUtils.deleteQuietly(tmpDir);
  }
}
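readOneSegment is private, so some driver inside SegmentInfoProvider must walk a set of segment files and accumulate the three buffers. A hypothetical sketch of that loop (segmentDirPath and the buffer setup are assumptions; only readOneSegment comes from the source):

// Hypothetical driver loop; segmentDirPath is an assumed input.
Set<String> uniqueMetrics = new HashSet<>();
Set<String> uniqueSingleValueDimensions = new HashSet<>();
Map<String, Set<Object>> singleValueDimensionValuesMap = new HashMap<>();
File[] segmentFiles = new File(segmentDirPath).listFiles();
if (segmentFiles != null) {
  for (File segmentFile : segmentFiles) {
    readOneSegment(segmentFile, uniqueMetrics, uniqueSingleValueDimensions, singleValueDimensionValuesMap);
  }
}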
Use of com.linkedin.pinot.core.indexsegment.IndexSegment in project pinot by linkedin.
The class DefaultGroupKeyGeneratorTest, method setup.
@BeforeClass
private void setup() throws Exception {
  GenericRow[] segmentData = new GenericRow[NUM_ROWS];
  int value = _random.nextInt(MAX_STEP_LENGTH);
  // Generate random values for the segment: strictly increasing, so every generated row is unique.
  for (int i = 0; i < UNIQUE_ROWS; i++) {
    Map<String, Object> map = new HashMap<>();
    for (String singleValueColumn : SINGLE_VALUE_COLUMNS) {
      map.put(singleValueColumn, value);
      value += 1 + _random.nextInt(MAX_STEP_LENGTH);
    }
    for (String multiValueColumn : MULTI_VALUE_COLUMNS) {
      int numMultiValues = 1 + _random.nextInt(MAX_NUM_MULTI_VALUES);
      Integer[] values = new Integer[numMultiValues];
      for (int k = 0; k < numMultiValues; k++) {
        values[k] = value;
        value += 1 + _random.nextInt(MAX_STEP_LENGTH);
      }
      map.put(multiValueColumn, values);
    }
    GenericRow genericRow = new GenericRow();
    genericRow.init(map);
    segmentData[i] = genericRow;
  }
  // Repeat the unique rows to fill the segment.
  for (int i = UNIQUE_ROWS; i < NUM_ROWS; i += UNIQUE_ROWS) {
    System.arraycopy(segmentData, 0, segmentData, i, UNIQUE_ROWS);
  }
  // Create an index segment with the random values.
  Schema schema = new Schema();
  for (String singleValueColumn : SINGLE_VALUE_COLUMNS) {
    DimensionFieldSpec dimensionFieldSpec = new DimensionFieldSpec(singleValueColumn, FieldSpec.DataType.INT, true);
    schema.addField(dimensionFieldSpec);
  }
  for (String multiValueColumn : MULTI_VALUE_COLUMNS) {
    DimensionFieldSpec dimensionFieldSpec = new DimensionFieldSpec(multiValueColumn, FieldSpec.DataType.INT, false);
    schema.addField(dimensionFieldSpec);
  }
  SegmentGeneratorConfig config = new SegmentGeneratorConfig(schema);
  FileUtils.deleteQuietly(new File(INDEX_DIR_PATH));
  config.setOutDir(INDEX_DIR_PATH);
  config.setSegmentName(SEGMENT_NAME);
  SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
  driver.init(config, new TestDataRecordReader(schema, segmentData));
  driver.build();
  IndexSegment indexSegment = Loaders.IndexSegment.load(new File(INDEX_DIR_PATH, SEGMENT_NAME), ReadMode.heap);
  // Build data source and block maps for the index segment.
  Map<String, BaseOperator> dataSourceMap = new HashMap<>();
  Map<String, Block> blockMap = new HashMap<>();
  for (String column : indexSegment.getColumnNames()) {
    DataSource dataSource = indexSegment.getDataSource(column);
    dataSourceMap.put(column, dataSource);
    blockMap.put(column, dataSource.getNextBlock());
  }
  // Generate a random test doc id set: one (num1, num2) pair per 100-document window.
  int num1 = _random.nextInt(50);
  int num2 = num1 + 1 + _random.nextInt(50);
  for (int i = 0; i < 20; i += 2) {
    _testDocIdSet[i] = num1 + 50 * i;
    _testDocIdSet[i + 1] = num2 + 50 * i;
  }
  // Wrap the doc id set in a projection block, then a transform block, for the group key generator.
  DataFetcher dataFetcher = new DataFetcher(dataSourceMap);
  DocIdSetBlock docIdSetBlock = new DocIdSetBlock(_testDocIdSet, _testDocIdSet.length);
  ProjectionBlock projectionBlock = new ProjectionBlock(blockMap, new DataBlockCache(dataFetcher), docIdSetBlock);
  _transformBlock = new TransformBlock(projectionBlock, new HashMap<String, BlockValSet>());
}
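The doc id loop above is easy to misread: i advances by 2, so 50 * i grows by 100 per iteration, placing one (num1, num2) pair in each consecutive window of 100 documents. A standalone illustration with fixed values (plain Java, no Pinot types):

// Illustration of the doc id pattern with num1 = 7, num2 = 42.
int[] testDocIdSet = new int[20];
for (int i = 0; i < 20; i += 2) {
  testDocIdSet[i] = 7 + 50 * i;       // 7, 107, 207, ..., 907
  testDocIdSet[i + 1] = 42 + 50 * i;  // 42, 142, 242, ..., 942
}
System.out.println(java.util.Arrays.toString(testDocIdSet));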
Use of com.linkedin.pinot.core.indexsegment.IndexSegment in project pinot by linkedin.
The class BrokerReduceServiceTest, method testSumQuery.
@Test
public void testSumQuery() {
  BrokerRequest brokerRequest = getSumQuery();
  QuerySource querySource = new QuerySource();
  querySource.setTableName("midas");
  brokerRequest.setQuerySource(querySource);
  InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
  instanceRequest.setSearchSegments(new ArrayList<String>());
  for (IndexSegment segment : _indexSegmentList) {
    instanceRequest.addToSearchSegments(segment.getSegmentName());
  }
  // Execute the query twice, treating the responses as if they came from two different servers.
  Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
  try {
    QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
    DataTable instanceResponse1 = _queryExecutor.processQuery(queryRequest, queryRunners);
    instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse1);
    DataTable instanceResponse2 = _queryExecutor.processQuery(queryRequest, queryRunners);
    instanceResponseMap.put(new ServerInstance("localhost:1111"), instanceResponse2);
    // Reduce the per-server data tables into a single broker response.
    BrokerResponseNative brokerResponse = _reduceService.reduceOnDataTable(brokerRequest, instanceResponseMap);
    LOGGER.info("BrokerResponse is {}", brokerResponse.getAggregationResults().get(0));
    LOGGER.info("Time used for BrokerResponse is {}", brokerResponse.getTimeUsedMs());
  } catch (Exception e) {
    // Should never happen; rethrow with the original cause preserved.
    throw new RuntimeException(e.toString(), e);
  }
}
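getSumQuery() itself is not shown in this listing. In Pinot tests of this vintage it typically builds a BrokerRequest carrying a single sum AggregationInfo; a plausible sketch, where the metric column name "met" and the exact setter usage are assumptions:

// Plausible sketch of getSumQuery(); the column name "met" is assumed.
private BrokerRequest getSumQuery() {
  AggregationInfo aggregationInfo = new AggregationInfo();
  aggregationInfo.setAggregationType("sum");
  Map<String, String> aggregationParams = new HashMap<String, String>();
  aggregationParams.put("column", "met");
  aggregationInfo.setAggregationParams(aggregationParams);
  BrokerRequest brokerRequest = new BrokerRequest();
  brokerRequest.setAggregationsInfo(Collections.singletonList(aggregationInfo));
  return brokerRequest;
}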