Use of com.linkedin.pinot.core.data.readers.RecordReader in project pinot by linkedin.
From the class StarTreeIndexTestSegmentHelper, method buildSegment:
private static Schema buildSegment(String segmentDirName, String segmentName, HllConfig hllConfig,
    boolean enableOffHeapFormat) throws Exception {
  final int rows = (int) MathUtils.factorial(NUM_DIMENSIONS) * 100;

  // Build a schema with NUM_DIMENSIONS string dimensions, a day-granularity time column,
  // and NUM_METRICS int metrics.
  Schema schema = new Schema();
  for (int i = 0; i < NUM_DIMENSIONS; i++) {
    String dimName = "d" + (i + 1);
    DimensionFieldSpec dimensionFieldSpec = new DimensionFieldSpec(dimName, FieldSpec.DataType.STRING, true);
    schema.addField(dimName, dimensionFieldSpec);
  }
  schema.setTimeFieldSpec(new TimeFieldSpec(TIME_COLUMN_NAME, FieldSpec.DataType.INT, TimeUnit.DAYS));
  for (int i = 0; i < NUM_METRICS; i++) {
    String metricName = "m" + (i + 1);
    MetricFieldSpec metricFieldSpec = new MetricFieldSpec(metricName, FieldSpec.DataType.INT);
    schema.addField(metricName, metricFieldSpec);
  }

  SegmentGeneratorConfig config = new SegmentGeneratorConfig(schema);
  config.setEnableStarTreeIndex(true);
  config.setOutDir(segmentDirName);
  config.setFormat(FileFormat.AVRO);
  config.setSegmentName(segmentName);
  config.setHllConfig(hllConfig);
  config.setStarTreeIndexSpec(buildStarTreeIndexSpec(enableOffHeapFormat));

  Random random = new Random(RANDOM_SEED);
  final List<GenericRow> data = new ArrayList<>();
  for (int row = 0; row < rows; row++) {
    HashMap<String, Object> map = new HashMap<>();
    // Dimension columns: the first half cycle through a small set of values.
    for (int i = 0; i < NUM_DIMENSIONS / 2; i++) {
      String dimName = schema.getDimensionFieldSpecs().get(i).getName();
      map.put(dimName, dimName + "-v" + row % (NUM_DIMENSIONS - i));
    }
    // Random values give the second half (d3, d4) higher cardinality, to better exercise HLL.
    for (int i = NUM_DIMENSIONS / 2; i < NUM_DIMENSIONS; i++) {
      String dimName = schema.getDimensionFieldSpecs().get(i).getName();
      map.put(dimName, dimName + "-v" + random.nextInt(i * 100));
    }
    // Metric columns.
    for (int i = 0; i < NUM_METRICS; i++) {
      String metName = schema.getMetricFieldSpecs().get(i).getName();
      map.put(metName, random.nextInt(METRIC_MAX_VALUE));
    }
    // Time column.
    map.put(TIME_COLUMN_NAME, row % 7);

    GenericRow genericRow = new GenericRow();
    genericRow.init(map);
    data.add(genericRow);
  }

  SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
  RecordReader reader = new TestUtils.GenericRowRecordReader(schema, data);
  driver.init(config, reader);
  driver.build();
  LOGGER.info("Built segment {} at {}", segmentName, segmentDirName);
  return schema;
}
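For context, a minimal sketch of how a test might invoke this helper. The directory, segment name, and the HllConfig constructor argument shown here are assumptions, not taken from the original test:

// Hypothetical invocation of the helper above; paths and HLL settings are assumed.
File segmentDir = new File(FileUtils.getTempDirectory(), "starTreeSegmentDir");
HllConfig hllConfig = new HllConfig(8); // assumed constructor taking a log2m value; check the HllConfig API
Schema schema = buildSegment(segmentDir.getAbsolutePath(), "starTreeSegment", hllConfig, false);
// The returned schema can then be used to validate queries against the built segment.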
Use of com.linkedin.pinot.core.data.readers.RecordReader in project pinot by linkedin.
From the class RealtimeSegmentConverter, method build:
public void build(SegmentVersion segmentVersion) throws Exception {
  // Create a record reader over the in-memory realtime segment; if a sorted column is
  // configured, the reader emits rows in that column's sorted order.
  RecordReader reader;
  if (sortedColumn == null) {
    reader = new RealtimeSegmentRecordReader(realtimeSegmentImpl, dataSchema);
  } else {
    reader = new RealtimeSegmentRecordReader(realtimeSegmentImpl, dataSchema, sortedColumn);
  }

  SegmentGeneratorConfig genConfig = new SegmentGeneratorConfig(dataSchema);
  if (invertedIndexColumns != null && !invertedIndexColumns.isEmpty()) {
    for (String column : invertedIndexColumns) {
      genConfig.createInvertedIndexForColumn(column);
    }
  }
  if (noDictionaryColumns != null) {
    genConfig.setRawIndexCreationColumns(noDictionaryColumns);
  }
  genConfig.setTimeColumnName(dataSchema.getTimeFieldSpec().getOutgoingTimeColumnName());
  genConfig.setSegmentTimeUnit(dataSchema.getTimeFieldSpec().getOutgoingGranularitySpec().getTimeType());
  genConfig.setSegmentVersion(segmentVersion);
  genConfig.setTableName(tableName);
  genConfig.setOutDir(outputPath);
  genConfig.setSegmentName(segmentName);

  final SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
  driver.init(genConfig, reader);
  driver.build();
}
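As a hedged usage sketch, a converter like this one might be driven as follows. The constructor signature is an assumption inferred from the fields used in build(), not confirmed by the snippet above:

// Hypothetical construction; the argument list is assumed, not the class's documented API.
RealtimeSegmentConverter converter = new RealtimeSegmentConverter(realtimeSegmentImpl, outputPath,
    dataSchema, tableName, segmentName, sortedColumn, invertedIndexColumns, noDictionaryColumns);
converter.build(SegmentVersion.v1); // SegmentVersion selects the on-disk segment format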
Use of com.linkedin.pinot.core.data.readers.RecordReader in project pinot by linkedin.
From the class DefaultAggregationExecutorTest, method setupSegment:
/**
 * Helper method to set up the index segment on which to perform aggregation tests.
 * - Generates a segment with {@link #NUM_METRIC_COLUMNS} metric columns and {@link #NUM_ROWS} rows.
 * - Fills the metric columns with random 'double' data. The same data is also recorded
 *   in _inputData[], so the test can verify aggregation results against it.
 *
 * @throws Exception
 */
private void setupSegment() throws Exception {
  if (INDEX_DIR.exists()) {
    FileUtils.deleteQuietly(INDEX_DIR);
  }

  SegmentGeneratorConfig config = new SegmentGeneratorConfig();
  config.setSegmentName(SEGMENT_NAME);
  config.setOutDir(INDEX_DIR.getAbsolutePath());
  Schema schema = buildSchema();
  config.setSchema(schema);

  final List<GenericRow> data = new ArrayList<>();
  for (int i = 0; i < NUM_ROWS; i++) {
    Map<String, Object> map = new HashMap<>();
    for (int j = 0; j < _columns.length; j++) {
      String metricName = _columns[j];
      double value = _random.nextDouble() * MAX_VALUE;
      _inputData[j][i] = value; // keep a copy for verifying aggregation results
      map.put(metricName, value);
    }
    GenericRow genericRow = new GenericRow();
    genericRow.init(map);
    data.add(genericRow);
    _docIdSet[i] = i;
  }

  SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
  RecordReader reader = new TestUtils.GenericRowRecordReader(schema, data);
  driver.init(config, reader);
  driver.build();
  _indexSegment = Loaders.IndexSegment.load(new File(INDEX_DIR, driver.getSegmentName()), ReadMode.heap);
}
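A minimal sketch of a TestNG lifecycle around this helper. The teardown shown is an assumption; the original class may clean up differently:

// Hypothetical test harness around setupSegment(); annotations are TestNG.
@BeforeClass
public void setUp() throws Exception {
  setupSegment(); // builds the segment and loads it into _indexSegment
}

@AfterClass
public void tearDown() {
  if (_indexSegment != null) {
    _indexSegment.destroy(); // release loaded index resources
  }
  FileUtils.deleteQuietly(INDEX_DIR);
}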
Use of com.linkedin.pinot.core.data.readers.RecordReader in project pinot by linkedin.
From the class StringDictionaryPerfTest, method buildSegment:
/**
 * Helper method to build a segment:
 * <ul>
 *   <li> The segment contains one string column. </li>
 *   <li> Row values for the column are randomly generated strings of length 1 to MAX_STRING_LENGTH. </li>
 * </ul>
 *
 * @param dictLength Length of the dictionary (number of unique strings)
 * @throws Exception
 */
public void buildSegment(int dictLength) throws Exception {
  Schema schema = new Schema();
  String segmentName = "perfTestSegment" + System.currentTimeMillis();
  _indexDir = new File(TMP_DIR + File.separator + segmentName);
  _indexDir.deleteOnExit();

  FieldSpec fieldSpec = new DimensionFieldSpec(COLUMN_NAME, FieldSpec.DataType.STRING, true);
  schema.addField(fieldSpec);

  _dictLength = dictLength;
  _inputStrings = new String[dictLength];

  SegmentGeneratorConfig config = new SegmentGeneratorConfig(schema);
  config.setOutDir(_indexDir.getParent());
  config.setFormat(FileFormat.AVRO);
  config.setSegmentName(segmentName);

  Random random = new Random(System.nanoTime());
  final List<GenericRow> data = new ArrayList<>();
  Set<String> uniqueStrings = new HashSet<>(dictLength);

  // Keep generating random strings until we have dictLength unique values.
  int i = 0;
  while (i < dictLength) {
    HashMap<String, Object> map = new HashMap<>();
    String randomString = RandomStringUtils.randomAlphanumeric(1 + random.nextInt(MAX_STRING_LENGTH));
    if (uniqueStrings.contains(randomString)) {
      continue; // duplicate value, retry
    }
    _inputStrings[i] = randomString;
    uniqueStrings.add(randomString);
    map.put(COLUMN_NAME, _inputStrings[i++]); // COLUMN_NAME is the single string column defined above
    GenericRow genericRow = new GenericRow();
    genericRow.init(map);
    data.add(genericRow);
  }

  SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
  RecordReader reader = getGenericRowRecordReader(schema, data);
  driver.init(config, reader);
  driver.build();
}
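A sketch of how the built segment's dictionary might then be benchmarked. The loading call mirrors the Loaders.IndexSegment.load usage shown earlier; the accessor chain and timing loop are assumptions about the rest of this perf test:

buildSegment(100000); // e.g. a 100k-entry dictionary
IndexSegment segment = Loaders.IndexSegment.load(_indexDir, ReadMode.heap);
Dictionary dictionary = segment.getDataSource(COLUMN_NAME).getDictionary(); // assumed accessor chain
long start = System.currentTimeMillis();
for (String s : _inputStrings) {
  dictionary.indexOf(s); // the lookup being measured
}
System.out.println("Lookup time (ms): " + (System.currentTimeMillis() - start));
segment.destroy();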
Use of com.linkedin.pinot.core.data.readers.RecordReader in project pinot by linkedin.
From the class RawIndexBenchmark, method buildSegment:
/**
 * Helper method that builds a segment containing two columns, both populated with data from the
 * input file. The first column has a raw index (no dictionary), whereas the second column is
 * dictionary encoded.
 *
 * @throws Exception
 */
private File buildSegment() throws Exception {
  Schema schema = new Schema();
  for (int i = 0; i < NUM_COLUMNS; i++) {
    String column = "column_" + i;
    DimensionFieldSpec dimensionFieldSpec = new DimensionFieldSpec(column, FieldSpec.DataType.STRING, true);
    schema.addField(dimensionFieldSpec);
  }

  SegmentGeneratorConfig config = new SegmentGeneratorConfig(schema);
  config.setRawIndexCreationColumns(Collections.singletonList(_rawIndexColumn));
  config.setOutDir(SEGMENT_DIR_NAME);
  config.setSegmentName(SEGMENT_NAME);

  final List<GenericRow> rows = new ArrayList<>();
  System.out.println("Reading data...");
  // Read the input file line by line, writing each line's value into every column.
  try (BufferedReader reader = new BufferedReader(new FileReader(_dataFile))) {
    String value;
    while ((value = reader.readLine()) != null) {
      HashMap<String, Object> map = new HashMap<>();
      for (FieldSpec fieldSpec : schema.getAllFieldSpecs()) {
        map.put(fieldSpec.getName(), value);
      }
      GenericRow genericRow = new GenericRow();
      genericRow.init(map);
      rows.add(genericRow);
      _numRows++;
      if (_numRows % 1000000 == 0) {
        System.out.println("Read rows: " + _numRows);
      }
    }
  }

  System.out.println("Generating segment...");
  SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
  RecordReader recordReader = new TestRecordReader(rows, schema);
  driver.init(config, recordReader);
  driver.build();
  return new File(SEGMENT_DIR_NAME, SEGMENT_NAME);
}
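Finally, a hedged sketch of how the benchmark might consume the segment this method returns; the scan-and-compare step is summarized as a comment since its implementation is not shown here:

File segmentDir = buildSegment();
IndexSegment segment = Loaders.IndexSegment.load(segmentDir, ReadMode.mmap);
// ... time full-column scans of _rawIndexColumn vs. the dictionary-encoded column ...
segment.destroy();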