Use of com.linkedin.pinot.common.utils.DataTable in project pinot by LinkedIn.
From the class IntermediateResultsBlock, method getAggregationResultDataTable.
/**
 * Builds a {@link DataTable} for aggregation-only (no group-by) results: one column per
 * aggregation function, a single row holding each function's intermediate result, with the
 * block metadata attached.
 *
 * @return data table containing one row of intermediate aggregation results.
 * @throws Exception if the data table cannot be built.
 */
@Nonnull
private DataTable getAggregationResultDataTable() throws Exception {
// Extract each aggregation column name and type from aggregation function context.
int numAggregationFunctions = _aggregationFunctionContexts.length;
String[] columnNames = new String[numAggregationFunctions];
FieldSpec.DataType[] columnTypes = new FieldSpec.DataType[numAggregationFunctions];
for (int i = 0; i < numAggregationFunctions; i++) {
AggregationFunctionContext aggregationFunctionContext = _aggregationFunctionContexts[i];
columnNames[i] = aggregationFunctionContext.getAggregationColumnName();
columnTypes[i] = aggregationFunctionContext.getAggregationFunction().getIntermediateResultDataType();
}
// Build the data table: start a single row and set one column per aggregation function.
DataTableBuilder dataTableBuilder = new DataTableBuilder(new DataSchema(columnNames, columnTypes));
dataTableBuilder.startRow();
for (int i = 0; i < numAggregationFunctions; i++) {
switch(columnTypes[i]) {
case LONG:
dataTableBuilder.setColumn(i, ((Number) _aggregationResult.get(i)).longValue());
break;
case DOUBLE:
// Cast through Number (consistent with the LONG case above) so that any numeric
// intermediate result (e.g. Integer, Long) is accepted, not only Double.
dataTableBuilder.setColumn(i, ((Number) _aggregationResult.get(i)).doubleValue());
break;
case OBJECT:
dataTableBuilder.setColumn(i, _aggregationResult.get(i));
break;
default:
throw new UnsupportedOperationException("Unsupported aggregation column data type: " + columnTypes[i] + " for column: " + columnNames[i]);
}
}
dataTableBuilder.finishRow();
DataTable dataTable = dataTableBuilder.build();
return attachMetadataToDataTable(dataTable);
}
Use of com.linkedin.pinot.common.utils.DataTable in project pinot by LinkedIn.
From the class BrokerReduceService, method setGroupByResults.
/**
 * Reduce group-by results from multiple servers and set them into BrokerResponseNative passed in.
 * <p>The first data table seen seeds each column's name and intermediate result map; every
 * subsequent data table is merged into it group-key by group-key.
 *
 * @param brokerResponseNative broker response.
 * @param aggregationFunctions array of aggregation functions.
 * @param groupBy group-by information.
 * @param dataTableMap map from server to data table.
 */
@SuppressWarnings("unchecked")
private void setGroupByResults(@Nonnull BrokerResponseNative brokerResponseNative, @Nonnull AggregationFunction[] aggregationFunctions, @Nonnull GroupBy groupBy, @Nonnull Map<ServerInstance, DataTable> dataTableMap) {
int numAggregationFunctions = aggregationFunctions.length;
// Merge results from all data tables.
String[] columnNames = new String[numAggregationFunctions];
Map<String, Object>[] intermediateResultMaps = new Map[numAggregationFunctions];
for (DataTable dataTable : dataTableMap.values()) {
for (int i = 0; i < numAggregationFunctions; i++) {
if (columnNames[i] == null) {
// First data table for this column: take its name and result map as the base.
columnNames[i] = dataTable.getString(i, 0);
intermediateResultMaps[i] = dataTable.getObject(i, 1);
} else {
Map<String, Object> mergedIntermediateResultMap = intermediateResultMaps[i];
Map<String, Object> intermediateResultMapToMerge = dataTable.getObject(i, 1);
for (Map.Entry<String, Object> entry : intermediateResultMapToMerge.entrySet()) {
String groupKey = entry.getKey();
Object intermediateResultToMerge = entry.getValue();
if (mergedIntermediateResultMap.containsKey(groupKey)) {
Object mergedIntermediateResult = mergedIntermediateResultMap.get(groupKey);
mergedIntermediateResultMap.put(groupKey, aggregationFunctions[i].merge(mergedIntermediateResult, intermediateResultToMerge));
} else {
mergedIntermediateResultMap.put(groupKey, intermediateResultToMerge);
}
}
}
}
}
// Extract final result maps from the merged intermediate result maps.
// Iterate over entrySet() instead of keySet() + get() to avoid a second lookup per key.
Map<String, Comparable>[] finalResultMaps = new Map[numAggregationFunctions];
for (int i = 0; i < numAggregationFunctions; i++) {
Map<String, Object> intermediateResultMap = intermediateResultMaps[i];
Map<String, Comparable> finalResultMap = new HashMap<>();
for (Map.Entry<String, Object> entry : intermediateResultMap.entrySet()) {
finalResultMap.put(entry.getKey(), aggregationFunctions[i].extractFinalResult(entry.getValue()));
}
finalResultMaps[i] = finalResultMap;
}
// Trim the final result maps to topN and set them into the broker response.
AggregationGroupByTrimmingService aggregationGroupByTrimmingService = new AggregationGroupByTrimmingService(aggregationFunctions, (int) groupBy.getTopN());
List<GroupByResult>[] groupByResultLists = aggregationGroupByTrimmingService.trimFinalResults(finalResultMaps);
// Group-by columns are loop-invariant: resolve them once, preferring expressions over columns.
List<String> groupByColumns = groupBy.getExpressions();
if (groupByColumns == null) {
groupByColumns = groupBy.getColumns();
}
List<AggregationResult> aggregationResults = new ArrayList<>(numAggregationFunctions);
for (int i = 0; i < numAggregationFunctions; i++) {
aggregationResults.add(new AggregationResult(groupByResultLists[i], groupByColumns, columnNames[i]));
}
brokerResponseNative.setAggregationResults(aggregationResults);
}
Use of com.linkedin.pinot.common.utils.DataTable in project pinot by LinkedIn.
From the class SelectionOperatorService, method reduceWithOrdering.
/**
 * Reduce a collection of {@link DataTable}s to selection rows for selection queries with <code>ORDER BY</code>.
 * (Broker side)
 *
 * @param selectionResults {@link Map} from {@link ServerInstance} to {@link DataTable}.
 */
public void reduceWithOrdering(@Nonnull Map<ServerInstance, DataTable> selectionResults) {
// Feed every row from every server's data table into the bounded priority queue,
// which retains at most _maxNumRows rows according to the ordering.
for (DataTable table : selectionResults.values()) {
int rowCount = table.getNumberOfRows();
for (int rowIndex = 0; rowIndex < rowCount; rowIndex++) {
Serializable[] extractedRow = SelectionOperatorUtils.extractRowFromDataTable(table, rowIndex);
SelectionOperatorUtils.addToPriorityQueue(extractedRow, _rows, _maxNumRows);
}
}
}
Use of com.linkedin.pinot.common.utils.DataTable in project pinot by LinkedIn.
From the class DataTableSerDeTest, method testException.
@Test
public void testException() throws IOException {
// Wrap a runtime exception into a ProcessingException, serialize it through a data
// table, and verify the message survives the round trip via the exception metadata.
ProcessingException processingException = QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, new UnsupportedOperationException("Caught exception."));
String expectedMessage = processingException.getMessage();
DataTable dataTable = new DataTableImplV2();
dataTable.addException(processingException);
DataTable deserializedDataTable = DataTableFactory.getDataTable(dataTable.toBytes());
// A data table that only carries an exception has no schema and no rows.
Assert.assertNull(deserializedDataTable.getDataSchema());
Assert.assertEquals(deserializedDataTable.getNumberOfRows(), 0);
String metadataKey = DataTable.EXCEPTION_METADATA_KEY + QueryException.QUERY_EXECUTION_ERROR.getErrorCode();
Assert.assertEquals(deserializedDataTable.getMetadata().get(metadataKey), expectedMessage);
}
Use of com.linkedin.pinot.common.utils.DataTable in project pinot by LinkedIn.
From the class BaseQueriesTest, method getBrokerResponseForQuery.
/**
 * Run query on multiple index segments.
 * <p>Use this to test the whole flow from server to broker.
 * <p>The result should be equivalent to querying 4 identical index segments.
 *
 * @param query PQL query.
 * @return broker response.
 */
protected BrokerResponseNative getBrokerResponseForQuery(String query) {
BrokerRequest brokerRequest = COMPILER.compileToBrokerRequest(query);
// Server side: build and execute the inter-segment plan, then grab the instance response.
Plan plan = PLAN_MAKER.makeInterSegmentPlan(getSegmentDataManagers(), brokerRequest, EXECUTOR_SERVICE, 10_000);
plan.execute();
DataTable instanceResponse = plan.getInstanceResponse();
// Broker side: present the same instance response as if it came from two distinct
// servers, then reduce the two data tables into a single broker response.
Map<ServerInstance, DataTable> responseByServer = new HashMap<>();
responseByServer.put(new ServerInstance("localhost:0000"), instanceResponse);
responseByServer.put(new ServerInstance("localhost:1111"), instanceResponse);
BrokerReduceService brokerReduceService = new BrokerReduceService();
return brokerReduceService.reduceOnDataTable(brokerRequest, responseByServer);
}
Aggregations