Usage of javax.annotation.Nonnull in the project pinot by linkedin:
class BrokerRequestHandler, method processBrokerRequest.
/**
 * Main entry point for processing a broker request.
 * <p>Lifecycle stages:
 * <ul>
 *   <li>1. Find the candidate servers to be queried for each set of segments from the routing table.</li>
 *   <li>2. Select servers for each segment set and scatter request to the servers.</li>
 *   <li>3. Gather responses from the servers.</li>
 *   <li>4. Deserialize the server responses.</li>
 *   <li>5. Reduce (merge) the server responses and create a broker response to be returned.</li>
 * </ul>
 *
 * @param brokerRequest broker request to be processed.
 * @param scatterGatherStats scatter-gather statistics.
 * @param requestId broker request ID.
 * @return broker response.
 * @throws InterruptedException
 */
@Nonnull
public BrokerResponse processBrokerRequest(@Nonnull BrokerRequest brokerRequest, @Nonnull ScatterGatherStats scatterGatherStats, long requestId) throws InterruptedException {
  String rawTableName = brokerRequest.getQuerySource().getTableName();
  ResponseType responseType = BrokerResponseFactory.getResponseType(brokerRequest.getResponseFormat());
  LOGGER.debug("Broker Response Type: {}", responseType.name());

  // Resolve which physical tables (offline / realtime) actually have a routing table;
  // a null name below means that flavor of the table does not exist.
  String offlineCandidate = TableNameBuilder.OFFLINE_TABLE_NAME_BUILDER.forTable(rawTableName);
  String offlineTableName = _routingTable.routingTableExists(offlineCandidate) ? offlineCandidate : null;
  String realtimeCandidate = TableNameBuilder.REALTIME_TABLE_NAME_BUILDER.forTable(rawTableName);
  String realtimeTableName = _routingTable.routingTableExists(realtimeCandidate) ? realtimeCandidate : null;

  // Guard: neither flavor exists, so the request cannot be served.
  if (offlineTableName == null && realtimeTableName == null) {
    LOGGER.warn("No table matches the name: {}", rawTableName);
    _brokerMetrics.addMeteredTableValue(rawTableName, BrokerMeter.RESOURCE_MISSING_EXCEPTIONS, 1);
    return BrokerResponseFactory.getStaticNoTableHitBrokerResponse(responseType);
  }

  // TODO: get time column name from schema or table config so that we can apply it in realtime only use case.
  // Currently the time column comes from the time boundary service, which only exists for offline tables.
  String timeColumnName = (offlineTableName == null) ? null : getTimeColumnName(offlineTableName);

  BrokerRequest offlineBrokerRequest = null;
  BrokerRequest realtimeBrokerRequest = null;
  if (offlineTableName != null && realtimeTableName != null) {
    // Hybrid table: split the request into an offline part and a realtime part.
    offlineBrokerRequest = _optimizer.optimize(getOfflineBrokerRequest(brokerRequest), timeColumnName);
    realtimeBrokerRequest = _optimizer.optimize(getRealtimeBrokerRequest(brokerRequest), timeColumnName);
  } else if (offlineTableName != null) {
    // Offline-only table: retarget the original request at the offline table name.
    brokerRequest.getQuerySource().setTableName(offlineTableName);
    offlineBrokerRequest = _optimizer.optimize(brokerRequest, timeColumnName);
  } else {
    // Realtime-only table: retarget the original request at the realtime table name.
    brokerRequest.getQuerySource().setTableName(realtimeTableName);
    realtimeBrokerRequest = _optimizer.optimize(brokerRequest, timeColumnName);
  }

  ReduceService reduceService = _reduceServiceRegistry.get(responseType);
  // TODO: wire up the customized BucketingSelection.
  return processOptimizedBrokerRequests(brokerRequest, offlineBrokerRequest, realtimeBrokerRequest,
      reduceService, scatterGatherStats, null, requestId);
}
Usage of javax.annotation.Nonnull in the project pinot by linkedin:
class BrokerRequestHandler, method handleRequest.
/**
 * Processes a JSON-format query request end to end: parse options, compile and
 * validate the PQL, execute against the servers, and assemble the broker response.
 *
 * @param request JSON format request to be processed.
 * @return broker response.
 * @throws Exception
 */
@Nonnull
public BrokerResponse handleRequest(@Nonnull JSONObject request) throws Exception {
  long requestId = _requestIdGenerator.incrementAndGet();
  String pql = request.getString("pql");
  LOGGER.debug("Query string for requestId {}: {}", requestId, pql);

  // Optional "trace" flag (only logged when the caller supplied it).
  boolean traceEnabled = false;
  if (request.has("trace")) {
    traceEnabled = Boolean.parseBoolean(request.getString("trace"));
    LOGGER.debug("Trace is set to: {} for requestId {}: {}", traceEnabled, requestId, pql);
  }

  // Optional semicolon-separated "key=value" debug options.
  Map<String, String> debugOptions = null;
  if (request.has("debugOptions")) {
    String rawOptions = request.getString("debugOptions");
    debugOptions =
        Splitter.on(';').omitEmptyStrings().trimResults().withKeyValueSeparator('=').split(rawOptions);
    LOGGER.debug("Debug options are set to: {} for requestId {}: {}", debugOptions, requestId, pql);
  }

  // Compile and validate the request.
  long compileStartNanos = System.nanoTime();
  BrokerRequest brokerRequest;
  try {
    brokerRequest = REQUEST_COMPILER.compileToBrokerRequest(pql);
  } catch (Exception e) {
    LOGGER.warn("Parsing error on requestId {}: {}", requestId, pql, e);
    _brokerMetrics.addMeteredGlobalValue(BrokerMeter.REQUEST_COMPILATION_EXCEPTIONS, 1);
    return BrokerResponseFactory.getBrokerResponseWithException(DEFAULT_BROKER_RESPONSE_TYPE,
        QueryException.getException(QueryException.PQL_PARSING_ERROR, e));
  }
  String tableName = brokerRequest.getQuerySource().getTableName();
  try {
    validateRequest(brokerRequest);
  } catch (Exception e) {
    LOGGER.warn("Validation error on requestId {}: {}", requestId, pql, e);
    _brokerMetrics.addMeteredTableValue(tableName, BrokerMeter.QUERY_VALIDATION_EXCEPTIONS, 1);
    return BrokerResponseFactory.getBrokerResponseWithException(DEFAULT_BROKER_RESPONSE_TYPE,
        QueryException.getException(QueryException.QUERY_VALIDATION_ERROR, e));
  }

  if (traceEnabled) {
    brokerRequest.setEnableTrace(true);
  }
  if (debugOptions != null) {
    brokerRequest.setDebugOptions(debugOptions);
  }
  brokerRequest.setResponseFormat(ResponseType.BROKER_RESPONSE_TYPE_NATIVE.name());
  _brokerMetrics.addPhaseTiming(tableName, BrokerQueryPhase.REQUEST_COMPILATION,
      System.nanoTime() - compileStartNanos);
  _brokerMetrics.addMeteredTableValue(tableName, BrokerMeter.QUERIES, 1);

  // Execute the query.
  long executionStartNanos = System.nanoTime();
  ScatterGatherStats scatterGatherStats = new ScatterGatherStats();
  BrokerResponse brokerResponse = processBrokerRequest(brokerRequest, scatterGatherStats, requestId);
  _brokerMetrics.addPhaseTiming(tableName, BrokerQueryPhase.QUERY_EXECUTION,
      System.nanoTime() - executionStartNanos);

  // Total processing time is measured from the start of compilation.
  long totalTimeMs = TimeUnit.MILLISECONDS.convert(System.nanoTime() - compileStartNanos, TimeUnit.NANOSECONDS);
  brokerResponse.setTimeUsedMs(totalTimeMs);
  LOGGER.debug("Broker Response: {}", brokerResponse);

  // Table name might have been changed (with suffix _OFFLINE/_REALTIME appended).
  LOGGER.info("RequestId: {}, table: {}, totalTimeMs: {}, numDocsScanned: {}, numEntriesScannedInFilter: {}, "
          + "numEntriesScannedPostFilter: {}, totalDocs: {}, scatterGatherStats: {}, query: {}",
      requestId, brokerRequest.getQuerySource().getTableName(), totalTimeMs,
      brokerResponse.getNumDocsScanned(), brokerResponse.getNumEntriesScannedInFilter(),
      brokerResponse.getNumEntriesScannedPostFilter(), brokerResponse.getTotalDocs(),
      scatterGatherStats, pql);
  return brokerResponse;
}
Usage of javax.annotation.Nonnull in the project pinot by linkedin:
class DataSchema, method toBytes.
/**
 * Serializes this schema to a byte array: the column count, then each column name
 * and each column type name as a length-prefixed UTF-8 string.
 *
 * @return serialized schema bytes.
 * @throws IOException on stream write failure.
 */
@Nonnull
public byte[] toBytes() throws IOException {
  ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  DataOutputStream stream = new DataOutputStream(buffer);
  // Number of columns.
  stream.writeInt(_columnNames.length);
  // Column names.
  for (String columnName : _columnNames) {
    writeLengthPrefixed(stream, columnName.getBytes(UTF_8));
  }
  // Column types. Enum names (not ordinals) are used so that adding a new data type
  // does not break compatibility when server and broker run different versions of
  // the DataType class.
  for (FieldSpec.DataType columnType : _columnTypes) {
    writeLengthPrefixed(stream, columnType.name().getBytes(UTF_8));
  }
  return buffer.toByteArray();
}

/** Writes {@code bytes.length} as a 4-byte int followed by the raw bytes. */
private static void writeLengthPrefixed(DataOutputStream stream, byte[] bytes) throws IOException {
  stream.writeInt(bytes.length);
  stream.write(bytes);
}
Usage of javax.annotation.Nonnull in the project pinot by linkedin:
class RangeMergeOptimizer, method optimizeRanges.
/**
 * Recursively merges range predicates on the time column within the filter query tree.
 * Under an AND node, all RANGE predicates on the time column are intersected into a
 * single range; OR branches are optimized independently and never merged across.
 *
 * @param node Current node being visited in the DFS of the filter query tree.
 * @param timeColumn Name of time column
 * @return Returns the optimized filter query tree
 */
@Nonnull
private static FilterQueryTree optimizeRanges(@Nonnull FilterQueryTree node, @Nullable String timeColumn) {
  // Without a time column there is nothing to merge.
  if (timeColumn == null) {
    return node;
  }
  List<FilterQueryTree> children = node.getChildren();
  // Leaf predicate: nothing below to optimize.
  if (children == null || children.isEmpty()) {
    return node;
  }
  if (node.getOperator() == FilterOperator.OR) {
    // Optimize each branch in place, but do not propagate ranges up through OR.
    for (int i = 0; i < children.size(); i++) {
      children.set(i, optimizeRanges(children.get(i), timeColumn));
    }
    return node;
  }
  // A non-leaf node that is not OR must be AND (only OR/AND are supported).
  assert node.getOperator() == FilterOperator.AND;
  List<FilterQueryTree> keptChildren = new ArrayList<>();
  List<String> mergedRange = null;
  for (FilterQueryTree child : children) {
    FilterQueryTree optimizedChild = optimizeRanges(child, timeColumn);
    boolean isTimeRange =
        optimizedChild.getOperator() == FilterOperator.RANGE && optimizedChild.getColumn().equals(timeColumn);
    if (isTimeRange) {
      // Fold every time-column range into a single intersected range.
      List<String> childRange = optimizedChild.getValue();
      mergedRange = (mergedRange == null) ? childRange : intersectRanges(mergedRange, childRange);
    } else {
      keptChildren.add(optimizedChild);
    }
  }
  if (keptChildren.isEmpty()) {
    // Every child was a time-column range; the AND collapses to one RANGE node.
    return new FilterQueryTree(timeColumn, mergedRange, FilterOperator.RANGE, null);
  }
  if (mergedRange != null) {
    keptChildren.add(new FilterQueryTree(timeColumn, mergedRange, FilterOperator.RANGE, null));
  }
  return new FilterQueryTree(null, null, FilterOperator.AND, keptChildren);
}
Usage of javax.annotation.Nonnull in the project pinot by linkedin:
class DataTableImplV2, method toBytes.
/**
 * Serializes this data table to the V2 wire format.
 * <p>Layout: a fixed-size header (version, row count, column count, then an
 * (offset, length) pair for each of the five sections), followed by the section
 * payloads in order: dictionary map, metadata, data schema, fixed-size data,
 * variable-size data. Absent optional sections are encoded with length 0.
 * <p>NOTE: the header/section order below IS the wire format — do not reorder
 * these writes. {@code dataOffset} accumulates each section's start position
 * relative to the beginning of the output.
 *
 * @return serialized data table bytes.
 * @throws IOException on stream write failure.
 */
@Nonnull
@Override
public byte[] toBytes() throws IOException {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream);
dataOutputStream.writeInt(VERSION);
dataOutputStream.writeInt(_numRows);
dataOutputStream.writeInt(_numColumns);
// First section starts right after the fixed-size header.
int dataOffset = HEADER_SIZE;
// Write dictionary (offset, then length; 0 length when absent).
dataOutputStream.writeInt(dataOffset);
byte[] dictionaryMapBytes = null;
if (_dictionaryMap != null) {
dictionaryMapBytes = serializeDictionaryMap();
dataOutputStream.writeInt(dictionaryMapBytes.length);
dataOffset += dictionaryMapBytes.length;
} else {
dataOutputStream.writeInt(0);
}
// Write metadata (always present).
dataOutputStream.writeInt(dataOffset);
byte[] metadataBytes = serializeMetadata();
dataOutputStream.writeInt(metadataBytes.length);
dataOffset += metadataBytes.length;
// Write data schema (0 length when absent).
dataOutputStream.writeInt(dataOffset);
byte[] dataSchemaBytes = null;
if (_dataSchema != null) {
dataSchemaBytes = _dataSchema.toBytes();
dataOutputStream.writeInt(dataSchemaBytes.length);
dataOffset += dataSchemaBytes.length;
} else {
dataOutputStream.writeInt(0);
}
// Write fixed size data (0 length when absent).
dataOutputStream.writeInt(dataOffset);
if (_fixedSizeDataBytes != null) {
dataOutputStream.writeInt(_fixedSizeDataBytes.length);
dataOffset += _fixedSizeDataBytes.length;
} else {
dataOutputStream.writeInt(0);
}
// Write variable size data (last section; no further offset accumulation needed).
dataOutputStream.writeInt(dataOffset);
if (_variableSizeDataBytes != null) {
dataOutputStream.writeInt(_variableSizeDataBytes.length);
} else {
dataOutputStream.writeInt(0);
}
// Write actual data payloads, in the same order the header promised.
if (dictionaryMapBytes != null) {
dataOutputStream.write(dictionaryMapBytes);
}
dataOutputStream.write(metadataBytes);
if (dataSchemaBytes != null) {
dataOutputStream.write(dataSchemaBytes);
}
if (_fixedSizeDataBytes != null) {
dataOutputStream.write(_fixedSizeDataBytes);
}
if (_variableSizeDataBytes != null) {
dataOutputStream.write(_variableSizeDataBytes);
}
return byteArrayOutputStream.toByteArray();
}
Aggregations