Use of com.linkedin.pinot.tools.scan.query.QueryResponse in project pinot by linkedin.
The class HybridClusterScanComparisonIntegrationTest, method runQuery.
protected void runQuery(String pqlQuery, ScanBasedQueryProcessor scanBasedQueryProcessor, boolean displayStatus,
    String scanResult) throws Exception {
  JSONObject scanJson;
  if (scanResult == null) {
    // No pre-computed result: compute the expected response with the scan-based processor
    QueryResponse scanResponse = scanBasedQueryProcessor.processQuery(pqlQuery);
    String scanRspStr = new ObjectMapper().writeValueAsString(scanResponse);
    if (_scanRspFileWriter != null) {
      if (scanRspStr.contains("\n")) {
        throw new RuntimeException("We don't handle newlines in JSON responses yet. The reader parses a newline as the separator between query responses.");
      }
      _scanRspFileWriter.write(scanRspStr + "\n");
    }
    scanJson = new JSONObject(scanRspStr);
  } else {
    scanJson = new JSONObject(scanResult);
  }
  JSONObject pinotJson = postQuery(pqlQuery);
  QueryComparison.setCompareNumDocs(false);
  try {
    QueryComparison.ComparisonStatus comparisonStatus = QueryComparison.compareWithEmpty(pinotJson, scanJson);
    if (comparisonStatus.equals(QueryComparison.ComparisonStatus.FAILED)) {
      _compareStatusFileWriter.write("\nQuery comparison failed for query " + _nQueriesRead + ":" + pqlQuery + "\n"
          + "Scan json: " + scanJson + "\n" + "Pinot json: " + pinotJson + "\n");
      _failedQueries.getAndIncrement();
    } else {
      _successfulQueries.getAndIncrement();
      if (comparisonStatus.equals(QueryComparison.ComparisonStatus.EMPTY)) {
        _emptyResults.getAndIncrement();
      } else if (_logMatchingResults) {
        _compareStatusFileWriter.write("\nMatched for query:" + pqlQuery + "\n" + scanJson + "\n");
      }
    }
    _compareStatusFileWriter.flush();
  } catch (Exception e) {
    _compareStatusFileWriter.write("Caught exception while running query comparison, failed for query " + pqlQuery
        + "\n" + "Scan json: " + scanJson + "\n" + "Pinot json: " + pinotJson + "\n");
    _failedQueries.getAndIncrement();
    _compareStatusFileWriter.flush();
  }
  int totalQueries = _successfulQueries.get() + _failedQueries.get();
  if (displayStatus || totalQueries % 5000 == 0) {
    doDisplayStatus(totalQueries);
  }
}
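For context, a minimal sketch of the one-response-per-line file format that runQuery writes: serialize the scan response and reject embedded newlines, since the reader splits responses on '\n'. This is not from the test; the segments path, query, and output file are placeholders, and the Jackson databind ObjectMapper import is an assumption (the method above only shows the unqualified class name).

import java.io.FileWriter;
import com.fasterxml.jackson.databind.ObjectMapper; // assumed Jackson mapper
import com.linkedin.pinot.tools.scan.query.QueryResponse;
import com.linkedin.pinot.tools.scan.query.ScanBasedQueryProcessor;

public class ScanResponseFileSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder segments directory and query
    ScanBasedQueryProcessor processor = new ScanBasedQueryProcessor("/tmp/segments");
    QueryResponse response = processor.processQuery("select count(*) from mytable");
    String scanRspStr = new ObjectMapper().writeValueAsString(response);
    if (scanRspStr.contains("\n")) {
      // A newline is the separator between responses, so multi-line JSON would corrupt the file
      throw new RuntimeException("Multi-line JSON responses are not supported");
    }
    try (FileWriter writer = new FileWriter("/tmp/scan-responses.txt", true)) { // placeholder file
      writer.write(scanRspStr + "\n");
    }
  }
}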
Use of com.linkedin.pinot.tools.scan.query.QueryResponse in project pinot by linkedin.
The class QueryComparison, method runFunctionMode.
private void runFunctionMode() throws Exception {
  BufferedReader resultReader = null;
  ScanBasedQueryProcessor scanBasedQueryProcessor = null;
  try (BufferedReader queryReader = new BufferedReader(
      new InputStreamReader(new FileInputStream(_queryFile), "UTF-8"))) {
    if (_resultFile == null) {
      // No expected-result file: compute expected responses with the scan-based processor
      scanBasedQueryProcessor = new ScanBasedQueryProcessor(_segmentsDir.getAbsolutePath());
    } else {
      resultReader = new BufferedReader(new InputStreamReader(new FileInputStream(_resultFile), "UTF-8"));
    }
    int passed = 0;
    int total = 0;
    String query;
    while ((query = queryReader.readLine()) != null) {
      // Skip blank lines and '#' comments
      if (query.isEmpty() || query.startsWith("#")) {
        continue;
      }
      JSONObject expectedJson = null;
      try {
        if (resultReader != null) {
          expectedJson = new JSONObject(resultReader.readLine());
        } else {
          QueryResponse expectedResponse = scanBasedQueryProcessor.processQuery(query);
          expectedJson = new JSONObject(new ObjectMapper().writeValueAsString(expectedResponse));
        }
      } catch (Exception e) {
        LOGGER.error("Comparison FAILED: Id: {} Exception caught while getting expected response for query: '{}'",
            total, query, e);
      }
      JSONObject actualJson = null;
      if (expectedJson != null) {
        try {
          actualJson = new JSONObject(_clusterStarter.query(query));
        } catch (Exception e) {
          LOGGER.error("Comparison FAILED: Id: {} Exception caught while running query: '{}'", total, query, e);
        }
      }
      if (expectedJson != null && actualJson != null) {
        try {
          if (compare(actualJson, expectedJson)) {
            passed++;
            LOGGER.info("Comparison PASSED: Id: {} actual Time: {} ms expected Time: {} ms Docs Scanned: {}", total,
                actualJson.get(TIME_USED_MS), expectedJson.get(TIME_USED_MS), actualJson.get(NUM_DOCS_SCANNED));
            LOGGER.debug("actual Response: {}", actualJson);
            LOGGER.debug("expected Response: {}", expectedJson);
          } else {
            LOGGER.error("Comparison FAILED: Id: {} query: {}", total, query);
            LOGGER.info("actual Response: {}", actualJson);
            LOGGER.info("expected Response: {}", expectedJson);
          }
        } catch (Exception e) {
          LOGGER.error("Comparison FAILED: Id: {} Exception caught while comparing query: '{}' actual response: {}, expected response: {}",
              total, query, actualJson, expectedJson, e);
        }
      }
      total++;
    }
    LOGGER.info("Total {} out of {} queries passed.", passed, total);
  } finally {
    if (resultReader != null) {
      resultReader.close();
    }
  }
}
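The loop above encodes a small file convention: one query per line, blank lines and '#' comments skipped, and when a result file is supplied, its lines pair one-to-one with the surviving query lines because a skipped query consumes no result line. A hedged sketch of just that pairing logic, assuming the org.json JSONObject used in the method above; the file names are placeholders:

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import org.json.JSONObject;

public class QueryResultPairingSketch {
  public static void main(String[] args) throws Exception {
    try (BufferedReader queries = new BufferedReader(
            new InputStreamReader(new FileInputStream("queries.txt"), "UTF-8"));   // placeholder
         BufferedReader results = new BufferedReader(
            new InputStreamReader(new FileInputStream("results.txt"), "UTF-8"))) { // placeholder
      String query;
      while ((query = queries.readLine()) != null) {
        if (query.isEmpty() || query.startsWith("#")) {
          continue; // comment or blank line: no matching result line to consume
        }
        JSONObject expected = new JSONObject(results.readLine());
        // ... compare 'expected' against the live cluster's response here ...
        System.out.println("Query: " + query + " expected: " + expected);
      }
    }
  }
}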
Use of com.linkedin.pinot.tools.scan.query.QueryResponse in project pinot by linkedin.
The class HybridClusterScanComparisonIntegrationTest, method runTestLoop.
protected void runTestLoop(Callable<Object> testMethod, boolean useMultipleThreads) throws Exception {
  // Clean up the Kafka topic
  // TODO jfim: Re-enable this once PINOT-2598 is fixed
  // purgeKafkaTopicAndResetRealtimeTable();
  List<Pair<File, File>> enabledRealtimeSegments = new ArrayList<>();
  // Sort the realtime segments by segment name so they are added from earliest to latest
  TreeMap<File, File> sortedRealtimeSegments = new TreeMap<File, File>(new Comparator<File>() {
    @Override
    public int compare(File o1, File o2) {
      return _realtimeAvroToSegmentMap.get(o1).getName().compareTo(_realtimeAvroToSegmentMap.get(o2).getName());
    }
  });
  sortedRealtimeSegments.putAll(_realtimeAvroToSegmentMap);
  for (File avroFile : sortedRealtimeSegments.keySet()) {
    enabledRealtimeSegments.add(Pair.of(avroFile, sortedRealtimeSegments.get(avroFile)));
    if (useMultipleThreads) {
      _queryExecutor = new ThreadPoolExecutor(4, 4, 5, TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(50),
          new ThreadPoolExecutor.CallerRunsPolicy());
    }
    // Push the Avro records for the new segment into Kafka
    LOGGER.info("Pushing Avro file {} into Kafka", avroFile);
    pushAvroIntoKafka(Collections.singletonList(avroFile), KafkaStarterUtils.DEFAULT_KAFKA_BROKER, KAFKA_TOPIC);
    // Configure the scan-based comparator to use the distinct union of the offline and realtime segments
    configureScanBasedComparator(enabledRealtimeSegments);
    QueryResponse queryResponse = _scanBasedQueryProcessor.processQuery("select count(*) from mytable");
    int expectedRecordCount = queryResponse.getNumDocsScanned();
    waitForRecordCountToStabilizeToExpectedCount(expectedRecordCount,
        System.currentTimeMillis() + getStabilizationTimeMs());
    // Run the actual tests
    LOGGER.info("Running queries");
    testMethod.call();
    if (useMultipleThreads) {
      if (_nQueriesRead == -1) {
        _queryExecutor.shutdown();
        _queryExecutor.awaitTermination(5, TimeUnit.MINUTES);
      } else {
        // Wait until every submitted query comparison has completed
        int totalQueries = _failedQueries.get() + _successfulQueries.get();
        while (totalQueries < _nQueriesRead) {
          LOGGER.info("Completed " + totalQueries + " out of " + _nQueriesRead + " - waiting");
          Uninterruptibles.sleepUninterruptibly(20, TimeUnit.SECONDS);
          totalQueries = _failedQueries.get() + _successfulQueries.get();
        }
        if (totalQueries > _nQueriesRead) {
          throw new RuntimeException("Executed " + totalQueries + " queries, more than the expected " + _nQueriesRead);
        }
        _queryExecutor.shutdown();
      }
    }
    int totalQueries = _failedQueries.get() + _successfulQueries.get();
    doDisplayStatus(totalQueries);
    // Release resources
    _scanBasedQueryProcessor.close();
    _compareStatusFileWriter.write("Status after push of " + avroFile + ":" + System.currentTimeMillis()
        + ":Executed " + _nQueriesRead + " queries, " + _failedQueries + " failures, " + _emptyResults.get()
        + " empty results\n");
  }
}
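When useMultipleThreads is set, the loop above recreates a small bounded executor per segment push: a fixed pool of four workers over a 50-slot queue, with CallerRunsPolicy providing backpressure by running a task on the submitting thread whenever the queue is full, rather than rejecting it. A hedged sketch of that executor configuration in isolation; the task body is a placeholder:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedExecutorSketch {
  public static void main(String[] args) throws InterruptedException {
    // Same configuration as runTestLoop: 4 core/max threads, 5 s keep-alive,
    // bounded queue of 50, caller-runs backpressure when the queue fills up
    ExecutorService queryExecutor = new ThreadPoolExecutor(4, 4, 5, TimeUnit.SECONDS,
        new ArrayBlockingQueue<Runnable>(50), new ThreadPoolExecutor.CallerRunsPolicy());
    queryExecutor.submit(new Runnable() {
      @Override
      public void run() {
        // placeholder: run one query comparison
      }
    });
    queryExecutor.shutdown();
    queryExecutor.awaitTermination(5, TimeUnit.MINUTES);
  }
}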