Use of java.util.concurrent.ConcurrentLinkedQueue in project jersey by jersey.
The class Main, method runClient.
/**
* Client - business logic.
*
* @param args command-line arguments.
* @return exit code of the utility. {@code 0} if everything completed without errors, {@code -1} otherwise.
*/
static int runClient(final String[] args) {
// Parsing command-line arguments
final Config config = Config.parse(args);
System.out.println(String.format("\nStarting to execute %d requests:\n", config.requests));
// Creating JAX-RS client
final Client client = ClientBuilder.newClient();
// Targeting echo resource at URI "<baseUri>/long-running/(sync|async)/{echo}"
final WebTarget echoResource = client.target(config.baseUri).path("long-running/{mode}/{echo}").resolveTemplate("mode", (config.sync) ? "sync" : "async");
final CountDownLatch latch = new CountDownLatch(config.requests);
final Queue<String> errors = new ConcurrentLinkedQueue<String>();
final AtomicInteger requestCounter = new AtomicInteger(0);
final long tic = System.currentTimeMillis();
for (int i = 0; i < config.requests; i++) {
final int reqId = i;
echoResource.resolveTemplate("echo", reqId).request().async().get(new InvocationCallback<String>() {
private final AtomicInteger retries = new AtomicInteger(0);
@Override
public void completed(String response) {
final String requestId = Integer.toString(reqId);
if (requestId.equals(response)) {
System.out.print("*");
requestCounter.incrementAndGet();
} else {
System.out.print("!");
errors.offer(String.format("Echo response '%s' not equal to request '%s'", response, requestId));
}
latch.countDown();
}
@Override
public void failed(Throwable error) {
if (error.getCause() instanceof IOException && retries.getAndIncrement() < 3) {
// Resend the request; I/O failures are retried up to 3 times.
echoResource.resolveTemplate("echo", reqId).request().async().get(this);
} else {
System.out.print("!");
errors.offer(String.format("Request '%d' has failed: %s", reqId, error.toString()));
latch.countDown();
}
}
});
}
try {
if (!latch.await(60, TimeUnit.SECONDS)) {
errors.offer("Waiting for requests to complete has timed out.");
}
} catch (InterruptedException e) {
errors.offer("Waiting for requests to complete has been interrupted.");
}
final long toc = System.currentTimeMillis();
System.out.println(String.format("\n\nExecution finished in %d ms.\nSuccess rate: %6.2f %%", toc - tic, ((double) requestCounter.get() / config.requests) * 100));
if (errors.size() > 0) {
System.out.println("Following errors occurred during the request execution");
for (String error : errors) {
System.out.println("\t" + error);
}
}
client.close();
return errors.size() > 0 ? -1 : 0;
}
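The idiom worth isolating here is the pairing of a CountDownLatch with a lock-free ConcurrentLinkedQueue: the invocation callbacks run on arbitrary client threads, so errors must be collected without blocking, and the latch is counted down on success and failure alike. A minimal, self-contained sketch of that pattern follows, with plain threads standing in for the JAX-RS callbacks; every name in it is illustrative, not taken from the Jersey sample.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class ErrorCollectorSketch {
    public static void main(String[] args) throws InterruptedException {
        final int tasks = 10;
        final CountDownLatch latch = new CountDownLatch(tasks);
        // offer() never blocks and is safe to call from any thread.
        final Queue<String> errors = new ConcurrentLinkedQueue<>();
        for (int i = 0; i < tasks; i++) {
            final int id = i;
            new Thread(() -> {
                try {
                    if (id % 4 == 0) {
                        // Simulated failure path.
                        errors.offer("Task " + id + " failed");
                    }
                } finally {
                    // Count down on success and failure alike, as runClient does.
                    latch.countDown();
                }
            }).start();
        }
        if (!latch.await(10, TimeUnit.SECONDS)) {
            errors.offer("Waiting for tasks to complete has timed out.");
        }
        errors.forEach(error -> System.out.println("\t" + error));
    }
}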
Use of java.util.concurrent.ConcurrentLinkedQueue in project jersey by jersey.
The class ItemStoreResourceTest, method testItemsStore.
/**
* Test the item addition, addition event broadcasting and item retrieval from {@link ItemStoreResource}.
*
* @throws Exception in case of a test failure.
*/
@Test
public void testItemsStore() throws Exception {
final List<String> items = Collections.unmodifiableList(Arrays.asList("foo", "bar", "baz"));
final WebTarget itemsTarget = target("items");
// countdown on all events
final CountDownLatch latch = new CountDownLatch(items.size() * MAX_LISTENERS * 2);
final List<Queue<Integer>> indexQueues = new ArrayList<Queue<Integer>>(MAX_LISTENERS);
final EventSource[] sources = new EventSource[MAX_LISTENERS];
final AtomicInteger sizeEventsCount = new AtomicInteger(0);
for (int i = 0; i < MAX_LISTENERS; i++) {
final int id = i;
final EventSource es = EventSource.target(itemsTarget.path("events")).named("SOURCE " + id).build();
sources[id] = es;
final Queue<Integer> indexes = new ConcurrentLinkedQueue<Integer>();
indexQueues.add(indexes);
es.register(new EventListener() {
@SuppressWarnings("MagicNumber")
@Override
public void onEvent(InboundEvent inboundEvent) {
try {
if (inboundEvent.getName() == null) {
final String data = inboundEvent.readData();
LOGGER.info("[-i-] SOURCE " + id + ": Received event id=" + inboundEvent.getId() + " data=" + data);
indexes.add(items.indexOf(data));
} else if ("size".equals(inboundEvent.getName())) {
sizeEventsCount.incrementAndGet();
}
} catch (Exception ex) {
LOGGER.log(Level.SEVERE, "[-x-] SOURCE " + id + ": Error getting event data.", ex);
indexes.add(-999);
} finally {
latch.countDown();
}
}
});
}
try {
open(sources);
for (String item : items) {
postItem(itemsTarget, item);
}
assertTrue("Waiting to receive all events has timed out.", latch.await((1000 + MAX_LISTENERS * EventSource.RECONNECT_DEFAULT) * getAsyncTimeoutMultiplier(), TimeUnit.MILLISECONDS));
// Need to force a disconnect on the server for EventSource.close(...) to succeed with HttpUrlConnection.
sendCommand(itemsTarget, "disconnect");
} finally {
close(sources);
}
String postedItems = itemsTarget.request().get(String.class);
for (String item : items) {
assertTrue("Item '" + item + "' not stored on server.", postedItems.contains(item));
}
int queueId = 0;
for (Queue<Integer> indexes : indexQueues) {
for (int i = 0; i < items.size(); i++) {
assertTrue("Event for '" + items.get(i) + "' not received in queue " + queueId, indexes.contains(i));
}
assertEquals("Not received the expected number of events in queue " + queueId, items.size(), indexes.size());
queueId++;
}
assertEquals("Number of received 'size' events does not match.", items.size() * MAX_LISTENERS, sizeEventsCount.get());
}
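A compact sketch of the test's collection idiom, under assumed names: each listener owns its own ConcurrentLinkedQueue&lt;Integer&gt;, the event-dispatch thread adds item indexes to it without any external locking, and the main thread inspects the queues only after a latch confirms that every expected event has arrived.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ListenerQueuesSketch {
    public static void main(String[] args) throws InterruptedException {
        final List<String> items = Arrays.asList("foo", "bar", "baz");
        final int listeners = 3;
        final CountDownLatch latch = new CountDownLatch(items.size() * listeners);
        final List<Queue<Integer>> indexQueues = new ArrayList<>();
        final ExecutorService dispatcher = Executors.newSingleThreadExecutor();
        for (int l = 0; l < listeners; l++) {
            // One queue per listener; concurrent adds need no synchronization.
            final Queue<Integer> indexes = new ConcurrentLinkedQueue<>();
            indexQueues.add(indexes);
            for (final String item : items) {
                dispatcher.execute(() -> {
                    indexes.add(items.indexOf(item));
                    latch.countDown();
                });
            }
        }
        if (!latch.await(5, TimeUnit.SECONDS)) {
            throw new AssertionError("Waiting to receive all events has timed out.");
        }
        dispatcher.shutdown();
        for (Queue<Integer> indexes : indexQueues) {
            for (int i = 0; i < items.size(); i++) {
                if (!indexes.contains(i)) {
                    throw new AssertionError("Missing index " + i);
                }
            }
        }
        System.out.println("All listeners received all item indexes.");
    }
}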
Use of java.util.concurrent.ConcurrentLinkedQueue in project pinot by linkedin.
The class QueryRunner, method targetQPSQueryRunner.
/**
* Use multiple threads to run queries at a target QPS.
* <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
* queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
* them.
* <p>The main thread is responsible for collecting and logging the statistic information periodically.
* <p>Queries are picked sequentially from the query file.
* <p>The query runner stops once all queries in the query file have been executed the configured number of times.
*
* @param conf perf benchmark driver config.
* @param queryFile query file.
* @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
* @param numThreads number of threads sending queries.
* @param startQPS start QPS (target QPS).
* @param reportIntervalMs report interval in milliseconds.
* @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
* them, 0 means never.
* @throws Exception
*/
public static void targetQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile, int numTimesToRunQueries, int numThreads, double startQPS, int reportIntervalMs, int numIntervalsToReportAndClearStatistics) throws Exception {
List<String> queries;
try (FileInputStream input = new FileInputStream(new File(queryFile))) {
queries = IOUtils.readLines(input);
}
PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
AtomicInteger numQueriesExecuted = new AtomicInteger(0);
AtomicLong totalBrokerTime = new AtomicLong(0L);
AtomicLong totalClientTime = new AtomicLong(0L);
List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));
ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
for (int i = 0; i < numThreads; i++) {
executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime, totalClientTime, statisticsList));
}
executorService.shutdown();
int queryIntervalMs = (int) (MILLIS_PER_SECOND / startQPS);
long startTime = System.currentTimeMillis();
long reportStartTime = startTime;
int numReportIntervals = 0;
int numTimesExecuted = 0;
while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
if (executorService.isTerminated()) {
LOGGER.error("All threads got exception and already dead.");
return;
}
for (String query : queries) {
queryQueue.add(query);
Thread.sleep(queryIntervalMs);
long currentTime = System.currentTimeMillis();
if (currentTime - reportStartTime >= reportIntervalMs) {
long timePassed = currentTime - startTime;
int numQueriesExecutedInt = numQueriesExecuted.get();
LOGGER.info("Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, " + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.", startQPS, timePassed, numQueriesExecutedInt, numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND), totalBrokerTime.get() / (double) numQueriesExecutedInt, totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
reportStartTime = currentTime;
numReportIntervals++;
if ((numIntervalsToReportAndClearStatistics != 0) && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
numReportIntervals = 0;
startTime = currentTime;
reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime, statisticsList);
}
}
}
numTimesExecuted++;
}
// Wait for all queued queries to be executed.
while (!queryQueue.isEmpty()) {
Thread.sleep(1);
}
executorService.shutdownNow();
while (!executorService.isTerminated()) {
Thread.sleep(1);
}
long timePassed = System.currentTimeMillis() - startTime;
int numQueriesExecutedInt = numQueriesExecuted.get();
LOGGER.info("--------------------------------------------------------------------------------");
LOGGER.info("FINAL REPORT:");
LOGGER.info("Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, " + "Average Broker Time: {}ms, Average Client Time: {}ms.", startQPS, timePassed, numQueriesExecutedInt, numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND), totalBrokerTime.get() / (double) numQueriesExecutedInt, totalClientTime.get() / (double) numQueriesExecutedInt);
for (Statistics statistics : statisticsList) {
statistics.report();
}
}
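The queue decouples pacing from execution: the main thread controls the arrival rate by sleeping between add() calls, while workers drain the queue with poll(), which returns null rather than blocking when the queue is empty. Below is a short, self-contained sketch of that producer/consumer shape under assumed names; the real Worker delegates each query to PerfBenchmarkDriver, which the sketch replaces with a counter.

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class PacedQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        final ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
        final AtomicInteger executed = new AtomicInteger();
        final ExecutorService workers = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 4; i++) {
            workers.submit(() -> {
                while (!Thread.currentThread().isInterrupted()) {
                    String query = queryQueue.poll(); // non-blocking; null when empty
                    if (query == null) {
                        try { Thread.sleep(1); } catch (InterruptedException e) { return; }
                    } else {
                        executed.incrementAndGet(); // stand-in for sending the query
                    }
                }
            });
        }
        long intervalMs = (long) (1000 / 50.0); // pace producers at ~50 QPS
        for (int i = 0; i < 100; i++) {
            queryQueue.add("SELECT " + i);
            Thread.sleep(intervalMs);
        }
        while (!queryQueue.isEmpty()) Thread.sleep(1); // drain before shutdown
        workers.shutdownNow();
        workers.awaitTermination(5, TimeUnit.SECONDS);
        System.out.println("executed " + executed.get() + " queries");
    }
}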
Use of java.util.concurrent.ConcurrentLinkedQueue in project pinot by linkedin.
The class QueryRunner, method increasingQPSQueryRunner.
/**
* Use multiple threads to run queries at an increasing target QPS.
* <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
* queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
* them.
* <p>We start at the start QPS and repeatedly add the delta QPS during the test.
* <p>The main thread is responsible for collecting and logging the statistic information periodically.
* <p>Queries are picked sequentially from the query file.
* <p>The query runner stops once all queries in the query file have been executed the configured number of times.
*
* @param conf perf benchmark driver config.
* @param queryFile query file.
* @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
* @param numThreads number of threads sending queries.
* @param startQPS start QPS.
* @param deltaQPS delta QPS.
* @param reportIntervalMs report interval in milliseconds.
* @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
* them, 0 means never.
* @param numIntervalsToIncreaseQPS number of report intervals between QPS increases.
* @throws Exception
*/
public static void increasingQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile, int numTimesToRunQueries, int numThreads, double startQPS, double deltaQPS, int reportIntervalMs, int numIntervalsToReportAndClearStatistics, int numIntervalsToIncreaseQPS) throws Exception {
List<String> queries;
try (FileInputStream input = new FileInputStream(new File(queryFile))) {
queries = IOUtils.readLines(input);
}
PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
AtomicInteger numQueriesExecuted = new AtomicInteger(0);
AtomicLong totalBrokerTime = new AtomicLong(0L);
AtomicLong totalClientTime = new AtomicLong(0L);
List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));
ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
for (int i = 0; i < numThreads; i++) {
executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime, totalClientTime, statisticsList));
}
executorService.shutdown();
long startTime = System.currentTimeMillis();
long reportStartTime = startTime;
int numReportIntervals = 0;
int numTimesExecuted = 0;
double currentQPS = startQPS;
int queryIntervalMs = (int) (MILLIS_PER_SECOND / currentQPS);
while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
if (executorService.isTerminated()) {
LOGGER.error("All threads got exception and already dead.");
return;
}
for (String query : queries) {
queryQueue.add(query);
Thread.sleep(queryIntervalMs);
long currentTime = System.currentTimeMillis();
if (currentTime - reportStartTime >= reportIntervalMs) {
long timePassed = currentTime - startTime;
reportStartTime = currentTime;
numReportIntervals++;
if (numReportIntervals == numIntervalsToIncreaseQPS) {
// Time to increase the QPS; find the next target with a distinct pacing interval.
double newQPS = currentQPS + deltaQPS;
int newQueryIntervalMs;
// Skip target QPS values whose millisecond interval matches the current one.
while ((newQueryIntervalMs = (int) (MILLIS_PER_SECOND / newQPS)) == queryIntervalMs) {
newQPS += deltaQPS;
}
if (newQueryIntervalMs == 0) {
LOGGER.warn("Due to sleep granularity of millisecond, cannot further increase QPS.");
} else {
// Found a distinct interval; report for the old target QPS and switch.
LOGGER.info("--------------------------------------------------------------------------------");
LOGGER.info("REPORT FOR TARGET QPS: {}", currentQPS);
int numQueriesExecutedInt = numQueriesExecuted.get();
LOGGER.info("Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, " + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.", currentQPS, timePassed, numQueriesExecutedInt, numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND), totalBrokerTime.get() / (double) numQueriesExecutedInt, totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
numReportIntervals = 0;
startTime = currentTime;
reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime, statisticsList);
currentQPS = newQPS;
queryIntervalMs = newQueryIntervalMs;
LOGGER.info("Increase target QPS to: {}, the following statistics are for the new target QPS.", currentQPS);
}
} else {
int numQueriesExecutedInt = numQueriesExecuted.get();
LOGGER.info("Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, " + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.", currentQPS, timePassed, numQueriesExecutedInt, numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND), totalBrokerTime.get() / (double) numQueriesExecutedInt, totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
if ((numIntervalsToReportAndClearStatistics != 0) && (numReportIntervals % numIntervalsToReportAndClearStatistics == 0)) {
startTime = currentTime;
reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime, statisticsList);
}
}
}
}
numTimesExecuted++;
}
// Wait for all queued queries to be executed.
while (!queryQueue.isEmpty()) {
Thread.sleep(1);
}
executorService.shutdownNow();
while (!executorService.isTerminated()) {
Thread.sleep(1);
}
long timePassed = System.currentTimeMillis() - startTime;
int numQueriesExecutedInt = numQueriesExecuted.get();
LOGGER.info("--------------------------------------------------------------------------------");
LOGGER.info("FINAL REPORT:");
LOGGER.info("Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, " + "Average Broker Time: {}ms, Average Client Time: {}ms.", currentQPS, timePassed, numQueriesExecutedInt, numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND), totalBrokerTime.get() / (double) numQueriesExecutedInt, totalClientTime.get() / (double) numQueriesExecutedInt);
for (Statistics statistics : statisticsList) {
statistics.report();
}
}
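Because pacing relies on millisecond sleeps, the interval (int) (MILLIS_PER_SECOND / qps) is quantized, and consecutive QPS targets can collapse to the same interval; the inner while loop above skips ahead until the interval actually changes. A small sketch of just that computation, with assumed names:

public class QpsRampSketch {
    private static final long MILLIS_PER_SECOND = 1000L;

    static double nextDistinctQps(double currentQps, double deltaQps) {
        int currentIntervalMs = (int) (MILLIS_PER_SECOND / currentQps);
        double newQps = currentQps + deltaQps;
        // Skip targets whose millisecond interval equals the current one.
        while ((int) (MILLIS_PER_SECOND / newQps) == currentIntervalMs) {
            newQps += deltaQps;
        }
        return newQps;
    }

    public static void main(String[] args) {
        double qps = 100;
        for (int i = 0; i < 5; i++) {
            double next = nextDistinctQps(qps, 10);
            System.out.printf("%.0f QPS (%dms) -> %.0f QPS (%dms)%n",
                    qps, (int) (MILLIS_PER_SECOND / qps), next, (int) (MILLIS_PER_SECOND / next));
            qps = next;
        }
    }
}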
Use of java.util.concurrent.ConcurrentLinkedQueue in project pinot by linkedin.
The class MCombineGroupByOperator, method combineBlocks.
/**
* This method combines the result blocks from underlying operators and builds a
* merged, sorted and trimmed result block.
* 1. Result blocks from the underlying operators are merged concurrently into a
* HashMap, with appropriate synchronization. The result blocks themselves are stored
* in the specified blocks[].
* - The key in this concurrent map is the group-by key, and value is an array of
* Objects (one for each aggregation function).
* - Synchronization is provided by locking on the group key being modified.
*
* 2. The result of the concurrent map is then translated into what is expected by
* the broker (List<Map<String, Object>>).
*
* 3. This result is then sorted and trimmed to the 'TOP N' specified in the brokerRequest.
*
* @return IntermediateResultsBlock containing the final results from the combine operation.
* @throws InterruptedException if interrupted while waiting for the underlying operators to finish.
*/
private IntermediateResultsBlock combineBlocks() throws InterruptedException {
int numOperators = _operators.size();
final CountDownLatch operatorLatch = new CountDownLatch(numOperators);
final Map<String, Object[]> resultsMap = new ConcurrentHashMap<>();
final ConcurrentLinkedQueue<ProcessingException> mergedProcessingExceptions = new ConcurrentLinkedQueue<>();
List<AggregationInfo> aggregationInfos = _brokerRequest.getAggregationsInfo();
final AggregationFunctionContext[] aggregationFunctionContexts = AggregationFunctionUtils.getAggregationFunctionContexts(aggregationInfos, null);
final int numAggregationFunctions = aggregationFunctionContexts.length;
for (int i = 0; i < numOperators; i++) {
final int index = i;
_executorService.execute(new TraceRunnable() {
@SuppressWarnings("unchecked")
@Override
public void runJob() {
AggregationGroupByResult aggregationGroupByResult;
try {
IntermediateResultsBlock intermediateResultsBlock = (IntermediateResultsBlock) _operators.get(index).nextBlock();
// Merge processing exceptions.
List<ProcessingException> processingExceptionsToMerge = intermediateResultsBlock.getProcessingExceptions();
if (processingExceptionsToMerge != null) {
mergedProcessingExceptions.addAll(processingExceptionsToMerge);
}
// Merge aggregation group-by result.
aggregationGroupByResult = intermediateResultsBlock.getAggregationGroupByResult();
if (aggregationGroupByResult != null) {
// Iterate over the group-by keys, for each key, update the group-by result in the resultsMap.
Iterator<GroupKeyGenerator.GroupKey> groupKeyIterator = aggregationGroupByResult.getGroupKeyIterator();
while (groupKeyIterator.hasNext()) {
GroupKeyGenerator.GroupKey groupKey = groupKeyIterator.next();
String groupKeyString = groupKey.getStringKey();
// hashCode() might return a negative value; mask it to a non-negative lock index.
int lockIndex = (groupKeyString.hashCode() & Integer.MAX_VALUE) % NUM_LOCKS;
synchronized (LOCKS[lockIndex]) {
Object[] results = resultsMap.get(groupKeyString);
if (results == null) {
results = new Object[numAggregationFunctions];
for (int j = 0; j < numAggregationFunctions; j++) {
results[j] = aggregationGroupByResult.getResultForKey(groupKey, j);
}
resultsMap.put(groupKeyString, results);
} else {
for (int j = 0; j < numAggregationFunctions; j++) {
results[j] = aggregationFunctionContexts[j].getAggregationFunction().merge(results[j], aggregationGroupByResult.getResultForKey(groupKey, j));
}
}
}
}
}
} catch (Exception e) {
LOGGER.error("Exception processing CombineGroupBy for index {}, operator {}", index, _operators.get(index).getClass().getName(), e);
mergedProcessingExceptions.add(QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, e));
}
operatorLatch.countDown();
}
});
}
boolean opCompleted = operatorLatch.await(_timeOutMs, TimeUnit.MILLISECONDS);
if (!opCompleted) {
// If this happens, the broker side should have already timed out; just log the error on the server side.
LOGGER.error("Timed out while combining group-by results, after {}ms.", _timeOutMs);
return new IntermediateResultsBlock(new TimeoutException("CombineGroupBy timed out."));
}
// Trim the results map.
AggregationGroupByTrimmingService aggregationGroupByTrimmingService = new AggregationGroupByTrimmingService(aggregationFunctionContexts, (int) _brokerRequest.getGroupBy().getTopN());
List<Map<String, Object>> trimmedResults = aggregationGroupByTrimmingService.trimIntermediateResultsMap(resultsMap);
IntermediateResultsBlock mergedBlock = new IntermediateResultsBlock(aggregationFunctionContexts, trimmedResults, true);
// Set the processing exceptions.
if (!mergedProcessingExceptions.isEmpty()) {
mergedBlock.setProcessingExceptions(new ArrayList<>(mergedProcessingExceptions));
}
// Set the execution statistics.
ExecutionStatistics executionStatistics = new ExecutionStatistics();
for (Operator operator : _operators) {
ExecutionStatistics executionStatisticsToMerge = operator.getExecutionStatistics();
if (executionStatisticsToMerge != null) {
executionStatistics.merge(executionStatisticsToMerge);
}
}
mergedBlock.setNumDocsScanned(executionStatistics.getNumDocsScanned());
mergedBlock.setNumEntriesScannedInFilter(executionStatistics.getNumEntriesScannedInFilter());
mergedBlock.setNumEntriesScannedPostFilter(executionStatistics.getNumEntriesScannedPostFilter());
mergedBlock.setNumTotalRawDocs(executionStatistics.getNumTotalRawDocs());
return mergedBlock;
}
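Two concurrency idioms carry this method: a fixed array of lock objects stripes the per-key read-modify-write on the shared results map, and a ConcurrentLinkedQueue gathers exceptions from worker threads without any locking. A self-contained sketch under assumed names, with the merge step reduced to a counter increment:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class StripedMergeSketch {
    private static final int NUM_LOCKS = 64;
    private static final Object[] LOCKS = new Object[NUM_LOCKS];
    static {
        for (int i = 0; i < NUM_LOCKS; i++) LOCKS[i] = new Object();
    }

    public static void main(String[] args) throws InterruptedException {
        final Map<String, long[]> resultsMap = new ConcurrentHashMap<>();
        final ConcurrentLinkedQueue<Exception> errors = new ConcurrentLinkedQueue<>();
        final ExecutorService pool = Executors.newFixedThreadPool(8);
        final CountDownLatch latch = new CountDownLatch(8);
        for (int t = 0; t < 8; t++) {
            pool.execute(() -> {
                try {
                    for (int i = 0; i < 1000; i++) {
                        String groupKey = "key" + (i % 10);
                        // hashCode() can be negative; mask to a non-negative index.
                        int lockIndex = (groupKey.hashCode() & Integer.MAX_VALUE) % NUM_LOCKS;
                        // Same key always maps to the same lock, so the
                        // get-then-update below is atomic per key.
                        synchronized (LOCKS[lockIndex]) {
                            long[] agg = resultsMap.get(groupKey);
                            if (agg == null) {
                                resultsMap.put(groupKey, new long[]{1});
                            } else {
                                agg[0]++; // merge step; real code calls the aggregation function
                            }
                        }
                    }
                } catch (Exception e) {
                    errors.add(e); // lock-free collection of worker failures
                } finally {
                    latch.countDown();
                }
            });
        }
        latch.await(10, TimeUnit.SECONDS);
        pool.shutdown();
        System.out.println(resultsMap.size() + " groups, errors: " + errors.size());
    }
}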