Use of java.util.concurrent.ExecutorService in the Pinot project (LinkedIn).
From class QueryRunner — method increasingQPSQueryRunner.
/**
 * Use multiple threads to run query at an increasing target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>We start with the start QPS, and keep adding delta QPS to the start QPS during the test.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>Query runner will stop when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS.
 * @param deltaQPS delta QPS; see the note inside on the interval-search loop — assumed positive.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 * them, 0 means never.
 * @param numIntervalsToIncreaseQPS number of intervals to increase QPS.
 * @throws Exception
 */
public static void increasingQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile, int numTimesToRunQueries, int numThreads, double startQPS, double deltaQPS, int reportIntervalMs, int numIntervalsToReportAndClearStatistics, int numIntervalsToIncreaseQPS) throws Exception {
// Load all queries up front; the file is replayed sequentially on every pass.
List<String> queries;
try (FileInputStream input = new FileInputStream(new File(queryFile))) {
queries = IOUtils.readLines(input);
}
PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
// Shared producer/consumer state: the main thread enqueues queries, workers dequeue and send them.
ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
AtomicInteger numQueriesExecuted = new AtomicInteger(0);
AtomicLong totalBrokerTime = new AtomicLong(0L);
AtomicLong totalClientTime = new AtomicLong(0L);
List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));
ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
for (int i = 0; i < numThreads; i++) {
executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime, totalClientTime, statisticsList));
}
// No more tasks will be submitted; this lets isTerminated() below detect that all workers died.
executorService.shutdown();
long startTime = System.currentTimeMillis();
long reportStartTime = startTime;
int numReportIntervals = 0;
int numTimesExecuted = 0;
double currentQPS = startQPS;
// Target QPS is realized by sleeping between enqueues; sleep granularity is 1ms,
// so the maximum achievable target QPS is MILLIS_PER_SECOND.
int queryIntervalMs = (int) (MILLIS_PER_SECOND / currentQPS);
while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
// If every worker thread exited with an exception, the executor terminates and we bail out.
if (executorService.isTerminated()) {
LOGGER.error("All threads got exception and already dead.");
return;
}
for (String query : queries) {
queryQueue.add(query);
Thread.sleep(queryIntervalMs);
long currentTime = System.currentTimeMillis();
if (currentTime - reportStartTime >= reportIntervalMs) {
long timePassed = currentTime - startTime;
reportStartTime = currentTime;
numReportIntervals++;
if (numReportIntervals == numIntervalsToIncreaseQPS) {
// Try to find the next interval.
double newQPS = currentQPS + deltaQPS;
int newQueryIntervalMs;
// Skip the target QPS with the same interval as the previous one.
// NOTE(review): if deltaQPS <= 0 this loop never terminates — confirm callers always
// pass a positive delta.
while ((newQueryIntervalMs = (int) (MILLIS_PER_SECOND / newQPS)) == queryIntervalMs) {
newQPS += deltaQPS;
}
if (newQueryIntervalMs == 0) {
// A 0ms interval would mean an unbounded send rate; stay at the current target QPS.
LOGGER.warn("Due to sleep granularity of millisecond, cannot further increase QPS.");
} else {
// Find the next interval.
LOGGER.info("--------------------------------------------------------------------------------");
LOGGER.info("REPORT FOR TARGET QPS: {}", currentQPS);
int numQueriesExecutedInt = numQueriesExecuted.get();
LOGGER.info("Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, " + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.", currentQPS, timePassed, numQueriesExecutedInt, numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND), totalBrokerTime.get() / (double) numQueriesExecutedInt, totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
numReportIntervals = 0;
// Reset the measurement window so subsequent averages are per-target-QPS.
startTime = currentTime;
reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime, statisticsList);
currentQPS = newQPS;
queryIntervalMs = newQueryIntervalMs;
LOGGER.info("Increase target QPS to: {}, the following statistics are for the new target QPS.", currentQPS);
}
} else {
int numQueriesExecutedInt = numQueriesExecuted.get();
LOGGER.info("Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, " + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.", currentQPS, timePassed, numQueriesExecutedInt, numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND), totalBrokerTime.get() / (double) numQueriesExecutedInt, totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
if ((numIntervalsToReportAndClearStatistics != 0) && (numReportIntervals % numIntervalsToReportAndClearStatistics == 0)) {
startTime = currentTime;
reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime, statisticsList);
}
}
}
}
numTimesExecuted++;
}
// Wait for all queries getting executed.
while (queryQueue.size() != 0) {
Thread.sleep(1);
}
// Interrupt the workers (they block polling the now-empty queue) and wait for them to exit.
executorService.shutdownNow();
while (!executorService.isTerminated()) {
Thread.sleep(1);
}
long timePassed = System.currentTimeMillis() - startTime;
int numQueriesExecutedInt = numQueriesExecuted.get();
LOGGER.info("--------------------------------------------------------------------------------");
LOGGER.info("FINAL REPORT:");
LOGGER.info("Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, " + "Average Broker Time: {}ms, Average Client Time: {}ms.", currentQPS, timePassed, numQueriesExecutedInt, numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND), totalBrokerTime.get() / (double) numQueriesExecutedInt, totalClientTime.get() / (double) numQueriesExecutedInt);
for (Statistics statistics : statisticsList) {
statistics.report();
}
}
Use of java.util.concurrent.ExecutorService in the Pinot project (LinkedIn).
From class ScanBasedQueryProcessor — method processSegments.
/**
 * Runs the given broker request against every segment in parallel and collects the
 * per-segment results.
 *
 * @param query raw query string (currently unused; kept for interface compatibility).
 * @param brokerRequest parsed broker request executed against each segment.
 * @return synchronized list of per-segment result tables; segments that failed or
 *         produced no result are simply absent from the list.
 * @throws InterruptedException if interrupted while waiting for segment processing.
 */
private List<ResultTable> processSegments(final String query, final BrokerRequest brokerRequest) throws InterruptedException {
// NOTE(review): pool size of 10 is hard-coded — consider making it configurable.
ExecutorService executorService = Executors.newFixedThreadPool(10);
final List<ResultTable> resultTables = Collections.synchronizedList(new ArrayList<ResultTable>());
for (final SegmentQueryProcessor segmentQueryProcessor : _segmentQueryProcessorMap.values()) {
executorService.execute(new Runnable() {
@Override
public void run() {
try {
ResultTable resultTable = segmentQueryProcessor.process(brokerRequest);
if (resultTable != null) {
resultTables.add(resultTable);
}
} catch (Exception e) {
// Per-segment failures are logged (with stack trace) and skipped; the other
// segments still contribute results.
LOGGER.error("Exception caught while processing segment '{}'.", segmentQueryProcessor.getSegmentName(), e);
}
}
});
}
executorService.shutdown();
// Bug fix: the awaitTermination() result was previously ignored, so on timeout the
// pending tasks kept running and could mutate resultTables after it was returned to
// the caller. Cancel outstanding work and log the timeout explicitly instead.
if (!executorService.awaitTermination(_timeoutInSeconds, TimeUnit.SECONDS)) {
LOGGER.warn("Timed out after {} seconds waiting for segment processing; cancelling remaining tasks.", _timeoutInSeconds);
executorService.shutdownNow();
}
return resultTables;
}
Use of java.util.concurrent.ExecutorService in the Pinot project (LinkedIn).
From class NettySingleConnectionIntegrationTest — method testServerShutdownLeak.
/*
 * This test attempts to use the connection mechanism the same way as ScatterGatherImpl.SingleRequestHandler does.
 *
 * WARNING: This test has potential failures due to timing.
 */
@Test
public void testServerShutdownLeak() throws Exception {
final NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
final Timer timer = new HashedWheelTimer();
// Pool sizing: 2 connections pre-created per server, hard cap of 3.
final int minConns = 2;
final int maxConns = 3;
// 10M ms.
final int maxIdleTimeoutMs = 10000000;
final int maxBacklogPerServer = 1;
MyServer server = new MyServer();
Thread.sleep(1000);
// used as a key to pool. Can be anything.
final String serverName = "SomeServer";
final ServerInstance serverInstance = server.getServerInstance();
final MetricsRegistry metricsRegistry = new MetricsRegistry();
EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
PooledNettyClientResourceManager resourceManager = new PooledNettyClientResourceManager(eventLoopGroup, new HashedWheelTimer(), metric);
ExecutorService executorService = Executors.newCachedThreadPool();
ScheduledExecutorService timeoutExecutor = new ScheduledThreadPoolExecutor(5);
AsyncPoolResourceManagerAdapter<ServerInstance, NettyClientConnection> rmAdapter = new AsyncPoolResourceManagerAdapter<ServerInstance, NettyClientConnection>(serverInstance, resourceManager, executorService, metricsRegistry);
KeyedPool<ServerInstance, NettyClientConnection> keyedPool = new KeyedPoolImpl<ServerInstance, NettyClientConnection>(minConns, maxConns, maxIdleTimeoutMs, maxBacklogPerServer, resourceManager, timeoutExecutor, executorService, metricsRegistry);
resourceManager.setPool(keyedPool);
keyedPool.start();
// Reflection is used below to peek at pool internals (_keyedPool, _waiters) that have
// no public accessors.
Field keyedPoolMap = KeyedPoolImpl.class.getDeclaredField("_keyedPool");
keyedPoolMap.setAccessible(true);
KeyedFuture<ServerInstance, NettyClientConnection> keyedFuture = keyedPool.checkoutObject(serverInstance);
// The connection pool for this server is created on demand, so we can now get a reference to the _keyedPool.
// The act of calling checkoutObject() creates a new AsyncPoolImpl and places a request for a new connection.
// Since no new connections are available in the beginning, we always end up creating one more than the min.
Map<ServerInstance, AsyncPool<NettyClientConnection>> poolMap = (Map<ServerInstance, AsyncPool<NettyClientConnection>>) keyedPoolMap.get(keyedPool);
AsyncPool<NettyClientConnection> asyncPool = poolMap.get(serverInstance);
Field waiterList = AsyncPoolImpl.class.getDeclaredField("_waiters");
waiterList.setAccessible(true);
LinkedDequeue queue = (LinkedDequeue) waiterList.get(asyncPool);
PoolStats stats;
// If the number of waiters is = 0, then we will error out because the min connections may not have completed
// by the time we check one out. If maxWaiters is > 0, then we may end up initiating a fresh connection while the
// min is still being filled. So, best to sleep a little to make sure that the min pool size is filled out, so that
// the stats are correct.
Thread.sleep(2000L);
stats = asyncPool.getStats();
Assert.assertEquals(stats.getIdleCount(), minConns);
Assert.assertEquals(stats.getPoolSize(), minConns + 1);
NettyClientConnection conn = keyedFuture.getOne();
LOGGER.debug("Got connection ID " + conn.getConnId());
// NOTE(review): these re-assert on the same `stats` object captured above — if PoolStats
// is a snapshot, they are trivially true; refresh via getStats() if a live check is intended.
Assert.assertEquals(stats.getIdleCount(), minConns);
Assert.assertEquals(stats.getPoolSize(), minConns + 1);
// Now get two more connections to the server, since we have 2 idle, we should get those.
// And leak them.
keyedFuture = keyedPool.checkoutObject(serverInstance);
conn = keyedFuture.getOne();
LOGGER.debug("Got connection ID " + conn.getConnId());
keyedFuture = keyedPool.checkoutObject(serverInstance);
conn = keyedFuture.getOne();
LOGGER.debug("Got connection ID " + conn.getConnId());
// Now we should have 0 idle, and a pool size of 3 with no waiters.
stats = asyncPool.getStats();
Assert.assertEquals(stats.getIdleCount(), 0);
Assert.assertEquals(stats.getPoolSize(), minConns + 1);
Assert.assertEquals(queue.size(), 0);
// Now, we will always get an exception because we don't have a free connection to the server.
{
keyedFuture = keyedPool.checkoutObject(serverInstance);
boolean caughtException = false;
LOGGER.debug("Will never get a connection here.");
try {
conn = keyedFuture.getOne(3, TimeUnit.SECONDS);
} catch (TimeoutException e) {
caughtException = true;
}
Assert.assertTrue(caughtException);
// Cancel the stuck checkout so it does not linger as a waiter.
keyedFuture.cancel(true);
}
// Now if the server goes down, we should release all three connections and be able to get a successful new connection
LOGGER.info("Shutting down server instance");
server.shutdown();
// Give it time to clean up on the client side.
Thread.sleep(2000L);
stats = asyncPool.getStats();
LOGGER.debug(stats.toString());
// There will be a couple in idleCount in error state.
Assert.assertEquals(stats.getIdleCount(), minConns);
Assert.assertEquals(stats.getPoolSize(), minConns);
LOGGER.debug("Restarting server instance");
server.restart();
Thread.sleep(3000);
LOGGER.debug("Server restart successful\n" + asyncPool.getStats());
// Now get 3 connections successfully
for (int i = 0; i < 3; i++) {
keyedFuture = keyedPool.checkoutObject(serverInstance);
conn = keyedFuture.getOne();
Assert.assertNotNull(conn);
}
server.shutdown();
}
Use of java.util.concurrent.ExecutorService in the Pinot project (LinkedIn).
From class DictionaryToRawIndexConverter — method convert.
/**
 * Method to perform the conversion for a set of segments in the {@link #_dataDir}.
 * <p>Segments are converted in parallel on {@code _numThreads} threads; per-segment
 * failures are logged but do not abort the remaining conversions.
 *
 * @return True if successful, False otherwise
 * @throws Exception
 */
public boolean convert() throws Exception {
if (_help) {
printUsage();
return true;
}
File dataDir = new File(_dataDir);
File outputDir = new File(_outputDir);
if (!dataDir.exists()) {
LOGGER.error("Data directory '{}' does not exist.", _dataDir);
return false;
}
if (outputDir.exists()) {
if (!_overwrite) {
LOGGER.error("Output directory '{}' already exists, use -overwrite to overwrite", outputDir);
return false;
}
LOGGER.info("Overwriting existing output directory '{}'", _outputDir);
FileUtils.deleteQuietly(outputDir);
outputDir = new File(_outputDir);
}
// Bug fix: the output directory was previously created only in the overwrite branch,
// so a fresh run against a non-existent output directory never created it. Also use
// mkdirs() and check the result instead of silently ignoring mkdir()'s return value.
if (!outputDir.mkdirs() && !outputDir.isDirectory()) {
LOGGER.error("Failed to create output directory '{}'.", outputDir);
return false;
}
File[] segmentFiles = dataDir.listFiles();
if (segmentFiles == null || segmentFiles.length == 0) {
LOGGER.error("Empty data directory '{}'.", _dataDir);
return false;
}
boolean ret = true;
final File outDir = outputDir;
ExecutorService executorService = Executors.newFixedThreadPool(_numThreads);
for (final File segmentDir : segmentFiles) {
executorService.execute(new Runnable() {
@Override
public void run() {
try {
convertSegment(segmentDir, _columns.split("\\s*,\\s*"), outDir, _compressOutput);
} catch (Exception e) {
// The logger already records the full stack trace; the redundant
// e.printStackTrace() to stderr has been removed.
LOGGER.error("Exception caught while converting segment {}", segmentDir.getName(), e);
}
}
});
}
executorService.shutdown();
// Bug fix: the awaitTermination() result was ignored — a timeout now cancels the
// remaining work and is reflected in the return value, matching the documented
// "True if successful" contract.
if (!executorService.awaitTermination(1, TimeUnit.HOURS)) {
LOGGER.error("Timed out waiting for segment conversion to finish; cancelling remaining tasks.");
executorService.shutdownNow();
ret = false;
}
return ret;
}
Use of java.util.concurrent.ExecutorService in the Pinot project (LinkedIn).
From class KeyedPoolImplTest — method testShutdownWhileCheckingOut.
@Test
public void testShutdownWhileCheckingOut() throws Exception {
// Pool with zero min / one max resource per key, backed by a resource manager that
// blocks resource creation, so the checkout below can never be satisfied.
ScheduledExecutorService timeoutScheduler = new ScheduledThreadPoolExecutor(1);
ExecutorService workerPool = new ThreadPoolExecutor(1, 1, 1, TimeUnit.DAYS, new LinkedBlockingDeque<Runnable>());
Map<String, List<String>> resourceMap = buildCreateMap(1, 1);
BlockingTestResourceManager resourceManager = new BlockingTestResourceManager(resourceMap, null, null, null);
KeyedPool<String, String> pool = new KeyedPoolImpl<String, String>(0, 1, 1000L, 1000 * 60 * 60, resourceManager, timeoutScheduler, workerPool, null);
pool.start();
AsyncResponseFuture<String, String> checkoutFuture = (AsyncResponseFuture<String, String>) pool.checkoutObject(getKey(0));
// The checkout must still be pending after a bounded wait.
boolean timedOut = false;
try {
checkoutFuture.get(2, TimeUnit.SECONDS);
} catch (TimeoutException e) {
timedOut = true;
}
Assert.assertTrue(timedOut);
// Shut the pool down while the checkout is still pending; the future must complete
// exceptionally rather than hang.
pool.shutdown().get();
// Future should have been done with error
Assert.assertNull(checkoutFuture.get());
Assert.assertNotNull(checkoutFuture.getError());
// A future that already completed (with an error) can no longer be cancelled.
Assert.assertFalse(checkoutFuture.cancel(false));
Assert.assertFalse(checkoutFuture.isCancelled());
Assert.assertTrue(checkoutFuture.isDone());
// Unblock resource creation so background threads can wind down before the test exits.
resourceManager.getCreateBlockLatch().countDown();
Thread.sleep(5000);
}
Aggregations