use of java.util.concurrent.ExecutorCompletionService in project cdap by caskdata.
the class SparkTransactionHandlerTest method testConcurrentJobRun.
/**
 * Tests concurrent job submission.
 */
@Test(timeout = 120000L)
public void testConcurrentJobRun() throws Exception {
  final AtomicInteger jobIdGen = new AtomicInteger();
  final AtomicInteger stageIdGen = new AtomicInteger();
  // Start 30 jobs concurrently
  int threads = 30;
  ExecutorService executor = Executors.newFixedThreadPool(threads);
  try {
    final CyclicBarrier barrier = new CyclicBarrier(threads);
    final Random random = new Random();
    CompletionService<Boolean> completionService = new ExecutorCompletionService<>(executor);
    // For each run, return the verification result
    for (int i = 0; i < threads; i++) {
      completionService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
          // Wait until all threads are ready so the jobs start at the same time
          barrier.await();
          try {
            // Run a job with 2-5 stages that either succeeds or fails
            testRunJob(jobIdGen.getAndIncrement(), generateStages(stageIdGen, 2 + random.nextInt(4)), random.nextBoolean());
            return true;
          } catch (Throwable t) {
            LOG.error("testRunJob failed.", t);
            return false;
          }
        }
      });
    }
    // Every testRunJob call must complete successfully
    boolean result = true;
    for (int i = 0; i < threads; i++) {
      result = result && completionService.take().get();
    }
    Assert.assertTrue(result);
  } finally {
    executor.shutdown();
  }
}
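The test above is essentially a fan-out/fan-in: submit N callables through the CompletionService, then take() N results without caring which finishes first. A minimal, self-contained sketch of that pattern (the class name, task count, and trivial task body are illustrative, not from the CDAP test):

import java.util.concurrent.CompletionService;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompletionServiceFanOut {
  public static void main(String[] args) throws Exception {
    int tasks = 4; // illustrative task count
    ExecutorService executor = Executors.newFixedThreadPool(tasks);
    try {
      CompletionService<Boolean> completionService = new ExecutorCompletionService<>(executor);
      // The barrier releases all tasks at once, maximizing concurrency, as in the test above
      CyclicBarrier barrier = new CyclicBarrier(tasks);
      for (int i = 0; i < tasks; i++) {
        final int id = i;
        completionService.submit(() -> {
          barrier.await();
          return id >= 0; // stand-in for the real per-task verification
        });
      }
      // take() yields futures in completion order, not submission order
      boolean allPassed = true;
      for (int i = 0; i < tasks; i++) {
        allPassed = allPassed && completionService.take().get();
      }
      System.out.println("all passed: " + allPassed);
    } finally {
      executor.shutdown();
    }
  }
}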
use of java.util.concurrent.ExecutorCompletionService in project ddf by codice.
the class CachingFederationStrategy method sourceFederate.
private QueryResponse sourceFederate(List<Source> sources, final QueryRequest queryRequest) {
  if (LOGGER.isDebugEnabled()) {
    for (Source source : sources) {
      if (source != null) {
        LOGGER.debug("source to query: {}", source.getId());
      }
    }
  }
  Query originalQuery = queryRequest.getQuery();
  int offset = originalQuery.getStartIndex();
  final int pageSize = originalQuery.getPageSize();
  // limit offset to max value
  if (offset > this.maxStartIndex) {
    offset = this.maxStartIndex;
  }
  final QueryResponseImpl queryResponseQueue = new QueryResponseImpl(queryRequest, null);
  Map<Future<SourceResponse>, QueryRequest> futures = new HashMap<>();
  Query modifiedQuery = getModifiedQuery(originalQuery, sources.size(), offset, pageSize);
  QueryRequest modifiedQueryRequest = new QueryRequestImpl(modifiedQuery, queryRequest.isEnterprise(), queryRequest.getSourceIds(), queryRequest.getProperties());
  CompletionService<SourceResponse> queryCompletion = new ExecutorCompletionService<>(queryExecutorService);
  // Do NOT call source.isAvailable() when checking sources
  for (final Source source : sources) {
    if (source != null) {
      if (!futuresContainsSource(source, futures)) {
        LOGGER.debug("running query on source: {}", source.getId());
        QueryRequest sourceQueryRequest = new QueryRequestImpl(modifiedQuery, queryRequest.isEnterprise(), Collections.singleton(source.getId()), new HashMap<>(queryRequest.getProperties()));
        try {
          for (PreFederatedQueryPlugin service : preQuery) {
            try {
              sourceQueryRequest = service.process(source, sourceQueryRequest);
            } catch (PluginExecutionException e) {
              LOGGER.info("Error executing PreFederatedQueryPlugin", e);
            }
          }
        } catch (StopProcessingException e) {
          LOGGER.info("Plugin stopped processing", e);
        }
        if (source instanceof CatalogProvider && SystemInfo.getSiteName().equals(source.getId())) {
          // TODO RAP 12 Jul 16: DDF-2294 - Extract into a new PreFederatedQueryPlugin
          sourceQueryRequest = validationQueryFactory.getQueryRequestWithValidationFilter(sourceQueryRequest, showErrors, showWarnings);
        }
        futures.put(queryCompletion.submit(new CallableSourceResponse(source, sourceQueryRequest)), sourceQueryRequest);
      } else {
        LOGGER.info("Duplicate source found with name {}. Ignoring second one.", source.getId());
      }
    }
  }
  QueryResponseImpl offsetResults = null;
  // When the offset applies across multiple sources, paging is delegated to the OffsetResultHandler.
  if (offset > 1 && sources.size() > 1) {
    offsetResults = new QueryResponseImpl(queryRequest, null);
    queryExecutorService.submit(new OffsetResultHandler(queryResponseQueue, offsetResults, pageSize, offset));
  }
  queryExecutorService.submit(sortedQueryMonitorFactory.createMonitor(queryCompletion, futures, queryResponseQueue, modifiedQueryRequest, postQuery));
  QueryResponse queryResponse;
  if (offset > 1 && sources.size() > 1) {
    queryResponse = offsetResults;
    LOGGER.debug("returning offsetResults");
  } else {
    queryResponse = queryResponseQueue;
    LOGGER.debug("returning returnResults: {}", queryResponse);
  }
  LOGGER.debug("returning Query Results: {}", queryResponse);
  return queryResponse;
}
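Note how sourceFederate records every submission in a Map<Future<SourceResponse>, QueryRequest>: the completion service hands back futures in completion order, and the map lets the monitor recover which request produced each response. A stripped-down sketch of that correlation pattern, using hypothetical Request/Response records in place of DDF's types:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CorrelatedFanOut {
  // Hypothetical stand-ins for DDF's QueryRequest and SourceResponse
  record Request(String sourceId) {}
  record Response(String sourceId, int hitCount) {}

  public static void main(String[] args) throws Exception {
    String[] sourceIds = {"alpha", "beta", "gamma"}; // illustrative source ids
    ExecutorService executor = Executors.newFixedThreadPool(sourceIds.length);
    try {
      CompletionService<Response> completion = new ExecutorCompletionService<>(executor);
      Map<Future<Response>, Request> futures = new HashMap<>();
      for (String id : sourceIds) {
        Request request = new Request(id);
        // Key the future by the request so results can be correlated later
        futures.put(completion.submit(() -> new Response(id, id.length())), request);
      }
      while (!futures.isEmpty()) {
        Future<Response> done = completion.take(); // completion order
        Request origin = futures.remove(done);
        System.out.println(origin.sourceId() + " -> " + done.get().hitCount() + " hits");
      }
    } finally {
      executor.shutdown();
    }
  }
}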
use of java.util.concurrent.ExecutorCompletionService in project ddf by codice.
the class CswQueryResponseTransformer method multiThreadedMarshal.
/**
 * Multi-threaded marshaling of metacards. The query size is assumed to be unbounded, so a
 * fixed thread pool and a fixed work queue guard against resource exhaustion. The work is
 * CPU-bound, so a pool of availableProcessors() + 1 threads gives optimal utilization.
 *
 * @param results      the list of results to marshal
 * @param numResults   running count of results; decremented when a transform fails
 * @param recordSchema the schema of the transformer to use
 * @param arguments    additional transform arguments
 * @return the marshaled results as a single string
 * @throws CatalogTransformerException if no transformer is registered for the schema
 */
private String multiThreadedMarshal(List<Result> results, AtomicLong numResults, String recordSchema, final Map<String, Serializable> arguments) throws CatalogTransformerException {
  CompletionService<BinaryContent> completionService = new ExecutorCompletionService<>(queryExecutor);
  final MetacardTransformer transformer = metacardTransformerManager.getTransformerBySchema(recordSchema);
  if (transformer == null) {
    throw new CatalogTransformerException("Cannot find transformer for schema: " + recordSchema);
  }
  Map<Future<BinaryContent>, Result> futures = new HashMap<>(results.size());
  for (Result result : results) {
    final Metacard mc = result.getMetacard();
    // The "current" thread runs a submitted task itself once the queue size is exceeded,
    // effectively blocking the enqueue of more tasks.
    futures.put(completionService.submit(() -> transformer.transform(mc, arguments)), result);
  }
  InputStream[] contents = new InputStream[results.size()];
  while (!futures.isEmpty()) {
    try {
      Future<BinaryContent> completedFuture = completionService.take();
      // Write each transform result back into its original position in the result list
      int index = results.indexOf(futures.get(completedFuture));
      try {
        contents[index] = completedFuture.get().getInputStream();
      } catch (ExecutionException | CancellationException | InterruptedException e) {
        LOGGER.debug("Error transforming Metacard", e);
        numResults.decrementAndGet();
      } finally {
        futures.remove(completedFuture);
      }
    } catch (InterruptedException e) {
      LOGGER.debug("Metacard transform interrupted", e);
    }
  }
  CharArrayWriter accum = new CharArrayWriter(ACCUM_INITIAL_SIZE);
  for (InputStream is : contents) {
    try {
      if (is != null) {
        IOUtils.copy(is, accum);
      }
    } catch (IOException e) {
      LOGGER.debug("Error copying Metacard Binary content", e);
    }
  }
  return accum.toString();
}
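multiThreadedMarshal consumes transform results in completion order but must emit them in the original result order; it does so by looking up each completed future's index and writing into a fixed-size array. A small sketch of that submission-order reassembly, with a trivial uppercase transform standing in for the metacard transformer:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class OrderedFanIn {
  public static void main(String[] args) throws Exception {
    String[] inputs = {"first", "second", "third"}; // illustrative inputs
    int poolSize = Runtime.getRuntime().availableProcessors() + 1; // CPU-bound sizing, as above
    ExecutorService executor = Executors.newFixedThreadPool(poolSize);
    try {
      CompletionService<String> completion = new ExecutorCompletionService<>(executor);
      // Remember each future's slot in the output array
      Map<Future<String>, Integer> slots = new HashMap<>();
      for (int i = 0; i < inputs.length; i++) {
        final String input = inputs[i];
        slots.put(completion.submit(input::toUpperCase), i);
      }
      String[] outputs = new String[inputs.length];
      // Drain in completion order, but write into submission-order slots
      while (!slots.isEmpty()) {
        Future<String> done = completion.take();
        outputs[slots.remove(done)] = done.get();
      }
      System.out.println(String.join(" ", outputs)); // FIRST SECOND THIRD
    } finally {
      executor.shutdown();
    }
  }
}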