Use of org.infinispan.manager.ClusterExecutor in project keycloak by keycloak.
The example below is the startLoadingImpl method of the class InfinispanCacheInitializer.
protected void startLoadingImpl(InitializerState state, SessionLoader.LoaderContext loaderCtx) {
    // Assume each worker node has the same processor count
    int processors = Runtime.getRuntime().availableProcessors();
    Transport transport = workCache.getCacheManager().getTransport();

    // Every worker iteration will be executed on a single node. Use 3 failover attempts for each segment (should be sufficient in all cases)
    ClusterExecutor clusterExecutor = workCache.getCacheManager().executor().singleNodeSubmission(3);

    int errors = 0;
    int segmentToLoad = 0;

    // try {
    SessionLoader.WorkerResult previousResult = null;
    SessionLoader.WorkerResult nextResult = null;
    int distributedWorkersCount = 0;
    boolean firstTryForSegment = true;
    while (segmentToLoad < state.getSegmentsCount()) {
        if (firstTryForSegment) {
            // do not change the node count if it's not the first try
            int nodesCount = transport == null ? 1 : transport.getMembers().size();
            distributedWorkersCount = processors * nodesCount;
        }

        log.debugf("Starting next iteration with %d workers", distributedWorkersCount);

        List<Integer> segments = state.getSegmentsToLoad(segmentToLoad, distributedWorkersCount);

        if (log.isTraceEnabled()) {
            log.trace("unfinished segments for this iteration: " + segments);
        }

        List<CompletableFuture<Void>> futures = new LinkedList<>();
        final Queue<SessionLoader.WorkerResult> results = new ConcurrentLinkedQueue<>();

        CompletableFuture<Void> completableFuture = null;
        for (Integer segment : segments) {
            SessionLoader.WorkerContext workerCtx = sessionLoader.computeWorkerContext(loaderCtx, segment, segment - segmentToLoad, previousResult);

            SessionInitializerWorker worker = new SessionInitializerWorker();
            worker.setWorkerEnvironment(loaderCtx, workerCtx, sessionLoader, workCache.getName());

            completableFuture = clusterExecutor.submitConsumer(worker, (address, workerResult, throwable) -> {
                log.tracef("Calling triConsumer on address %s, throwable message: %s, segment: %s", address,
                        throwable == null ? "null" : throwable.getMessage(),
                        workerResult == null ? null : workerResult.getSegment());

                if (throwable != null) {
                    throw new CacheException(throwable);
                }

                results.add(workerResult);
            });

            futures.add(completableFuture);
        }
        boolean anyFailure = false;

        // Make sure that all workers are finished
        for (CompletableFuture<Void> future : futures) {
            try {
                future.get();
            } catch (InterruptedException ie) {
                anyFailure = true;
                errors++;
                log.error("InterruptedException when computing future. Errors: " + errors, ie);
            } catch (ExecutionException ee) {
                anyFailure = true;
                errors++;
                log.error("ExecutionException when computing future. Errors: " + errors, ee);
            }
        }
        // Check the results
        for (SessionLoader.WorkerResult result : results) {
            if (result.isSuccess()) {
                state.markSegmentFinished(result.getSegment());
                if (result.getSegment() == segmentToLoad + distributedWorkersCount - 1) {
                    // remember the last segment's result; it is passed to the next iteration once this one completes
                    nextResult = result;
                }
            } else {
                if (log.isTraceEnabled()) {
                    log.tracef("Segment %d failed to compute", result.getSegment());
                }
                anyFailure = true;
            }
        }

        if (errors >= maxErrors) {
            throw new RuntimeException("Maximum count of worker errors occurred. Limit was " + maxErrors + ". See server.log for details");
        }
        if (!anyFailure) {
            // Everything is OK, advance to the next batch of segments
            segmentToLoad += distributedWorkersCount;
            firstTryForSegment = true;
            previousResult = nextResult;
            nextResult = null;

            if (log.isTraceEnabled()) {
                log.tracef("New initializer state is: %s", state);
            }
        } else {
            // Some segments failed, retry the unloaded segments
            firstTryForSegment = false;
        }
    }
    // Push the state after computation is finished
    saveStateToCache(state);

    // Loader callback after the task is finished
    this.sessionLoader.afterAllSessionsLoaded(this);
}
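
The same submitConsumer pattern can be tried outside Keycloak. Below is a minimal, hypothetical sketch assuming an already-started EmbeddedCacheManager is passed in; the class and method names (ClusterExecutorSketch, NodeNameTask, runOnSingleNode) are illustrative and not part of Keycloak or Infinispan.

import java.io.Serializable;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

import org.infinispan.manager.ClusterExecutor;
import org.infinispan.manager.EmbeddedCacheManager;

public class ClusterExecutorSketch {

    // Work sent to the selected node; Serializable so it can be marshalled to a remote member
    public static class NodeNameTask implements Function<EmbeddedCacheManager, String>, Serializable {
        @Override
        public String apply(EmbeddedCacheManager cm) {
            return String.valueOf(cm.getAddress());
        }
    }

    public static void runOnSingleNode(EmbeddedCacheManager cacheManager) throws Exception {
        // Route each submission to a single node, with up to 3 failover attempts
        ClusterExecutor executor = cacheManager.executor().singleNodeSubmission(3);

        // The task runs on the chosen node; the TriConsumer callback runs locally with the result
        CompletableFuture<Void> future = executor.submitConsumer(new NodeNameTask(),
                (address, nodeName, throwable) -> {
                    if (throwable != null) {
                        throw new IllegalStateException(throwable);
                    }
                    System.out.println("Task executed on node: " + nodeName);
                });

        // Block until the remote execution and the callback have completed
        future.get();
    }
}

With singleNodeSubmission(3), a failed submission is retried on other members up to three times, which is presumably why the Keycloak code above tolerates individual node failures while loading session segments.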