Use of org.infinispan.commons.CacheException in project wildfly by wildfly.
The class CacheRegistry, method topologyChanged.
@TopologyChanged
public CompletionStage<Void> topologyChanged(TopologyChangedEvent<Address, Map.Entry<K, V>> event) {
    if (!event.isPre()) {
        ConsistentHash previousHash = event.getWriteConsistentHashAtStart();
        List<Address> previousMembers = previousHash.getMembers();
        ConsistentHash hash = event.getWriteConsistentHashAtEnd();
        List<Address> members = hash.getMembers();
        if (!members.equals(previousMembers)) {
            Cache<Address, Map.Entry<K, V>> cache = event.getCache().getAdvancedCache().withFlags(Flag.FORCE_SYNCHRONOUS);
            Address localAddress = cache.getCacheManager().getAddress();
            // Determine which nodes have left the cache view
            Set<Address> leftMembers = new HashSet<>(previousMembers);
            leftMembers.removeAll(members);
            if (!leftMembers.isEmpty()) {
                Locality locality = new ConsistentHashLocality(cache, hash);
                // We're only interested in the entries for which we are the primary owner
                Iterator<Address> addresses = leftMembers.iterator();
                while (addresses.hasNext()) {
                    if (!locality.isLocal(addresses.next())) {
                        addresses.remove();
                    }
                }
            }
            // If this is a merge after cluster split: re-populate the cache registry with lost registry entries
            boolean restoreLocalEntry = !previousMembers.contains(localAddress);
            if (!leftMembers.isEmpty() || restoreLocalEntry) {
                try {
                    this.topologyChangeExecutor.submit(() -> {
                        if (!leftMembers.isEmpty()) {
                            Map<K, V> removed = new HashMap<>();
                            try {
                                for (Address leftMember : leftMembers) {
                                    Map.Entry<K, V> old = cache.remove(leftMember);
                                    if (old != null) {
                                        removed.put(old.getKey(), old.getValue());
                                    }
                                }
                            } catch (CacheException e) {
                                ClusteringServerLogger.ROOT_LOGGER.registryPurgeFailed(e, this.cache.getCacheManager().toString(), this.cache.getName(), leftMembers);
                            }
                            if (!removed.isEmpty()) {
                                this.notifyListeners(Event.Type.CACHE_ENTRY_REMOVED, removed);
                            }
                        }
                        if (restoreLocalEntry) {
                            // If this node is not a member at merge start, its mapping may have been lost and need to be recreated
                            try {
                                if (cache.put(localAddress, this.entry) == null) {
                                    // Local cache events do not trigger notifications
                                    this.notifyListeners(Event.Type.CACHE_ENTRY_CREATED, this.entry);
                                }
                            } catch (CacheException e) {
                                ClusteringServerLogger.ROOT_LOGGER.failedToRestoreLocalRegistryEntry(e, this.cache.getCacheManager().toString(), this.cache.getName());
                            }
                        }
                    });
                } catch (RejectedExecutionException e) {
                    // Executor was shutdown
                }
            }
        }
    }
    return CompletableFutures.completedNull();
}
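The snippet assumes the enclosing CacheRegistry carries Infinispan's @Listener annotation and has already been registered against the cache, which is what makes the @TopologyChanged callback fire. A minimal, self-contained sketch of that wiring, using illustrative names rather than the actual WildFly code:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;

import org.infinispan.Cache;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.TopologyChanged;
import org.infinispan.notifications.cachelistener.event.TopologyChangedEvent;

@Listener
public class TopologyLoggingListener<K, V> {

    public void register(Cache<K, V> cache) {
        // Listener methods start receiving topology events once the instance is registered
        cache.addListener(this);
    }

    @TopologyChanged
    public CompletionStage<Void> topologyChanged(TopologyChangedEvent<K, V> event) {
        if (!event.isPre()) {
            // Only the post-event reflects the final write consistent hash
            System.out.printf("Cache %s now has members %s%n",
                    event.getCache().getName(),
                    event.getWriteConsistentHashAtEnd().getMembers());
        }
        return CompletableFuture.completedFuture(null);
    }
}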
Use of org.infinispan.commons.CacheException in project wildfly by wildfly.
The class CacheRegistry, method close.
@Override
public void close() {
    this.cache.removeListener(this);
    this.shutdown(this.topologyChangeExecutor);
    try (Batch batch = this.batcher.createBatch()) {
        // If this remove fails, the entry will be auto-removed on topology change by the new primary owner
        this.cache.getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES, Flag.FAIL_SILENTLY).remove(this.group.getAddress(this.group.getLocalMember()));
    } catch (CacheException e) {
        ClusteringLogger.ROOT_LOGGER.warn(e.getLocalizedMessage(), e);
    } finally {
        // Cleanup any unregistered listeners
        for (ExecutorService executor : this.listeners.values()) {
            this.shutdown(executor);
        }
        this.listeners.clear();
        this.closeTask.run();
    }
}
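The shutdown(...) helper invoked above is not part of the snippet; a minimal sketch of what such a graceful executor shutdown commonly looks like (an assumption, not the actual WildFly implementation):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

final class ExecutorShutdown {

    // Stop accepting new tasks, give in-flight tasks a short grace period, then force termination.
    static void shutdown(ExecutorService executor) {
        executor.shutdown();
        try {
            if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                executor.shutdownNow();
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            executor.shutdownNow();
        }
    }
}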
Use of org.infinispan.commons.CacheException in project keycloak by keycloak.
The class InfinispanCacheInitializer, method startLoadingImpl.
protected void startLoadingImpl(InitializerState state, SessionLoader.LoaderContext loaderCtx) {
    // Assume each worker has same processor's count
    int processors = Runtime.getRuntime().availableProcessors();
    Transport transport = workCache.getCacheManager().getTransport();
    // Every worker iteration will be executed on single node. Use 3 failover attempts for each segment (should be sufficient in all cases)
    ClusterExecutor clusterExecutor = workCache.getCacheManager().executor().singleNodeSubmission(3);
    int errors = 0;
    int segmentToLoad = 0;
    // try {
    SessionLoader.WorkerResult previousResult = null;
    SessionLoader.WorkerResult nextResult = null;
    int distributedWorkersCount = 0;
    boolean firstTryForSegment = true;
    while (segmentToLoad < state.getSegmentsCount()) {
        if (firstTryForSegment) {
            // do not change the node count if it's not the first try
            int nodesCount = transport == null ? 1 : transport.getMembers().size();
            distributedWorkersCount = processors * nodesCount;
        }
        log.debugf("Starting next iteration with %d workers", distributedWorkersCount);
        List<Integer> segments = state.getSegmentsToLoad(segmentToLoad, distributedWorkersCount);
        if (log.isTraceEnabled()) {
            log.trace("unfinished segments for this iteration: " + segments);
        }
        List<CompletableFuture<Void>> futures = new LinkedList<>();
        final Queue<SessionLoader.WorkerResult> results = new ConcurrentLinkedQueue<>();
        CompletableFuture<Void> completableFuture = null;
        for (Integer segment : segments) {
            SessionLoader.WorkerContext workerCtx = sessionLoader.computeWorkerContext(loaderCtx, segment, segment - segmentToLoad, previousResult);
            SessionInitializerWorker worker = new SessionInitializerWorker();
            worker.setWorkerEnvironment(loaderCtx, workerCtx, sessionLoader, workCache.getName());
            completableFuture = clusterExecutor.submitConsumer(worker, (address, workerResult, throwable) -> {
                log.tracef("Calling triConsumer on address %s, throwable message: %s, segment: %s", address, throwable == null ? "null" : throwable.getMessage(), workerResult == null ? null : workerResult.getSegment());
                if (throwable != null) {
                    throw new CacheException(throwable);
                }
                results.add(workerResult);
            });
            futures.add(completableFuture);
        }
        boolean anyFailure = false;
        // Make sure that all workers are finished
        for (CompletableFuture<Void> future : futures) {
            try {
                future.get();
            } catch (InterruptedException ie) {
                anyFailure = true;
                errors++;
                log.error("Interrupted exception when computing future. Errors: " + errors, ie);
            } catch (ExecutionException ee) {
                anyFailure = true;
                errors++;
                log.error("ExecutionException when computing future. Errors: " + errors, ee);
            }
        }
        // Check the results
        for (SessionLoader.WorkerResult result : results) {
            if (result.isSuccess()) {
                state.markSegmentFinished(result.getSegment());
                if (result.getSegment() == segmentToLoad + distributedWorkersCount - 1) {
                    // last result for next iteration when complete
                    nextResult = result;
                }
            } else {
                if (log.isTraceEnabled()) {
                    log.tracef("Segment %d failed to compute", result.getSegment());
                }
                anyFailure = true;
            }
        }
        if (errors >= maxErrors) {
            throw new RuntimeException("Maximum count of worker errors occurred. Limit was " + maxErrors + ". See server.log for details");
        }
        if (!anyFailure) {
            // everything is OK, prepare the new row
            segmentToLoad += distributedWorkersCount;
            firstTryForSegment = true;
            previousResult = nextResult;
            nextResult = null;
            if (log.isTraceEnabled()) {
                log.debugf("New initializer state is: %s", state);
            }
        } else {
            // some segments failed, try to load unloaded segments
            firstTryForSegment = false;
        }
    }
    // Push the state after computation is finished
    saveStateToCache(state);
    // Loader callback after the task is finished
    this.sessionLoader.afterAllSessionsLoaded(this);
}
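The tri-consumer above converts any remote failure into a CacheException, which fails the corresponding CompletableFuture so that future.get() later surfaces it as an ExecutionException. A stripped-down, self-contained sketch of that pattern, using illustrative names rather than the Keycloak code:

import java.util.concurrent.CompletableFuture;

import org.infinispan.commons.CacheException;
import org.infinispan.manager.ClusterExecutor;
import org.infinispan.manager.EmbeddedCacheManager;

public class ClusterTaskSketch {

    public static CompletableFuture<Void> reportNodeAddress(EmbeddedCacheManager cacheManager) {
        // Single-node submission with up to 3 fail-over attempts, mirroring the loader above
        ClusterExecutor executor = cacheManager.executor().singleNodeSubmission(3);
        return executor.submitConsumer(
                manager -> String.valueOf(manager.getAddress()),  // executed on the chosen node
                (address, result, throwable) -> {                 // invoked locally with the outcome
                    if (throwable != null) {
                        // Wrapping in CacheException fails the returned future;
                        // callers observe it via future.get() as an ExecutionException
                        throw new CacheException(throwable);
                    }
                    System.out.printf("node %s answered %s%n", address, result);
                });
    }
}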