Use of org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper in project keycloak by keycloak.
Class InfinispanUserLoginFailureProvider, method removeAllLocalUserLoginFailuresEvent.
protected void removeAllLocalUserLoginFailuresEvent(String realmId) {
    log.tracef("removeAllLocalUserLoginFailuresEvent(%s)%s", realmId, getShortStackTrace());
    FuturesHelper futures = new FuturesHelper();
    Cache<LoginFailureKey, SessionEntityWrapper<LoginFailureEntity>> localCache = CacheDecorators.localCache(loginFailureCache);
    Cache<LoginFailureKey, SessionEntityWrapper<LoginFailureEntity>> localCacheStoreIgnore = CacheDecorators.skipCacheLoaders(localCache);
    localCacheStoreIgnore
            .entrySet()
            .stream()
            .filter(UserLoginFailurePredicate.create(realmId))
            .map(Mappers.loginFailureId())
            .forEach(loginFailureKey -> {
                // Remove the loginFailure from the remoteCache too. Use removeAsync for better perf
                Future future = localCache.removeAsync(loginFailureKey);
                futures.addTask(future);
            });
    futures.waitForAllToFinish();
    log.debugf("Removed %d login failures in realm %s", futures.size(), realmId);
}
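The FuturesHelper used above is a small Keycloak-internal helper that collects the Futures returned by removeAsync and waits for them all. A minimal, hypothetical equivalent of that pattern (the class below is an illustrative sketch, not the actual Keycloak implementation) could look like this:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

// Illustrative sketch only: collects the Futures returned by removeAsync() and blocks until all finish.
// Not the real org.keycloak FuturesHelper, just an approximation of the pattern used above.
class SimpleFuturesHelper {

    private final List<Future<?>> tasks = new ArrayList<>();

    void addTask(Future<?> future) {
        tasks.add(future);
    }

    void waitForAllToFinish() {
        for (Future<?> future : tasks) {
            try {
                future.get();
            } catch (InterruptedException | ExecutionException e) {
                throw new RuntimeException(e);
            }
        }
    }

    int size() {
        return tasks.size();
    }
}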
Use of org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper in project keycloak by keycloak.
Class ConcurrencyJDGOfflineBackupsTest, method main.
public static void main(String[] args) throws Exception {
    Cache<String, SessionEntityWrapper<UserSessionEntity>> cache1 = createManager(1).getCache(InfinispanConnectionProvider.USER_SESSION_CACHE_NAME);
    try {
        // Create the initial item
        UserSessionEntity session = new UserSessionEntity();
        session.setId("123");
        session.setRealmId("foo");
        session.setBrokerSessionId("!23123123");
        session.setBrokerUserId(null);
        session.setUser("foo");
        session.setLoginUsername("foo");
        session.setIpAddress("123.44.143.178");
        session.setStarted(Time.currentTime());
        session.setLastSessionRefresh(Time.currentTime());
        // AuthenticatedClientSessionEntity clientSession = new AuthenticatedClientSessionEntity();
        // clientSession.setAuthMethod("saml");
        // clientSession.setAction("something");
        // clientSession.setTimestamp(1234);
        // clientSession.setProtocolMappers(new HashSet<>(Arrays.asList("mapper1", "mapper2")));
        // clientSession.setRoles(new HashSet<>(Arrays.asList("role1", "role2")));
        // session.getAuthenticatedClientSessions().put(CLIENT_1_UUID.toString(), clientSession.getId());
        SessionEntityWrapper<UserSessionEntity> wrappedSession = new SessionEntityWrapper<>(session);

        // Some dummy testing of remoteStore behaviour
        logger.info("Before put");
        AtomicInteger successCount = new AtomicInteger(0);
        AtomicInteger errorsCount = new AtomicInteger(0);
        for (int i = 0; i < 100; i++) {
            try {
                // CACHE_MODE_LOCAL still invokes the remoteStore; it just doesn't propagate the write to the cluster
                cache1.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).put("123", wrappedSession);
                successCount.incrementAndGet();
                Thread.sleep(1000);
                logger.infof("Success in the iteration: %d", i);
            } catch (HotRodClientException hrce) {
                logger.errorf("Failed to put the item in the iteration: %d", i);
                errorsCount.incrementAndGet();
            }
        }
        logger.infof("SuccessCount: %d, ErrorsCount: %d", successCount.get(), errorsCount.get());
        // logger.info("After put");
        //
        // cache1.replace("123", wrappedSession);
        //
        // logger.info("After replace");
        //
        // cache1.get("123");
        //
        // logger.info("After cache1.get");
        // cache2.get("123");
        //
        // logger.info("After cache2.get");
    } finally {
        // Finish JVM
        cache1.getCacheManager().stop();
    }
}
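Reading the wrapped session back out of the embedded cache goes through SessionEntityWrapper.getEntity(), the same accessor the ConcurrencyJDGCacheReplaceTest further down this page relies on. A minimal sketch, assuming the cache1 and logger variables above and running inside the try block before the cache manager is stopped:

// Read the wrapper back and unwrap the stored session
SessionEntityWrapper<UserSessionEntity> stored = cache1.get("123");
if (stored != null) {
    UserSessionEntity storedSession = stored.getEntity();
    logger.infof("Stored session id: %s", storedSession.getId());
}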
Use of org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper in project keycloak by keycloak.
Class RemoteCacheSessionListener, method replaceRemoteEntityInCache.
protected void replaceRemoteEntityInCache(K key, long eventVersion) {
    // TODO: could be optimized by sending the remoteSession in the event itself
    AtomicBoolean replaced = new AtomicBoolean(false);
    int replaceRetries = 0;
    int sleepInterval = 25;
    do {
        replaceRetries++;
        SessionEntityWrapper<V> localEntityWrapper = cache.get(key);
        VersionedValue<SessionEntityWrapper<V>> remoteSessionVersioned = remoteCache.getWithMetadata(key);
        // Probably already removed
        if (remoteSessionVersioned == null || remoteSessionVersioned.getValue() == null) {
            logger.debugf("Entity '%s' not present in remoteCache. Ignoring replace", key);
            return;
        }
        if (remoteSessionVersioned.getVersion() < eventVersion) {
            try {
                logger.debugf("Got replace remote entity event prematurely for entity '%s', will try again. Event version: %d, got: %d", key, eventVersion, remoteSessionVersioned.getVersion());
                // Randomized sleep whose upper bound grows exponentially between retries
                Thread.sleep(new Random().nextInt(sleepInterval));
                continue;
            } catch (InterruptedException ex) {
                continue;
            } finally {
                sleepInterval = sleepInterval << 1;
            }
        }
        SessionEntity remoteSession = remoteSessionVersioned.getValue().getEntity();
        logger.debugf("Read session entity from the remote cache: %s. replaceRetries=%d", remoteSession, replaceRetries);
        SessionEntityWrapper<V> sessionWrapper = remoteSession.mergeRemoteEntityWithLocalEntity(localEntityWrapper);
        KeycloakModelUtils.runJobInTransaction(sessionFactory, (session -> {
            RealmModel realm = session.realms().getRealm(sessionWrapper.getEntity().getRealmId());
            long lifespanMs = lifespanMsLoader.apply(realm, sessionWrapper.getEntity());
            long maxIdleTimeMs = maxIdleTimeMsLoader.apply(realm, sessionWrapper.getEntity());
            // We received this event from the remoteCache, so we don't write the update back to it
            replaced.set(cache.getAdvancedCache()
                    .withFlags(Flag.SKIP_CACHE_STORE, Flag.SKIP_CACHE_LOAD, Flag.IGNORE_RETURN_VALUES)
                    .replace(key, localEntityWrapper, sessionWrapper, lifespanMs, TimeUnit.MILLISECONDS, maxIdleTimeMs, TimeUnit.MILLISECONDS));
        }));
        if (!replaced.get()) {
            logger.debugf("Did not succeed in merging sessions, will try again: %s", remoteSession);
        }
    } while (replaceRetries < MAXIMUM_REPLACE_RETRIES && !replaced.get());
}
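The retry logic above waits for the remote cache version to catch up with the version carried by the event, sleeping a random amount bounded by an interval that doubles on every attempt. Stripped of the Keycloak and Infinispan specifics, the backoff pattern is roughly the following sketch (MAX_RETRIES and isUpToDate are placeholders for illustration, not Keycloak APIs):

import java.util.Random;
import java.util.function.BooleanSupplier;

public class RetryWithBackoff {

    private static final int MAX_RETRIES = 10; // placeholder for MAXIMUM_REPLACE_RETRIES
    private static final Random RANDOM = new Random();

    // Retries the check with a randomized sleep whose upper bound doubles each round,
    // mirroring the sleepInterval << 1 logic in replaceRemoteEntityInCache above.
    static boolean retryUntilTrue(BooleanSupplier isUpToDate) {
        int sleepInterval = 25;
        for (int attempt = 0; attempt < MAX_RETRIES; attempt++) {
            if (isUpToDate.getAsBoolean()) {
                return true;
            }
            try {
                Thread.sleep(RANDOM.nextInt(sleepInterval));
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
                return false;
            }
            sleepInterval = sleepInterval << 1;
        }
        return false;
    }
}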
Use of org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper in project keycloak by keycloak.
Class ConcurrencyDistributedRemoveSessionTest, method createSessionEntity.
private static SessionEntityWrapper<UserSessionEntity> createSessionEntity(String sessionId) {
    // Creates one of the (100) initial sessions used by the test
    UserSessionEntity session = new UserSessionEntity();
    session.setId(sessionId);
    session.setRealmId("foo");
    session.setBrokerSessionId("!23123123");
    session.setBrokerUserId(null);
    session.setUser("foo");
    session.setLoginUsername("foo");
    session.setIpAddress("123.44.143.178");
    session.setStarted(Time.currentTime());
    session.setLastSessionRefresh(Time.currentTime());
    AuthenticatedClientSessionEntity clientSession = new AuthenticatedClientSessionEntity(UUID.randomUUID());
    clientSession.setAuthMethod("saml");
    clientSession.setAction("something");
    clientSession.setTimestamp(1234);
    session.getAuthenticatedClientSessions().put(CLIENT_1_UUID.toString(), clientSession.getId());
    return new SessionEntityWrapper<>(session);
}
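In the surrounding test this factory seeds the distributed cache with wrapped sessions before the workers start removing them concurrently. The loop below is a hypothetical sketch of that seeding step (the cache1 variable and the count of 100 are assumptions based on the comment in the method, not code shown on this page):

// Hypothetical seeding loop: create the initial sessions and put their wrappers into the cache,
// mirroring how the other tests on this page call cache.put(key, wrappedSession).
for (int i = 0; i < 100; i++) {
    String sessionId = UUID.randomUUID().toString();
    cache1.put(sessionId, createSessionEntity(sessionId));
}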
Use of org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper in project keycloak by keycloak.
Class ConcurrencyJDGCacheReplaceTest, method main.
public static void main(String[] args) throws Exception {
    Cache<String, SessionEntityWrapper<UserSessionEntity>> cache1 = createManager(1).getCache(InfinispanConnectionProvider.USER_SESSION_CACHE_NAME);
    Cache<String, SessionEntityWrapper<UserSessionEntity>> cache2 = createManager(2).getCache(InfinispanConnectionProvider.USER_SESSION_CACHE_NAME);

    // Create the initial item
    UserSessionEntity session = new UserSessionEntity();
    session.setId("123");
    session.setRealmId("foo");
    session.setBrokerSessionId("!23123123");
    session.setBrokerUserId(null);
    session.setUser("foo");
    session.setLoginUsername("foo");
    session.setIpAddress("123.44.143.178");
    session.setStarted(Time.currentTime());
    session.setLastSessionRefresh(Time.currentTime());
    AuthenticatedClientSessionEntity clientSession = new AuthenticatedClientSessionEntity(UUID.randomUUID());
    clientSession.setAuthMethod("saml");
    clientSession.setAction("something");
    clientSession.setTimestamp(1234);
    session.getAuthenticatedClientSessions().put(CLIENT_1_UUID.toString(), clientSession.getId());
    SessionEntityWrapper<UserSessionEntity> wrappedSession = new SessionEntityWrapper<>(session);

    // Some dummy testing of remoteStore behaviour
    logger.info("Before put");
    // CACHE_MODE_LOCAL still invokes the remoteStore; it just doesn't propagate the write to the cluster
    cache1.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).put("123", wrappedSession);
    logger.info("After put");
    cache1.replace("123", wrappedSession);
    logger.info("After replace");
    cache1.get("123");
    logger.info("After cache1.get");
    cache2.get("123");
    logger.info("After cache2.get");
    cache1.get("123");
    logger.info("After cache1.get - second call");
    cache2.get("123");
    logger.info("After cache2.get - second call");
    cache2.replace("123", wrappedSession);
    logger.info("After replace - second call");
    cache1.get("123");
    logger.info("After cache1.get - third call");
    cache2.get("123");
    logger.info("After cache2.get - third call");
    cache1.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD).entrySet().stream().forEach(e -> {
    });
    logger.info("After cache1.stream");

    // Explicitly call put on the remoteCache (KcRemoteCache.write ignores remote writes)
    InfinispanUtil.getRemoteCache(cache1).put("123", session);
    InfinispanUtil.getRemoteCache(cache2).replace("123", session);

    // Create caches, listeners and finally worker threads
    remoteCache1 = InfinispanUtil.getRemoteCache(cache1);
    remoteCache2 = InfinispanUtil.getRemoteCache(cache2);

    // Manual test of lifespans
    testLifespans();

    Thread worker1 = createWorker(cache1, 1);
    Thread worker2 = createWorker(cache2, 2);
    long start = System.currentTimeMillis();

    // Start and join workers
    worker1.start();
    worker2.start();
    worker1.join();
    worker2.join();
    long took = System.currentTimeMillis() - start;

    // // Output
    // for (Map.Entry<String, EntryInfo> entry : state.entrySet()) {
    //     System.out.println(entry.getKey() + ":::" + entry.getValue());
    //     worker1.cache.remove(entry.getKey());
    // }
    System.out.println("Finished. Took: " + took + " ms. Notes: " + cache1.get("123").getEntity().getNotes().size()
            + ", successfulListenerWrites: " + successfulListenerWrites.get()
            + ", successfulListenerWrites2: " + successfulListenerWrites2.get()
            + ", failedReplaceCounter: " + failedReplaceCounter.get()
            + ", failedReplaceCounter2: " + failedReplaceCounter2.get());
    System.out.println("Sleeping before other report");
    Thread.sleep(2000);
    System.out.println("Finished. Took: " + took + " ms. Notes: " + cache1.get("123").getEntity().getNotes().size()
            + ", successfulListenerWrites: " + successfulListenerWrites.get()
            + ", successfulListenerWrites2: " + successfulListenerWrites2.get()
            + ", failedReplaceCounter: " + failedReplaceCounter.get()
            + ", failedReplaceCounter2: " + failedReplaceCounter2.get());
    System.out.println("remoteCache1.notes: " + ((UserSessionEntity) remoteCache1.get("123")).getNotes().size());
    System.out.println("remoteCache2.notes: " + ((UserSessionEntity) remoteCache2.get("123")).getNotes().size());
    System.out.println("Histogram: ");

    // Shutdown the executor pools
    for (ExecutorService ex : executors) {
        ex.shutdown();
    }

    // Finish JVM
    cache1.getCacheManager().stop();
    cache2.getCacheManager().stop();
}
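Note the asymmetry the final reads rely on: the embedded caches store SessionEntityWrapper values, while this test writes the raw UserSessionEntity into the remote caches, which is why the remote reads at the end need a cast. A minimal sketch of the two read paths, assuming the variables from the test above and run before the cache managers are stopped:

// Embedded cache: values are wrapped, so unwrap via getEntity()
UserSessionEntity fromEmbedded = cache1.get("123").getEntity();

// Remote cache as used in this test: values were put as raw UserSessionEntity, so cast directly
UserSessionEntity fromRemote = (UserSessionEntity) remoteCache1.get("123");

System.out.println("Embedded notes: " + fromEmbedded.getNotes().size()
        + ", remote notes: " + fromRemote.getNotes().size());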