Use of io.lettuce.core.resource.ClientResources in project micronaut-redis by micronaut-projects.
The class AbstractRedisClientFactory, method redisClient.
/**
* Creates the {@link RedisClient} from the configuration.
*
* @param config The configuration
* @param optionalClientResources The optional ClientResources
* @param mutators The list of ClientResourcesMutators to apply
* @return The {@link RedisClient}
*/
public RedisClient redisClient(AbstractRedisConfiguration config,
        @Nullable ClientResources optionalClientResources,
        @Nullable List<ClientResourcesMutator> mutators) {
    ClientResources clientResources = configureClientResources(config, optionalClientResources, mutators);
    if (clientResources == null) {
        return redisClient(config);
    }
    Optional<RedisURI> uri = config.getUri();
    return uri.map(redisURI -> RedisClient.create(clientResources, redisURI))
            .orElseGet(() -> RedisClient.create(clientResources, config));
}
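configureClientResources is not shown in the snippet above; the following is only a minimal sketch of how such a helper might apply the mutators, assuming ClientResourcesMutator exposes a mutate(builder, config) hook (that signature is an assumption, not the verified micronaut-redis API), with the general idea that mutators customize a ClientResources.Builder before the client is created.

// Sketch only: one way the mutators could be applied to a ClientResources.Builder.
// The mutate(...) signature on ClientResourcesMutator is assumed here.
private ClientResources configureClientResources(AbstractRedisConfiguration config,
        @Nullable ClientResources clientResources,
        @Nullable List<ClientResourcesMutator> mutators) {
    if (clientResources == null) {
        return null;
    }
    ClientResources.Builder builder = clientResources.mutate();
    if (mutators != null) {
        mutators.forEach(mutator -> mutator.mutate(builder, config));
    }
    return builder.build();
}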
Use of io.lettuce.core.resource.ClientResources in project iPLAss by ISID.
The class RedisCacheStoreFactory, method inited.
@Override
public void inited(CacheService service, Config config) {
    RedisService rs = config.getDependentService(RedisService.class);
    server = rs.getRedisServer(serverName);
    if (server == null) {
        throw new MtpException("Unknown redis server name: " + serverName);
    }
    ClientResources resources = DefaultClientResources.builder().build();
    RedisURI.Builder uriBuilder = RedisURI.builder()
            .withHost(server.getHost())
            .withPort(server.getPort());
    if (server.getTimeout() > 0) {
        uriBuilder.withTimeout(Duration.ofSeconds(server.getTimeout()));
    }
    client = RedisClient.create(resources, uriBuilder.build());
}
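Lettuce does not shut down externally supplied ClientResources when the client is shut down, so the factory remains responsible for releasing both. A minimal sketch, assuming the ClientResources instance is kept in a field (in the snippet above it is a local variable) and that the factory has a matching teardown hook:

// Sketch: releasing the client and its ClientResources when the cache store is torn down.
// The destroyed() hook and the resources field are hypothetical; adapt to the actual lifecycle.
public void destroyed() {
    if (client != null) {
        client.shutdown();
    }
    if (resources != null) {
        resources.shutdown();
    }
}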
Use of io.lettuce.core.resource.ClientResources in project Sermant by huaweicloud.
The class RedisConnCluster, method getConnectionFactory.
/**
 * Registers a RedisConnectionFactory bean in the IoC container.
 * <p>
 * Intended mainly for the Redis cluster configuration; for now only the standalone variant is implemented.
 *
 * @return LettuceConnectionFactory
 */
@Bean(destroyMethod = "destroy")
@Conditional(value = { RedisClusterCondition.class })
@Scope(BeanDefinition.SCOPE_PROTOTYPE)
public LettuceConnectionFactory getConnectionFactory() {
    // Cluster configuration
    Map<String, Object> source = new HashMap<>();
    source.put("spring.redis.cluster.nodes", nodes);
    source.put("spring.redis.timeout", timeout);
    source.put("spring.redis.database", database);
    source.put("spring.redis.password", password);
    RedisClusterConfiguration redisClusterConfiguration =
            new RedisClusterConfiguration(new MapPropertySource("RedisClusterConfiguration", source));
    redisClusterConfiguration.setMaxRedirects(redirects);
    // Connection pool configuration
    GenericObjectPoolConfig genericObjectPoolConfig = new GenericObjectPoolConfig();
    genericObjectPoolConfig.setMaxIdle(maxIdle);
    genericObjectPoolConfig.setMinIdle(minIdle);
    genericObjectPoolConfig.setMaxTotal(maxActive);
    genericObjectPoolConfig.setMaxWaitMillis(MAX_WAIT_MS);
    genericObjectPoolConfig.setTimeBetweenEvictionRunsMillis(EVICTION_RUNS_MS);
    // Enable adaptive and periodic cluster topology refresh
    ClusterTopologyRefreshOptions clusterTopologyRefreshOptions = ClusterTopologyRefreshOptions.builder()
            .enableAllAdaptiveRefreshTriggers()
            .adaptiveRefreshTriggersTimeout(Duration.ofSeconds(REFRESH_TRIGGERS_TIMEOUT))
            .enablePeriodicRefresh(Duration.ofSeconds(PERIODIC_REFRESH))
            .build();
    ClientResources clientResources = DefaultClientResources.builder()
            .dnsResolver(DnsResolvers.JVM_DEFAULT)
            .build();
    final SocketOptions socketOptions = SocketOptions.builder()
            .connectTimeout(Duration.ofMillis(timeout))
            .build();
    final ClientOptions clientOptions = ClusterClientOptions.builder()
            .socketOptions(socketOptions)
            .autoReconnect(true)
            .topologyRefreshOptions(clusterTopologyRefreshOptions)
            .build();
    LettuceClientConfiguration clientConfig = LettucePoolingClientConfiguration.builder()
            .poolConfig(genericObjectPoolConfig)
            .clientOptions(clientOptions)
            .clientResources(clientResources)
            .commandTimeout(Duration.ofMillis(timeout))
            .build();
    return new LettuceConnectionFactory(redisClusterConfiguration, clientConfig);
}
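The returned LettuceConnectionFactory can then be consumed like any other Spring Data Redis connection factory. An illustrative wiring (bean name and serializers are arbitrary choices, not part of the Sermant code):

// Illustrative only: a RedisTemplate built on the LettuceConnectionFactory defined above.
@Bean
public RedisTemplate<String, String> redisTemplate(LettuceConnectionFactory connectionFactory) {
    RedisTemplate<String, String> template = new RedisTemplate<>();
    template.setConnectionFactory(connectionFactory);
    template.setKeySerializer(new StringRedisSerializer());
    template.setValueSerializer(new StringRedisSerializer());
    return template;
}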
Use of io.lettuce.core.resource.ClientResources in project lettuce-core by lettuce-io.
The class ConnectionFailureIntegrationTests, method pingOnConnectFailureShouldCloseConnectionOnReconnect.
@Test
void pingOnConnectFailureShouldCloseConnectionOnReconnect() throws Exception {
    BlockingQueue<Channel> ref = new LinkedBlockingQueue<>();
    ClientResources clientResources = ClientResources.builder().nettyCustomizer(new NettyCustomizer() {

        @Override
        public void afterChannelInitialized(Channel channel) {
            ref.add(channel);
        }
    }).build();
    RedisURI redisUri = RedisURI.create(TestSettings.host(), TestSettings.port());
    RedisClient client = RedisClient.create(clientResources, redisUri);
    client.setOptions(ClientOptions.builder().pingBeforeActivateConnection(true).build());
    StatefulRedisConnection<String, String> connection = client.connect();
    ConnectionWatchdog connectionWatchdog = ConnectionTestUtil.getConnectionWatchdog(connection);
    connectionWatchdog.setListenOnChannelInactive(false);
    connection.async().quit();
    // Redirect the reconnect to a cluster node that requires AUTH so the PING on connect fails
    redisUri.setPort(7385);
    connectionWatchdog.setListenOnChannelInactive(true);
    connectionWatchdog.scheduleReconnect();
    Wait.untilTrue(() -> ref.size() > 1).waitOrTimeout();
    redisUri.setPort(TestSettings.port());
    Channel initial = ref.take();
    assertThat(initial.isOpen()).isFalse();
    Channel reconnect = ref.take();
    // The reconnected channel must be closed because the PING failed during connection activation
    Wait.untilTrue(() -> !reconnect.isOpen()).waitOrTimeout();
    assertThat(reconnect.isOpen()).isFalse();
    FastShutdown.shutdown(client);
    FastShutdown.shutdown(clientResources);
}
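The same NettyCustomizer hook works outside of the test harness; a minimal sketch using only the calls shown above (host, port and the customizer body are placeholders):

// Minimal sketch: shared ClientResources with a NettyCustomizer plus ping-before-activate.
ClientResources resources = ClientResources.builder()
        .nettyCustomizer(new NettyCustomizer() {

            @Override
            public void afterChannelInitialized(Channel channel) {
                // e.g. attach additional handlers or metrics to every new channel
            }
        })
        .build();
RedisClient client = RedisClient.create(resources, RedisURI.create("localhost", 6379));
client.setOptions(ClientOptions.builder().pingBeforeActivateConnection(true).build());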
Use of io.lettuce.core.resource.ClientResources in project lettuce-core by lettuce-io.
The class DefaultClusterTopologyRefresh, method loadViews.
/**
* Load partition views from a collection of {@link RedisURI}s and return the view per {@link RedisURI}. Partitions contain
* an ordered list of {@link RedisClusterNode}s. The sort key is latency. Nodes with lower latency come first.
*
* @param seed collection of {@link RedisURI}s
* @param connectTimeout connect timeout
* @param discovery {@code true} to discover additional nodes
* @return mapping between {@link RedisURI} and {@link Partitions}
*/
@Override
public CompletionStage<Map<RedisURI, Partitions>> loadViews(Iterable<RedisURI> seed, Duration connectTimeout,
        boolean discovery) {
    if (!isEventLoopActive()) {
        return CompletableFuture.completedFuture(Collections.emptyMap());
    }
    long commandTimeoutNs = getCommandTimeoutNs(seed);
    ConnectionTracker tracker = new ConnectionTracker();
    long connectionTimeout = commandTimeoutNs + connectTimeout.toNanos();
    openConnections(tracker, seed, connectionTimeout, TimeUnit.NANOSECONDS);
    CompletableFuture<NodeTopologyViews> composition = tracker.whenComplete(map -> {
        return new Connections(clientResources, map);
    }).thenCompose(connections -> {
        Requests requestedTopology = connections.requestTopology(commandTimeoutNs, TimeUnit.NANOSECONDS);
        Requests requestedInfo = connections.requestInfo(commandTimeoutNs, TimeUnit.NANOSECONDS);
        return CompletableFuture.allOf(requestedTopology.allCompleted(), requestedInfo.allCompleted())
                .thenApplyAsync(ignore -> getNodeSpecificViews(requestedTopology, requestedInfo),
                        clientResources.eventExecutorGroup())
                .thenCompose(views -> {
                    if (discovery && isEventLoopActive()) {
                        Set<RedisURI> allKnownUris = views.getClusterNodes();
                        Set<RedisURI> discoveredNodes = difference(allKnownUris, toSet(seed));
                        if (discoveredNodes.isEmpty()) {
                            return CompletableFuture.completedFuture(views);
                        }
                        openConnections(tracker, discoveredNodes, connectionTimeout, TimeUnit.NANOSECONDS);
                        return tracker.whenComplete(map -> {
                            return new Connections(clientResources, map).retainAll(discoveredNodes);
                        }).thenCompose(newConnections -> {
                            Requests additionalTopology = newConnections
                                    .requestTopology(commandTimeoutNs, TimeUnit.NANOSECONDS)
                                    .mergeWith(requestedTopology);
                            Requests additionalInfo = newConnections
                                    .requestInfo(commandTimeoutNs, TimeUnit.NANOSECONDS)
                                    .mergeWith(requestedInfo);
                            return CompletableFuture.allOf(additionalTopology.allCompleted(), additionalInfo.allCompleted())
                                    .thenApplyAsync(ignore2 -> getNodeSpecificViews(additionalTopology, additionalInfo),
                                            clientResources.eventExecutorGroup());
                        });
                    }
                    return CompletableFuture.completedFuture(views);
                }).whenComplete((ignore, throwable) -> {
                    if (throwable != null) {
                        try {
                            tracker.close();
                        } catch (Exception e) {
                            logger.debug("Cannot close ClusterTopologyRefresh connections", e);
                        }
                    }
                }).thenCompose((it) -> tracker.close().thenApply(ignore -> it)).thenCompose(it -> {
                    if (it.isEmpty()) {
                        Exception exception = tryFail(requestedTopology, tracker, seed);
                        return Futures.failed(exception);
                    }
                    return CompletableFuture.completedFuture(it);
                });
    });
    return composition.thenApply(NodeTopologyViews::toMap);
}
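loadViews is internal to the cluster client; from application code the same refresh machinery is normally enabled through ClusterClientOptions, roughly as sketched below (host, port and refresh intervals are placeholders):

// Sketch: enabling periodic and adaptive topology refresh on a RedisClusterClient.
ClientResources resources = DefaultClientResources.create();
RedisClusterClient clusterClient = RedisClusterClient.create(resources, RedisURI.create("localhost", 7000));
clusterClient.setOptions(ClusterClientOptions.builder()
        .topologyRefreshOptions(ClusterTopologyRefreshOptions.builder()
                .enablePeriodicRefresh(Duration.ofSeconds(30))
                .enableAllAdaptiveRefreshTriggers()
                .build())
        .build());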