Example usage of org.apache.storm.redis.common.config.JedisClusterConfig in the Apache Storm project.
From the class RedisKeyValueStateProvider, method getRedisKeyValueState:
/**
 * Creates a {@link RedisKeyValueState} for the given namespace, backed either by a
 * single-node Jedis pool or by a Redis cluster, depending on which configuration is present.
 *
 * <p>If neither a pool nor a cluster configuration is supplied, a default single-node
 * pool configuration is built. When both are present, the pool configuration wins.
 *
 * @param namespace the state namespace
 * @param topoConf  the topology configuration map
 * @param context   the topology context, used to resolve serializers
 * @param config    the provider-level state configuration
 * @return a new RedisKeyValueState instance
 * @throws Exception if serializer resolution or state construction fails
 */
private RedisKeyValueState getRedisKeyValueState(String namespace, Map<String, Object> topoConf, TopologyContext context, StateConfig config) throws Exception {
    JedisPoolConfig poolConfig = getJedisPoolConfig(config);
    JedisClusterConfig clusterConfig = getJedisClusterConfig(config);
    // No explicit configuration at all: fall back to a default single-node pool.
    if (poolConfig == null && clusterConfig == null) {
        poolConfig = buildDefaultJedisPoolConfig();
    }
    // Single-node pool takes precedence over cluster when both are configured.
    if (poolConfig != null) {
        return new RedisKeyValueState(namespace, poolConfig,
                getKeySerializer(topoConf, context, config),
                getValueSerializer(topoConf, context, config));
    }
    return new RedisKeyValueState(namespace, clusterConfig,
            getKeySerializer(topoConf, context, config),
            getValueSerializer(topoConf, context, config));
}
Example usage of org.apache.storm.redis.common.config.JedisClusterConfig in the Apache Storm project.
From the class WordCountTridentRedisClusterMap, method buildTopology:
/**
 * Builds a Trident word-count topology whose aggregate state is persisted in a
 * Redis cluster as a hash named {@code "test"}.
 *
 * @param redisHostPort comma-separated list of {@code host:port} cluster node addresses
 * @return the built Storm topology
 * @throws NumberFormatException if a port segment is not a valid integer
 */
public static StormTopology buildTopology(String redisHostPort) {
    Fields fields = new Fields("word", "count");
    // Cycling fixed-batch spout so the topology keeps emitting the same four tuples.
    FixedBatchSpout spout = new FixedBatchSpout(fields, 4, new Values("storm", 1), new Values("trident", 1), new Values("needs", 1), new Values("javadoc", 1));
    spout.setCycle(true);

    // Parse "host:port,host:port,..." into cluster node addresses.
    Set<InetSocketAddress> nodes = new HashSet<>();
    for (String hostPort : redisHostPort.split(",")) {
        String[] hostPortSplit = hostPort.split(":");
        // parseInt avoids the needless Integer boxing of Integer.valueOf.
        nodes.add(new InetSocketAddress(hostPortSplit[0], Integer.parseInt(hostPortSplit[1])));
    }
    JedisClusterConfig clusterConfig = new JedisClusterConfig.Builder().setNodes(nodes).build();
    RedisDataTypeDescription dataTypeDescription = new RedisDataTypeDescription(RedisDataTypeDescription.RedisDataType.HASH, "test");
    StateFactory factory = RedisClusterMapState.transactional(clusterConfig, dataTypeDescription);

    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("spout1", spout);
    // Persist per-word sums into Redis, then query them back and print each result.
    TridentState state = stream.groupBy(new Fields("word")).persistentAggregate(factory, new Fields("count"), new Sum(), new Fields("sum"));
    stream.stateQuery(state, new Fields("word"), new MapGet(), new Fields("sum")).each(new Fields("word", "sum"), new PrintFunction(), new Fields());
    return topology.build();
}
Example usage of org.apache.storm.redis.common.config.JedisClusterConfig in the Apache Storm project.
From the class RedisDataSourcesProvider, method constructTrident:
@Override
public ISqlTridentDataSource constructTrident(URI uri, String inputFormatClass, String outputFormatClass, Properties props, List<FieldInfo> fields) {
Preconditions.checkArgument(JedisURIHelper.isValid(uri), "URI is not valid for Redis: " + uri);
String host = uri.getHost();
int port = uri.getPort() != -1 ? uri.getPort() : DEFAULT_REDIS_PORT;
int dbIdx = JedisURIHelper.getDBIndex(uri);
String password = JedisURIHelper.getPassword(uri);
int timeout = Integer.parseInt(props.getProperty("redis.timeout", String.valueOf(DEFAULT_TIMEOUT)));
boolean clusterMode = Boolean.valueOf(props.getProperty("use.redis.cluster", "false"));
List<String> fieldNames = FieldInfoUtils.getFieldNames(fields);
IOutputSerializer serializer = SerdeUtils.getSerializer(outputFormatClass, props, fieldNames);
if (clusterMode) {
JedisClusterConfig config = new JedisClusterConfig.Builder().setNodes(Collections.singleton(new InetSocketAddress(host, port))).setTimeout(timeout).build();
return new RedisClusterTridentDataSource(config, props, fields, serializer);
} else {
JedisPoolConfig config = new JedisPoolConfig(host, port, timeout, password, dbIdx);
return new RedisTridentDataSource(config, props, fields, serializer);
}
}
Example usage of org.apache.storm.redis.common.config.JedisClusterConfig in the Apache Storm project.
From the class RedisDataSourcesProvider, method constructStreams:
@Override
public ISqlStreamsDataSource constructStreams(URI uri, String inputFormatClass, String outputFormatClass, Properties props, List<FieldInfo> fields) {
Preconditions.checkArgument(JedisURIHelper.isValid(uri), "URI is not valid for Redis: " + uri);
String host = uri.getHost();
int port = uri.getPort() != -1 ? uri.getPort() : DEFAULT_REDIS_PORT;
int dbIdx = JedisURIHelper.getDBIndex(uri);
String password = JedisURIHelper.getPassword(uri);
int timeout = Integer.parseInt(props.getProperty(PROPERTY_REDIS_TIMEOUT, String.valueOf(DEFAULT_TIMEOUT)));
boolean clusterMode = Boolean.valueOf(props.getProperty(PROPERTY_USE_REDIS_CLUSTER, "false"));
List<String> fieldNames = FieldInfoUtils.getFieldNames(fields);
IOutputSerializer serializer = SerdeUtils.getSerializer(outputFormatClass, props, fieldNames);
if (clusterMode) {
JedisClusterConfig config = new JedisClusterConfig.Builder().setNodes(Collections.singleton(new InetSocketAddress(host, port))).setTimeout(timeout).build();
return new RedisClusterStreamsDataSource(config, props, fields, serializer);
} else {
JedisPoolConfig config = new JedisPoolConfig(host, port, timeout, password, dbIdx);
return new RedisStreamsDataSource(config, props, fields, serializer);
}
}
Example usage of org.apache.storm.redis.common.config.JedisClusterConfig in the Apache Storm project.
From the class WordCountTridentRedisCluster, method buildTopology:
/**
 * Builds a Trident word-count topology that stores word counts in a Redis cluster
 * via partitionPersist and queries them back with a static state.
 *
 * @param redisHostPort comma-separated list of {@code host:port} cluster node addresses
 * @return the built Storm topology
 * @throws NumberFormatException if a port segment is not a valid integer
 */
public static StormTopology buildTopology(String redisHostPort) {
    Fields fields = new Fields("word", "count");
    // Cycling fixed-batch spout so the topology keeps emitting the same four tuples.
    FixedBatchSpout spout = new FixedBatchSpout(fields, 4, new Values("storm", 1), new Values("trident", 1), new Values("needs", 1), new Values("javadoc", 1));
    spout.setCycle(true);

    // Parse "host:port,host:port,..." into cluster node addresses.
    Set<InetSocketAddress> nodes = new HashSet<>();
    for (String hostPort : redisHostPort.split(",")) {
        String[] hostPortSplit = hostPort.split(":");
        // parseInt avoids the needless Integer boxing of Integer.valueOf.
        nodes.add(new InetSocketAddress(hostPortSplit[0], Integer.parseInt(hostPortSplit[1])));
    }
    JedisClusterConfig clusterConfig = new JedisClusterConfig.Builder().setNodes(nodes).build();
    RedisStoreMapper storeMapper = new WordCountStoreMapper();
    RedisLookupMapper lookupMapper = new WordCountLookupMapper();
    RedisClusterState.Factory factory = new RedisClusterState.Factory(clusterConfig);

    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("spout1", spout);
    // NOTE(review): 86400000 looks like 24h expressed in milliseconds, but withExpire's
    // unit is not visible from here — confirm against RedisClusterStateUpdater; if it
    // takes seconds this is ~1000 days, not one day.
    stream.partitionPersist(factory, fields, new RedisClusterStateUpdater(storeMapper).withExpire(86400000), new Fields());

    TridentState state = topology.newStaticState(factory);
    stream = stream.stateQuery(state, new Fields("word"), new RedisClusterStateQuerier(lookupMapper), new Fields("columnName", "columnValue"));
    stream.each(new Fields("word", "columnValue"), new PrintFunction(), new Fields());
    return topology.build();
}
Aggregations