Use of redis.clients.jedis.JedisClusterPipeline in project bazel-buildfarm by bazelbuild.
The class RedisShardBackplane, method scanActionCache.
@SuppressWarnings("ConstantConditions")
@Override
public ActionCacheScanResult scanActionCache(String scanToken, int count) throws IOException {
  final String jedisScanToken = scanToken == null ? SCAN_POINTER_START : scanToken;
  ImmutableList.Builder<Map.Entry<ActionKey, String>> results = new ImmutableList.Builder<>();
  ScanParams scanParams =
      new ScanParams().match(config.getActionCachePrefix() + ":*").count(count);
  String token =
      client.call(
          jedis -> {
            ScanResult<String> scanResult = jedis.scan(jedisScanToken, scanParams);
            List<String> keyResults = scanResult.getResult();

            // Queue one GET per matched action cache key and read all responses
            // after a single pipelined round trip.
            List<Response<String>> actionResults = new ArrayList<>(keyResults.size());
            JedisClusterPipeline p = jedis.pipelined();
            for (String key : keyResults) {
              actionResults.add(p.get(key));
            }
            p.sync();

            for (int i = 0; i < keyResults.size(); i++) {
              String json = actionResults.get(i).get();
              if (json == null) {
                continue;
              }
              String key = keyResults.get(i);
              results.add(
                  new AbstractMap.SimpleEntry<>(
                      DigestUtil.asActionKey(DigestUtil.parseDigest(key.split(":")[1])), json));
            }

            // A cursor equal to SCAN_POINTER_START means the scan has completed.
            String cursor = scanResult.getCursor();
            return cursor.equals(SCAN_POINTER_START) ? null : cursor;
          });
  return new ActionCacheScanResult(
      token,
      results.build().stream()
          .map(
              (entry) ->
                  new AbstractMap.SimpleEntry<>(
                      entry.getKey(), parseActionResult(entry.getValue())))
          .collect(Collectors.toList()));
}
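The SCAN-then-pipelined-GET pattern above is not specific to buildfarm's cluster pipeline wrapper. A minimal sketch of the same idea against a single Redis node with the stock Jedis Pipeline API follows; the "ActionCache" key prefix, the localhost connection, and the class name are assumptions for illustration only.

import java.util.ArrayList;
import java.util.List;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
import redis.clients.jedis.Response;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;

public class ActionCacheScanSketch {
  public static void main(String[] args) {
    try (Jedis jedis = new Jedis("localhost", 6379)) {
      String cursor = ScanParams.SCAN_POINTER_START;
      ScanParams params = new ScanParams().match("ActionCache:*").count(100);
      do {
        ScanResult<String> scanResult = jedis.scan(cursor, params);
        List<String> keys = scanResult.getResult();

        // Queue one GET per key; nothing is sent until sync().
        Pipeline p = jedis.pipelined();
        List<Response<String>> values = new ArrayList<>(keys.size());
        for (String key : keys) {
          values.add(p.get(key));
        }
        p.sync();

        for (int i = 0; i < keys.size(); i++) {
          String json = values.get(i).get();
          if (json != null) {
            System.out.println(keys.get(i) + " -> " + json.length() + " chars of JSON");
          }
        }
        cursor = scanResult.getCursor();
      } while (!cursor.equals(ScanParams.SCAN_POINTER_START));
    }
  }
}

The loop keeps paging until Redis hands back the starting cursor, mirroring how scanActionCache translates that cursor into a null token for its callers.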
Use of redis.clients.jedis.JedisClusterPipeline in project bazel-buildfarm by bazelbuild.
The class RedisShardBackplane, method getDispatchedOperations.
@SuppressWarnings("ConstantConditions")
@Override
public ImmutableList<DispatchedOperation> getDispatchedOperations() throws IOException {
  ImmutableList.Builder<DispatchedOperation> builder = new ImmutableList.Builder<>();
  Map<String, String> dispatchedOperations =
      client.call(jedis -> jedis.hgetAll(config.getDispatchedOperationsHashName()));
  ImmutableList.Builder<String> invalidOperationNames = new ImmutableList.Builder<>();
  boolean hasInvalid = false;
  // executor work queue?
  for (Map.Entry<String, String> entry : dispatchedOperations.entrySet()) {
    try {
      DispatchedOperation.Builder dispatchedOperationBuilder = DispatchedOperation.newBuilder();
      JsonFormat.parser().merge(entry.getValue(), dispatchedOperationBuilder);
      builder.add(dispatchedOperationBuilder.build());
    } catch (InvalidProtocolBufferException e) {
      logger.log(
          Level.SEVERE,
          "RedisShardBackplane::getDispatchedOperations: removing invalid operation "
              + entry.getKey(),
          e);
      /* guess we don't want to spin on this */
      invalidOperationNames.add(entry.getKey());
      hasInvalid = true;
    }
  }
  if (hasInvalid) {
    client.run(
        jedis -> {
          // Batch the removal of unparseable entries into a single pipeline flush.
          JedisClusterPipeline p = jedis.pipelined();
          for (String invalidOperationName : invalidOperationNames.build()) {
            p.hdel(config.getDispatchedOperationsHashName(), invalidOperationName);
          }
          p.sync();
        });
  }
  return builder.build();
}
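The cleanup step relies on the same batching idea: entries whose values fail protobuf-JSON parsing are collected first and then removed with one pipelined burst of HDEL commands. Below is a minimal sketch of that hash-cleanup pattern using stock Jedis, with com.google.longrunning.Operation as a stand-in message and the hash name "DispatchedOperations" assumed purely for illustration.

import com.google.longrunning.Operation;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.util.JsonFormat;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;

public class HashCleanupSketch {
  public static void main(String[] args) {
    try (Jedis jedis = new Jedis("localhost", 6379)) {
      Map<String, String> entries = jedis.hgetAll("DispatchedOperations");
      List<String> invalid = new ArrayList<>();
      for (Map.Entry<String, String> entry : entries.entrySet()) {
        try {
          Operation.Builder builder = Operation.newBuilder();
          JsonFormat.parser().merge(entry.getValue(), builder);
          // builder.build() would be handed to the caller here.
        } catch (InvalidProtocolBufferException e) {
          invalid.add(entry.getKey());
        }
      }
      if (!invalid.isEmpty()) {
        // Remove every unparseable field in one round trip instead of one HDEL call each.
        Pipeline p = jedis.pipelined();
        for (String field : invalid) {
          p.hdel("DispatchedOperations", field);
        }
        p.sync();
      }
    }
  }
}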
Use of redis.clients.jedis.JedisClusterPipeline in project bazel-buildfarm by bazelbuild.
The class RedisShardBackplane, method updateWatchedIfDone.
@SuppressWarnings({ "unchecked", "rawtypes" })
public void updateWatchedIfDone(JedisCluster jedis) {
List<String> operationChannels = subscriber.watchedOperationChannels();
if (operationChannels.isEmpty()) {
return;
}
Instant now = Instant.now();
List<Map.Entry<String, Response<String>>> operations = new ArrayList(operationChannels.size());
JedisClusterPipeline p = jedis.pipelined();
for (String operationName : operationChannels.stream().map(RedisShardBackplane::parseOperationChannel).collect(Collectors.toList())) {
operations.add(new AbstractMap.SimpleEntry<>(operationName, p.get(operationKey(operationName))));
}
p.sync();
for (Map.Entry<String, Response<String>> entry : operations) {
String json = entry.getValue().get();
Operation operation = json == null ? null : RedisShardBackplane.parseOperationJson(json);
String operationName = entry.getKey();
if (operation == null || operation.getDone()) {
if (operation != null) {
operation = onPublish.apply(operation);
}
subscriber.onOperation(operationChannel(operationName), operation, nextExpiresAt(now));
logger.log(Level.FINE, format("operation %s done due to %s", operationName, operation == null ? "null" : "completed"));
}
}
}
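A detail this method depends on: each p.get(...) returns a Response placeholder that is filled in only once p.sync() flushes the pipeline, which is why the operation name is paired with its Response up front and read back afterward. A short sketch of that pairing with stock Jedis (the key prefix, connection, and class name are assumptions):

import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
import redis.clients.jedis.Response;

public class DeferredResponseSketch {
  public static void main(String[] args) {
    List<String> names = Arrays.asList("op-1", "op-2", "op-3");
    try (Jedis jedis = new Jedis("localhost", 6379)) {
      Pipeline p = jedis.pipelined();
      List<Map.Entry<String, Response<String>>> pending = new ArrayList<>(names.size());
      for (String name : names) {
        // The Response is an unfulfilled placeholder until sync() runs.
        pending.add(new AbstractMap.SimpleEntry<>(name, p.get("Operations:" + name)));
      }
      p.sync();
      for (Map.Entry<String, Response<String>> entry : pending) {
        String json = entry.getValue().get(); // null if the key is missing or expired
        System.out.println(entry.getKey() + (json == null ? " is gone" : " -> " + json));
      }
    }
  }
}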
Use of redis.clients.jedis.JedisClusterPipeline in project bazel-buildfarm by bazelbuild.
The class JedisCasWorkerMap, method addAll.
/**
 * @brief Update multiple blob entries for a worker.
 * @details This may add a new key if the blob did not previously exist, or it will adjust the
 *     worker values based on the worker name. The expiration time is always refreshed.
 * @param client Client used for interacting with redis when not using cacheMap.
 * @param blobDigests The blob digests to adjust worker information from.
 * @param workerName The worker to add for looking up the blobs.
 */
@Override
public void addAll(RedisClient client, Iterable<Digest> blobDigests, String workerName)
    throws IOException {
  client.run(
      jedis -> {
        JedisClusterPipeline p = jedis.pipelined();
        for (Digest blobDigest : blobDigests) {
          String key = redisCasKey(blobDigest);
          p.sadd(key, workerName);
          p.expire(key, keyExpiration_s);
        }
        p.sync();
      });
}
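addAll interleaves two commands per digest, an SADD of the worker into the key's set and an EXPIRE to refresh its TTL, and sends them all in one pipeline flush. A minimal stand-alone sketch of that pattern with stock Jedis follows; the "ContentAddressableStorage" key prefix, the 604800-second expiration, the digest strings, and the worker name are assumptions for illustration.

import java.util.Arrays;
import java.util.List;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;

public class CasWorkerAddSketch {
  public static void main(String[] args) {
    List<String> digests = Arrays.asList("abc123/42", "def456/7");
    try (Jedis jedis = new Jedis("localhost", 6379)) {
      Pipeline p = jedis.pipelined();
      for (String digest : digests) {
        String key = "ContentAddressableStorage:" + digest;
        p.sadd(key, "worker-1:8981"); // record that this worker holds the blob
        p.expire(key, 604800);        // refresh the key's time-to-live (7 days)
      }
      p.sync(); // both commands per key go out in a single round trip
    }
  }
}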
Use of redis.clients.jedis.JedisClusterPipeline in project bazel-buildfarm by bazelbuild.
The class JedisCasWorkerMap, method removeAll.
/**
 * @brief Remove worker value from all blob keys.
 * @details If the blob is already missing, or the worker doesn't exist, this will have no effect
 *     on the key.
 * @param client Client used for interacting with redis when not using cacheMap.
 * @param blobDigests The blob digests to remove the worker from.
 * @param workerName The worker name to remove.
 */
@Override
public void removeAll(RedisClient client, Iterable<Digest> blobDigests, String workerName)
    throws IOException {
  client.run(
      jedis -> {
        JedisClusterPipeline p = jedis.pipelined();
        for (Digest blobDigest : blobDigests) {
          String key = redisCasKey(blobDigest);
          p.srem(key, workerName);
        }
        p.sync();
      });
}
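removeAll mirrors addAll but skips the TTL refresh. One property of the pipelined form that the snippet above does not use: each queued SREM yields a Response<Long>, so after sync() a caller could tell which keys actually contained the worker. A short sketch of that variation with stock Jedis (key names, worker name, and class name are assumptions):

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
import redis.clients.jedis.Response;

public class CasWorkerRemoveSketch {
  public static void main(String[] args) {
    List<String> digests = Arrays.asList("abc123/42", "def456/7");
    try (Jedis jedis = new Jedis("localhost", 6379)) {
      Pipeline p = jedis.pipelined();
      Map<String, Response<Long>> removed = new HashMap<>();
      for (String digest : digests) {
        String key = "ContentAddressableStorage:" + digest;
        removed.put(key, p.srem(key, "worker-1:8981"));
      }
      p.sync();
      for (Map.Entry<String, Response<Long>> entry : removed.entrySet()) {
        // 1 if the worker was removed, 0 if it was not a member or the key was gone.
        System.out.println(entry.getKey() + ": removed=" + entry.getValue().get());
      }
    }
  }
}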