Use of es.moki.ratelimitj.inmemory.request.SavedKey in the ratelimitj project by mokies.
Example: the eqOrGeLimit method of the HazelcastSlidingWindowRequestRateLimiter class.
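To make the method easier to follow, here is a minimal sketch of the SavedKey fields it relies on. The field names (tsKey, countKey, trimBefore, blocks, blockId) are taken directly from the usage below; the constructor body and the OptionalInt precision parameter are assumptions about how the real es.moki.ratelimitj.inmemory.request.SavedKey derives them, not a copy of that class.

import java.util.OptionalInt;

// Approximate sketch of SavedKey, inferred from its usage in eqOrGeLimit.
public final class SavedKey {

    public final long blockId;     // index of the precision block containing 'now'
    public final long blocks;      // number of precision blocks covering the rule's duration
    public final long trimBefore;  // oldest block id still inside the sliding window
    public final String countKey;  // map key holding the running total for the rule
    public final String tsKey;     // map key holding the block id of the last recorded request

    // Assumed construction: precision defaults to the full duration when not set on the rule.
    public SavedKey(long now, int duration, OptionalInt precisionOpt) {
        int precision = Math.min(precisionOpt.orElse(duration), duration);
        this.blocks = (long) Math.ceil(duration / (double) precision);
        this.blockId = (long) Math.floor(now / (double) precision);
        this.trimBefore = blockId - blocks + 1;
        this.countKey = "" + duration + ':' + precision + ':';
        this.tsKey = countKey + 'o';
    }
}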
private boolean eqOrGeLimit(String key, int weight, boolean strictlyGreater) {
    requireNonNull(key, "key cannot be null");
    requireNonNull(rules, "rules cannot be null");
    if (rules.isEmpty()) {
        throw new IllegalArgumentException("at least one rule must be provided");
    }

    final long now = timeSupplier.get();
    // TODO implement cleanup
    final int longestDuration = rules.stream().map(RequestLimitRule::getDurationSeconds).reduce(Integer::max).orElse(0);
    List<SavedKey> savedKeys = new ArrayList<>(rules.size());

    IMap<String, Long> hcKeyMap = getMap(key, longestDuration);
    boolean geLimit = false;

    // TODO perform each rule calculation in parallel
    for (RequestLimitRule rule : rules) {

        SavedKey savedKey = new SavedKey(now, rule.getDurationSeconds(), rule.getPrecision());
        savedKeys.add(savedKey);

        Long oldTs = hcKeyMap.get(savedKey.tsKey);
        // oldTs = Optional.ofNullable(oldTs).orElse(saved.trimBefore);
        oldTs = oldTs != null ? oldTs : savedKey.trimBefore;

        if (oldTs > now) {
            // don't write in the past
            return true;
        }

        // discover what needs to be cleaned up
        long decr = 0;
        List<String> dele = new ArrayList<>();
        long trim = Math.min(savedKey.trimBefore, oldTs + savedKey.blocks);

        // walk every block from the last recorded one up to (but excluding) the trim point
        for (long oldBlock = oldTs; oldBlock <= trim - 1; oldBlock++) {
            String bkey = savedKey.countKey + oldBlock;
            Long bcount = hcKeyMap.get(bkey);
            if (bcount != null) {
                decr = decr + bcount;
                dele.add(bkey);
            }
        }

        // handle cleanup
        Long cur;
        if (!dele.isEmpty()) {
            // dele.stream().map(hcKeyMap::removeAsync).collect(Collectors.toList());
            dele.forEach(hcKeyMap::remove);
            final long decrement = decr;
            cur = hcKeyMap.compute(savedKey.countKey, (k, v) -> v - decrement);
        } else {
            cur = hcKeyMap.get(savedKey.countKey);
        }

        // check our limits
        long count = coalesce(cur, 0L) + weight;
        if (count > rule.getLimit()) {
            // over limit, don't record request
            return true;
        } else if (!strictlyGreater && count == rule.getLimit()) {
            // at limit, do record request
            geLimit = true;
        }
    }

    // there are enough resources, update the counts
    for (SavedKey savedKey : savedKeys) {
        // update the current timestamp, count, and bucket count
        hcKeyMap.set(savedKey.tsKey, savedKey.trimBefore);
        // TODO should this be just compute
        Long computedCountKeyValue = hcKeyMap.compute(savedKey.countKey, (k, v) -> Optional.ofNullable(v).orElse(0L) + weight);
        LOG.debug("{} {}={}", key, savedKey.countKey, computedCountKeyValue);
        Long computedCountKeyBlockIdValue = hcKeyMap.compute(savedKey.countKey + savedKey.blockId, (k, v) -> Optional.ofNullable(v).orElse(0L) + weight);
        LOG.debug("{} {}={}", key, savedKey.countKey + savedKey.blockId, computedCountKeyBlockIdValue);
    }
    return geLimit;
}
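For illustration only, the public entry points of the surrounding class presumably delegate to this private helper roughly as below. The method names follow ratelimitj's RequestRateLimiter interface, but the exact wiring shown here is an assumption rather than a quotation from the project.

@Override
public boolean overLimitWhenIncremented(String key, int weight) {
    // strictly greater: only report "over" once the limit has been exceeded
    return eqOrGeLimit(key, weight, true);
}

@Override
public boolean geLimitWhenIncremented(String key, int weight) {
    // greater-or-equal: also report a request that lands exactly on the limit
    return eqOrGeLimit(key, weight, false);
}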