Use of com.ibm.etcd.api.RangeRequest in project etcd-java by IBM.
From the class RangeCache, method fullRefreshCache.
// internal method - should not be called while watch is active
protected ListenableFuture<Boolean> fullRefreshCache() {
    ListenableFuture<List<RangeResponse>> rrfut;
    long seenUpTo = seenUpToRev;
    boolean firstTime = (seenUpTo == 0L);
    if (firstTime || entries.size() <= 20) {
        // TODO *maybe* chunking (for large caches)
        ListenableFuture<RangeResponse> rrf = kvClient.get(fromKey).rangeEnd(toKey)
                .backoffRetry(() -> !closed)
                .timeout(300_000L).async();
        rrfut = Futures.transform(rrf,
                (Function<RangeResponse, List<RangeResponse>>) rr -> Collections.singletonList(rr));
    } else {
        // in case the local cache is large, reduce data transfer by requesting
        // just keys, and full key+value only for those modified since seenUpToRev
        RangeRequest.Builder rangeReqBld = RangeRequest.newBuilder()
                .setKey(fromKey).setRangeEnd(toKey);
        RangeRequest newModsReq = rangeReqBld.setMinModRevision(seenUpTo + 1).build();
        RangeRequest otherKeysReq = rangeReqBld.clearMinModRevision()
                .setMaxModRevision(seenUpTo).setKeysOnly(true).build();
        ListenableFuture<TxnResponse> trf = kvClient.batch()
                .get(newModsReq).get(otherKeysReq)
                .backoffRetry(() -> !closed)
                .timeout(300_000L).async();
        rrfut = Futures.transform(trf, (Function<TxnResponse, List<RangeResponse>>) tr ->
                tr.getResponsesList().stream()
                        .map(r -> r.getResponseRange()).collect(Collectors.toList()));
    }
    return Futures.transformAsync(rrfut, rrs -> {
        if (closed) {
            throw new CancellationException();
        }
        Set<ByteString> snapshot = firstTime ? null : new HashSet<>();
        RangeResponse toUpdate = rrs.get(0);
        if (toUpdate.getKvsCount() > 0) {
            for (KeyValue kv : toUpdate.getKvsList()) {
                if (!firstTime) {
                    snapshot.add(kv.getKey());
                }
                offerUpdate(kv, true);
            }
        }
        long snapshotRev = toUpdate.getHeader().getRevision();
        if (firstTime) {
            notifyListeners(EventType.INITIALIZED, null, true);
        } else {
            if (rrs.size() > 1) {
                for (KeyValue kv : rrs.get(1).getKvsList()) {
                    snapshot.add(kv.getKey());
                }
            }
            // prune deleted entries
            KeyValue.Builder kvBld = null;
            for (ByteString key : entries.keySet()) {
                if (!snapshot.contains(key)) {
                    if (kvBld == null) {
                        kvBld = KeyValue.newBuilder().setVersion(0L).setModRevision(snapshotRev);
                    }
                    offerUpdate(kvBld.setKey(key).build(), true);
                }
            }
        }
        revisionUpdate(snapshotRev);
        Watch newWatch = kvClient.watch(fromKey).rangeEnd(toKey) // .prevKv() //TODO TBD
                .progressNotify().startRevision(snapshotRev + 1)
                .executor(listenerExecutor).start(new StreamObserver<WatchUpdate>() {
                    @Override
                    public void onNext(WatchUpdate update) {
                        List<Event> events = update.getEvents();
                        int eventCount = events != null ? events.size() : 0;
                        if (eventCount > 0) {
                            for (Event event : events) {
                                KeyValue kv = event.getKv();
                                // event.getPrevKv(); //TBD
                                switch (event.getType()) {
                                case DELETE:
                                    if (kv.getVersion() != 0L) {
                                        kv = KeyValue.newBuilder(kv).setVersion(0L).clearValue().build();
                                    }
                                    // fall-thru
                                case PUT:
                                    offerUpdate(kv, true);
                                    break;
                                case UNRECOGNIZED:
                                default:
                                    logger.warn("Unrecognized event for key " + kv.getKey().toStringUtf8());
                                    break;
                                }
                            }
                        }
                        revisionUpdate(eventCount == 0 ? update.getHeader().getRevision() - 1L
                                : events.get(eventCount - 1).getKv().getModRevision());
                    }

                    @Override
                    public void onCompleted() {
                        // should only happen after external close()
                        if (!closed) {
                            if (!client.isClosed()) {
                                logger.error("Watch completed unexpectedly (not closed)");
                            }
                            close();
                        }
                    }

                    @Override
                    public void onError(Throwable t) {
                        logger.error("Watch failed with exception ", t);
                        if (t instanceof RevisionCompactedException) {
                            synchronized (RangeCache.this) {
                                // fail if happens during start, otherwise refresh
                                if (!closed && startFuture != null && startFuture.isDone()) {
                                    // will renew watch
                                    startFuture = fullRefreshCache();
                                }
                            }
                        }
                    }
                });
        synchronized (this) {
            if (closed) {
                throw new CancellationException();
            }
            return watch = newWatch;
        }
    }, listenerExecutor);
}
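For orientation, here is a minimal sketch of how this refresh path is typically exercised: constructing a RangeCache over a key prefix and starting it, which runs fullRefreshCache() once and then keeps the cache current via the watch established above. The endpoint, prefix, and constructor form are assumptions based on the etcd-java README rather than the source shown here.

import com.google.protobuf.ByteString;
import com.ibm.etcd.api.KeyValue;
import com.ibm.etcd.client.EtcdClient;
import com.ibm.etcd.client.utils.RangeCache;

public class RangeCacheExample {
    public static void main(String[] args) throws Exception {
        // assumed local, plaintext etcd endpoint
        EtcdClient client = EtcdClient.forEndpoint("localhost", 2379)
                .withPlainText().build();
        // assumed constructor form: client plus the cached key prefix
        RangeCache cache = new RangeCache(client, ByteString.copyFromUtf8("app/config/"));
        try {
            // start() triggers the initial fullRefreshCache() and establishes
            // the watch that keeps the cache current from then on
            cache.start().get();
            for (KeyValue kv : cache) { // iterate the locally cached KeyValues
                System.out.println(kv.getKey().toStringUtf8()
                        + " = " + kv.getValue().toStringUtf8());
            }
        } finally {
            cache.close(); // cancels the watch (see onCompleted above)
            client.close();
        }
    }
}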
Use of com.ibm.etcd.api.RangeRequest in project etcd-java by IBM.
From the class PersistentLeaseKey, method putKey.
// called only from our serialized executor context
protected void putKey(long leaseId) {
    if (leaseId == 0L || closeFuture != null) {
        return;
    }
    if (updateFuture != null && !updateFuture.isDone()) {
        // if the cancellation wins then putKey will be immediately retried
        updateFuture.cancel(false);
        return;
    }
    // execute a transaction which either sets the lease on an existing key
    // or creates the key with the lease if it doesn't exist
    PutRequest.Builder putBld = PutRequest.newBuilder().setKey(key).setLease(leaseId);
    KvClient.FluentTxnRequest req = client.getKvClient().txnIf().exists(key)
            .backoffRetry(() -> closeFuture == null && isActive());
    ListenableFuture<? extends Object> fut;
    ListenableFuture<TxnResponse> txnFut;
    if (rangeCache == null) {
        fut = txnFut = req.then().put(putBld.setIgnoreValue(true))
                .elseDo().put(putBld.setIgnoreValue(false).setValue(defaultValue)).async();
    } else {
        RangeRequest getOp = RangeRequest.newBuilder().setKey(key).build();
        txnFut = req.then().put(putBld.setIgnoreValue(true)).get(getOp)
                .elseDo().put(putBld.setIgnoreValue(false).setValue(defaultValue)).get(getOp).async();
        fut = Futures.transform(txnFut, (Function<TxnResponse, Object>) tr ->
                rangeCache.offerUpdate(tr.getResponses(1).getResponseRange().getKvs(0), false));
    }
    if (!isDone()) {
        fut = Futures.transform(fut, (Function<Object, Object>) r -> set(key));
    }
    // this callback is to trigger an immediate retry in case the attempt was cancelled by a more
    // recent lease state change to active
    Futures.addCallback(fut, (FutureListener<Object>) (v, t) -> {
        if (t instanceof CancellationException && isActive()) {
            putKey(leaseId);
        }
    }, executor);
    updateFuture = fut;
}
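The conditional put at the heart of this method can be distilled into a standalone helper. The fluent txnIf()/exists()/then()/elseDo() chain below is exactly the one used in putKey; the helper name, its parameters, and the EtcdClient parameter type are illustrative assumptions.

import com.google.common.util.concurrent.ListenableFuture;
import com.google.protobuf.ByteString;
import com.ibm.etcd.api.PutRequest;
import com.ibm.etcd.api.TxnResponse;
import com.ibm.etcd.client.EtcdClient;

final class LeaseKeyUpsert {
    // If the key already exists, re-put it with setIgnoreValue(true) so its
    // current value is kept and only the lease is attached; otherwise create
    // it with the default value under the same lease.
    static ListenableFuture<TxnResponse> upsertWithLease(
            EtcdClient client, ByteString key, ByteString defaultValue, long leaseId) {
        PutRequest.Builder putBld = PutRequest.newBuilder().setKey(key).setLease(leaseId);
        return client.getKvClient().txnIf().exists(key)
                .then().put(putBld.setIgnoreValue(true))
                .elseDo().put(putBld.setIgnoreValue(false).setValue(defaultValue))
                .async();
    }
}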
Use of com.ibm.etcd.api.RangeRequest in project etcd-java by IBM.
From the class RangeCache, method strongIterator.
/**
 * Iterator whose contents are guaranteed to be sequentially consistent
 * with remote updates to the cached range.
 *
 * @return an {@link Iterator} over the {@link KeyValue}s of this cache
 */
public Iterator<KeyValue> strongIterator() {
    // memory barrier prior to reading seenUpToRev
    entries.get(fromKey);
    long seenUpTo = seenUpToRev;
    if (seenUpTo == 0L) {
        ListenableFuture<Boolean> startFut;
        synchronized (this) {
            startFut = startFuture;
        }
        if (startFut == null) {
            // cache has not yet been started
            return kvClient.get(fromKey).rangeEnd(toKey)
                    .timeout(120_000L).sync().getKvsList().iterator();
        }
        try {
            startFut.get(2L, TimeUnit.MINUTES);
            // now started
            seenUpTo = seenUpToRev;
        } catch (TimeoutException te) {
            throw Status.DEADLINE_EXCEEDED.asRuntimeException();
        } catch (ExecutionException e) {
            throw Status.UNKNOWN.withCause(e).asRuntimeException();
        } catch (InterruptedException | CancellationException e) {
            throw Status.CANCELLED.withCause(e).asRuntimeException();
        }
    }
    /*
     * This logic is similar to that in fullRefreshCache(), but
     * it includes an optimistic initial comparison of counts
     * to identify cases where no deletions have been missed and
     * thus a retrieval of all the keys isn't required.
     */
    RangeRequest.Builder rangeReqBld = RangeRequest.newBuilder()
            .setKey(fromKey).setRangeEnd(toKey);
    RangeRequest curCountReq = rangeReqBld.setCountOnly(true)
            .setMaxCreateRevision(seenUpTo).build();
    RangeRequest seenCountReq = rangeReqBld.clearMaxCreateRevision()
            .setRevision(seenUpTo).build();
    RangeRequest newModsReq = rangeReqBld.clearRevision().clearCountOnly()
            .setMinModRevision(seenUpTo + 1).build();
    // first, attempt to get:
    //   0- kvs modified since seenUpTo
    //   1- current count excluding those created since seenUpTo
    //   2- count at revision seenUpTo (this could potentially
    //      fail with a compaction error, see below)
    ListenableFuture<TxnResponse> txn = kvClient.batch()
            .get(newModsReq).get(curCountReq).get(seenCountReq).async();
    TxnResponse txnResp;
    try {
        txnResp = waitFor(txn, 8000L);
    } catch (RuntimeException e) {
        Code code = Status.fromThrowable(e).getCode();
        if (code != Code.OUT_OF_RANGE) {
            throw e;
        }
        // if (2) above fails due to compaction, also retrieve all current keys
        RangeRequest otherKeysReq = rangeReqBld.clearMinModRevision()
                .setMaxModRevision(seenUpTo).setKeysOnly(true).build();
        txnResp = waitFor(kvClient.batch().get(newModsReq).get(otherKeysReq).async(),
                60_000L); // longer timeout
    }
    long revNow = txnResp.getHeader().getRevision();
    if (revNow > seenUpToRev) {
        RangeResponse newModKvs = txnResp.getResponses(0).getResponseRange();
        List<KeyValue> otherKeys;
        if (txnResp.getResponsesCount() == 2) {
            // this means we must have taken the compacted exception path above
            otherKeys = txnResp.getResponses(1).getResponseRange().getKvsList();
        } else if (txnResp.getResponses(1).getResponseRange().getCount() // <- latest count
                < txnResp.getResponses(2).getResponseRange().getCount()) { // <- count at seenUpTo
            // if counts don't match, there must have been deletions since seenUpTo,
            // so additionally retrieve all current keys
            RangeRequest otherKeysReq = rangeReqBld.clearMinModRevision()
                    .setMaxModRevision(seenUpTo).setKeysOnly(true).build();
            otherKeys = waitFor(kvClient.get(otherKeysReq), 60_000L).getKvsList(); // longer timeout
        } else {
            otherKeys = null;
        }
        boolean newKvs = newModKvs.getKvsCount() > 0;
        if (otherKeys != null) {
            // if this is true, there *might* be deletions to process
            if (otherKeys.isEmpty() && !newKvs) {
                return Collections.emptyIterator();
            }
            // bring cache up to date with recently deleted kvs
            Set<ByteString> keys = Stream.concat(otherKeys.stream(), newModKvs.getKvsList().stream())
                    .map(kv -> kv.getKey()).collect(Collectors.toSet());
            entries.values().stream()
                    .filter(kv -> kv.getModRevision() < revNow && !keys.contains(kv.getKey()))
                    .forEach(kv -> offerDelete(kv.getKey(), revNow));
        }
        // bring cache up to date with recently modified kvs
        if (newKvs) {
            newModKvs.getKvsList().forEach(kv -> offerUpdate(kv, false));
        }
        if (revNow > seenUpToRev) {
            listenerExecutor.execute(() -> revisionUpdate(revNow));
        }
    }
    return iterator();
}
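To make the trade-off concrete, here is a small hypothetical helper contrasting the two read paths: iterator() serves entirely from local memory, while strongIterator() first reconciles with etcd as shown above, buying sequential consistency at the cost of at least one extra round trip. The helper name is illustrative; strongIterator() and the KeyValue accessors are taken from the source above.

import java.util.Iterator;
import com.ibm.etcd.api.KeyValue;
import com.ibm.etcd.client.utils.RangeCache;

final class CacheDump {
    // Read-your-writes listing of the cached range: strongIterator() performs
    // the count-comparison / catch-up RPCs shown above before iterating.
    static void dumpStrong(RangeCache cache) {
        for (Iterator<KeyValue> it = cache.strongIterator(); it.hasNext(); ) {
            KeyValue kv = it.next();
            System.out.println(kv.getKey().toStringUtf8() + " @mod=" + kv.getModRevision());
        }
    }
}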