Use of org.apache.cassandra.utils.ColumnOrSuperColumnHelper.EvtAndLvt in project eiger by wlloyd.
The class ClientLibrary, method forced_2round_multiget_slice.
//this version is for micro-benchmarking only
public Map<ByteBuffer, List<ColumnOrSuperColumn>> forced_2round_multiget_slice(List<ByteBuffer> allKeys, ColumnParent column_parent, SlicePredicate predicate) throws Exception {
    //if (logger.isTraceEnabled()) {
    //    logger.trace("forced_2round_multiget_slice(allKeys = {}, column_parent = {}, predicate = {})", new Object[]{printKeys(allKeys), column_parent, predicate});
    //}
    //Split up into one request for each server in the local cluster
    Map<Cassandra.AsyncClient, List<ByteBuffer>> asyncClientToFirstRoundKeys = partitionByAsyncClients(allKeys);
    //Send Round 1 Requests
    Queue<BlockingQueueCallback<multiget_slice_call>> firstRoundCallbacks = new LinkedList<BlockingQueueCallback<multiget_slice_call>>();
    for (Entry<Cassandra.AsyncClient, List<ByteBuffer>> entry : asyncClientToFirstRoundKeys.entrySet()) {
        Cassandra.AsyncClient asyncClient = entry.getKey();
        List<ByteBuffer> keysForThisClient = entry.getValue();
        BlockingQueueCallback<multiget_slice_call> callback = new BlockingQueueCallback<multiget_slice_call>();
        firstRoundCallbacks.add(callback);
        //if (logger.isTraceEnabled()) { logger.trace("round 1: get " + printKeys(keysForThisClient) + " from " + asyncClient); }
        asyncClient.multiget_slice(keysForThisClient, column_parent, predicate, consistencyLevel, LamportClock.sendTimestamp(), callback);
    }
    //Gather responses, track both max_evt and min_lvt
    long overallMaxEvt = Long.MIN_VALUE;
    long overallMinLvt = Long.MAX_VALUE;
    Map<ByteBuffer, List<ColumnOrSuperColumn>> keyToResult = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
    NavigableMap<Long, List<ByteBuffer>> lvtToKeys = new TreeMap<Long, List<ByteBuffer>>();
    for (BlockingQueueCallback<multiget_slice_call> callback : firstRoundCallbacks) {
        MultigetSliceResult result = callback.getResponseNoInterruption().getResult();
        LamportClock.updateTime(result.lts);
        for (Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : result.value.entrySet()) {
            ByteBuffer key = entry.getKey();
            List<ColumnOrSuperColumn> coscList = entry.getValue();
            keyToResult.put(key, coscList);
            //find the evt and lvt for the entire row
            EvtAndLvt evtAndLvt = ColumnOrSuperColumnHelper.extractEvtAndLvt(coscList);
            if (!lvtToKeys.containsKey(evtAndLvt.getLatestValidTime())) {
                lvtToKeys.put(evtAndLvt.getLatestValidTime(), new LinkedList<ByteBuffer>());
            }
            lvtToKeys.get(evtAndLvt.getLatestValidTime()).add(key);
            //if (logger.isTraceEnabled()) { logger.trace("round 1 response for " + printKey(key) + " evt: " + evtAndLvt.getEarliestValidTime() + " lvt: " + evtAndLvt.getLatestValidTime()); }
            overallMaxEvt = Math.max(overallMaxEvt, evtAndLvt.getEarliestValidTime());
            overallMinLvt = Math.min(overallMinLvt, evtAndLvt.getLatestValidTime());
        }
    }
    //Always execute the 2nd round for micro-benchmarking
    if (true) {
        //get the smallest lvt > maxEvt
        long chosenTime = lvtToKeys.navigableKeySet().higher(overallMaxEvt);
        List<ByteBuffer> secondRoundKeys = new LinkedList<ByteBuffer>();
        secondRoundKeys.addAll(allKeys);
        //Send Round 2 Requests
        Map<Cassandra.AsyncClient, List<ByteBuffer>> asyncClientToSecondRoundKeys = partitionByAsyncClients(secondRoundKeys);
        Queue<BlockingQueueCallback<multiget_slice_by_time_call>> secondRoundCallbacks = new LinkedList<BlockingQueueCallback<multiget_slice_by_time_call>>();
        for (Entry<Cassandra.AsyncClient, List<ByteBuffer>> entry : asyncClientToSecondRoundKeys.entrySet()) {
            Cassandra.AsyncClient asyncClient = entry.getKey();
            List<ByteBuffer> keysForThisClient = entry.getValue();
            BlockingQueueCallback<multiget_slice_by_time_call> callback = new BlockingQueueCallback<multiget_slice_by_time_call>();
            secondRoundCallbacks.add(callback);
            //if (logger.isTraceEnabled()) { logger.trace("round 2: get " + printKeys(keysForThisClient) + " from " + asyncClient); }
            asyncClient.multiget_slice_by_time(keysForThisClient, column_parent, predicate, consistencyLevel, chosenTime, LamportClock.sendTimestamp(), callback);
        }
        //Gather second round responses
        for (BlockingQueueCallback<multiget_slice_by_time_call> callback : secondRoundCallbacks) {
            MultigetSliceResult result = callback.getResponseNoInterruption().getResult();
            LamportClock.updateTime(result.lts);
            substituteValidFirstRoundResults(result, keyToResult);
            //if (logger.isTraceEnabled()) { logger.trace("round 2 responses for " + printKeys(result.getValue().keySet())); }
            keyToResult.putAll(result.getValue());
        }
    }
    //Add dependencies on anything returned and remove deleted columns
    for (Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : keyToResult.entrySet()) {
        ByteBuffer key = entry.getKey();
        List<ColumnOrSuperColumn> coscList = entry.getValue();
        for (Iterator<ColumnOrSuperColumn> cosc_it = coscList.iterator(); cosc_it.hasNext(); ) {
            ColumnOrSuperColumn cosc = cosc_it.next();
            try {
                clientContext.addDep(key, cosc);
            } catch (NotFoundException nfe) {
                //remove deleted results; it's okay for all results to be removed
                cosc_it.remove();
            }
        }
    }
    return keyToResult;
}
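For orientation, here is a minimal sketch of a call site. The clientLibrary handle and the column family name "Standard1" are hypothetical stand-ins, and ClientLibrary construction details vary across eiger versions; only the standard Cassandra Thrift types and org.apache.cassandra.utils.ByteBufferUtil are assumed.

// Hypothetical harness; assumes an already-constructed ClientLibrary (clientLibrary).
List<ByteBuffer> keys = new ArrayList<ByteBuffer>();
keys.add(ByteBufferUtil.bytes("user:1"));
keys.add(ByteBufferUtil.bytes("user:2"));
ColumnParent parent = new ColumnParent("Standard1");  // hypothetical column family
SlicePredicate predicate = new SlicePredicate();
// an open-ended slice: all columns of each row, up to 100 per row
predicate.setSlice_range(new SliceRange(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 100));
Map<ByteBuffer, List<ColumnOrSuperColumn>> rows = clientLibrary.forced_2round_multiget_slice(keys, parent, predicate);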
Use of org.apache.cassandra.utils.ColumnOrSuperColumnHelper.EvtAndLvt in project eiger by wlloyd.
The class CassandraServer, method multiget_count.
@Override
public MultigetCountResult multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level, long lts) throws InvalidRequestException, UnavailableException, TimedOutException {
    LamportClock.updateTime(lts);
    logger.debug("multiget_count");
    state().hasColumnFamilyAccess(column_parent.column_family, Permission.READ);
    String keyspace = state().getKeyspace();
    Map<ByteBuffer, CountWithMetadata> results = new HashMap<ByteBuffer, CountWithMetadata>();
    Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = multigetSliceInternal(keyspace, keys, column_parent, predicate, consistency_level);
    for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> cf : columnFamiliesMap.entrySet()) {
        //excludes deleted columns from the count; calculates dependencies (including deleted columns), evt, and lvt
        //use a clientContext to simplify calculating deps
        ClientContext countContext = new ClientContext();
        long maxEarliestValidTime = Long.MIN_VALUE;
        long minLatestValidTime = Long.MAX_VALUE;
        for (Iterator<ColumnOrSuperColumn> cosc_it = cf.getValue().iterator(); cosc_it.hasNext(); ) {
            ColumnOrSuperColumn cosc = cosc_it.next();
            EvtAndLvt evtAndLvt = ColumnOrSuperColumnHelper.extractEvtAndLvt(cosc);
            maxEarliestValidTime = Math.max(maxEarliestValidTime, evtAndLvt.getEarliestValidTime());
            minLatestValidTime = Math.min(minLatestValidTime, evtAndLvt.getLatestValidTime());
            try {
                countContext.addDep(cf.getKey(), cosc);
            } catch (NotFoundException nfe) {
                cosc_it.remove();
            }
        }
        results.put(cf.getKey(), new CountWithMetadata(cf.getValue().size(), maxEarliestValidTime, minLatestValidTime, countContext.getDeps()));
    }
    if (logger.isTraceEnabled()) {
        logger.trace("multiget_count({}, {}, {}, {}, {}) = {}", new Object[] { ByteBufferUtil.listBytesToHex(keys), column_parent, predicate, consistency_level, lts, results });
    }
    return new MultigetCountResult(results, LamportClock.sendTimestamp());
}
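The per-row bookkeeping above reduces to intersecting validity intervals: a set of columns is mutually consistent at any time t with maxEvt <= t <= minLvt. A toy, self-contained recomputation of that interval logic (illustrative values only, not eiger code; assumes the usual java.util imports):

public static void intervalDemo() {
    long[][] evtLvtPairs = { { 10L, 50L }, { 20L, 40L }, { 15L, 60L } };  // (evt, lvt) per column
    long maxEvt = Long.MIN_VALUE;
    long minLvt = Long.MAX_VALUE;
    for (long[] p : evtLvtPairs) {
        maxEvt = Math.max(maxEvt, p[0]);  // latest start of any column's validity
        minLvt = Math.min(minLvt, p[1]);  // earliest end of any column's validity
    }
    // The columns are mutually consistent on [20, 40], so one round suffices.
    assert maxEvt <= minLvt;
}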
Use of org.apache.cassandra.utils.ColumnOrSuperColumnHelper.EvtAndLvt in project eiger by wlloyd.
The class ClientLibrary, method transactional_multiget_slice.
//this version is for testing only
public Map<ByteBuffer, List<ColumnOrSuperColumn>> transactional_multiget_slice(List<ByteBuffer> allKeys, ColumnParent column_parent, SlicePredicate predicate, CopsTestingConcurrentWriteHook afterFirstReadWriteHook, CopsTestingConcurrentWriteHook afterFirstRoundWriteHook) throws Exception {
    //if (logger.isTraceEnabled()) {
    //    logger.trace("transactional_multiget_slice(allKeys = {}, column_parent = {}, predicate = {}, afterFirstReadWriteHook = {}, afterFirstRoundWriteHook = {})", new Object[]{printKeys(allKeys), column_parent, predicate, afterFirstReadWriteHook, afterFirstRoundWriteHook});
    //}
    //Split up into one request for each server in the local cluster
    Map<Cassandra.AsyncClient, List<ByteBuffer>> asyncClientToFirstRoundKeys = partitionByAsyncClients(allKeys);
    //testing-only logic -- ensure we can create a 2-round situation by sending the 1st-round request in at least 2 batches
    List<ByteBuffer> laterKeys = null;
    if (afterFirstReadWriteHook != null && asyncClientToFirstRoundKeys.size() == 1) {
        assert allKeys.size() > 1 : "Must have more than 1 key to split up the first round";
        //logger.trace("Splitting keys to ensure concurrent writes");
        laterKeys = allKeys.subList(0, allKeys.size() - 1);
        asyncClientToFirstRoundKeys = partitionByAsyncClients(allKeys.subList(allKeys.size() - 1, allKeys.size()));
    }
    if (afterFirstReadWriteHook != null || afterFirstRoundWriteHook != null) {
        assert clientContext.getDeps().size() == 0 : "you must clear the clientContext before you use these testing hooks";
    }
    //Send Round 1 Requests
    Queue<BlockingQueueCallback<multiget_slice_call>> firstRoundCallbacks = new LinkedList<BlockingQueueCallback<multiget_slice_call>>();
    boolean firstIteration = true;
    for (Entry<Cassandra.AsyncClient, List<ByteBuffer>> entry : asyncClientToFirstRoundKeys.entrySet()) {
        Cassandra.AsyncClient asyncClient = entry.getKey();
        List<ByteBuffer> keysForThisClient = entry.getValue();
        BlockingQueueCallback<multiget_slice_call> callback = new BlockingQueueCallback<multiget_slice_call>();
        firstRoundCallbacks.add(callback);
        //if (logger.isTraceEnabled()) { logger.trace("round 1: get " + printKeys(keysForThisClient) + " from " + asyncClient); }
        asyncClient.multiget_slice(keysForThisClient, column_parent, predicate, consistencyLevel, LamportClock.sendTimestamp(), callback);
        //testing purposes only
        if (firstIteration) {
            if (afterFirstReadWriteHook != null) {
                //busywait on allowing the first read to finish AND update LamportClock
                while (true) {
                    multiget_slice_call response = callback.peekResponse();
                    if (response != null) {
                        LamportClock.updateTime(response.getResult().lts);
                        break;
                    }
                }
                //logger.trace("Issuing afterFirstRead writes during round 1");
                afterFirstReadWriteHook.issueWrites();
                clientContext.clearDeps();
                if (laterKeys != null) {
                    BlockingQueueCallback<multiget_slice_call> laterCallback = new BlockingQueueCallback<multiget_slice_call>();
                    firstRoundCallbacks.add(laterCallback);
                    //logger.trace("round 1': get " + printKeys(laterKeys) + " from " + asyncClient);
                    asyncClient.multiget_slice(laterKeys, column_parent, predicate, consistencyLevel, LamportClock.sendTimestamp(), laterCallback);
                }
            }
            firstIteration = false;
        }
    }
    //testing purposes only
    if (afterFirstRoundWriteHook != null) {
        //busywait on allowing all the first reads to finish AND update LamportClock
        for (BlockingQueueCallback<multiget_slice_call> callback : firstRoundCallbacks) {
            while (true) {
                multiget_slice_call response = callback.peekResponse();
                if (response != null) {
                    LamportClock.updateTime(response.getResult().lts);
                    break;
                }
            }
        }
        //logger.trace("Issuing afterFirstRound writes between rounds");
        afterFirstRoundWriteHook.issueWrites();
        clientContext.clearDeps();
    }
    //Gather responses, track both max_evt and min_lvt
    long overallMaxEvt = Long.MIN_VALUE;
    long overallMinLvt = Long.MAX_VALUE;
    Map<ByteBuffer, List<ColumnOrSuperColumn>> keyToResult = new HashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
    NavigableMap<Long, List<ByteBuffer>> lvtToKeys = new TreeMap<Long, List<ByteBuffer>>();
    for (BlockingQueueCallback<multiget_slice_call> callback : firstRoundCallbacks) {
        MultigetSliceResult result = callback.getResponseNoInterruption().getResult();
        LamportClock.updateTime(result.lts);
        for (Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : result.value.entrySet()) {
            ByteBuffer key = entry.getKey();
            List<ColumnOrSuperColumn> coscList = entry.getValue();
            keyToResult.put(key, coscList);
            //find the evt and lvt for the entire row
            EvtAndLvt evtAndLvt = ColumnOrSuperColumnHelper.extractEvtAndLvt(coscList);
            if (!lvtToKeys.containsKey(evtAndLvt.getLatestValidTime())) {
                lvtToKeys.put(evtAndLvt.getLatestValidTime(), new LinkedList<ByteBuffer>());
            }
            lvtToKeys.get(evtAndLvt.getLatestValidTime()).add(key);
            //if (logger.isTraceEnabled()) { logger.trace("round 1 response for " + printKey(key) + " evt: " + evtAndLvt.getEarliestValidTime() + " lvt: " + evtAndLvt.getLatestValidTime()); }
            overallMaxEvt = Math.max(overallMaxEvt, evtAndLvt.getEarliestValidTime());
            overallMinLvt = Math.min(overallMinLvt, evtAndLvt.getLatestValidTime());
        }
    }
    //Execute 2nd round if necessary
    if (overallMinLvt < overallMaxEvt) {
        //get the smallest lvt > maxEvt
        long chosenTime = lvtToKeys.navigableKeySet().higher(overallMaxEvt);
        List<ByteBuffer> secondRoundKeys = new LinkedList<ByteBuffer>();
        for (List<ByteBuffer> keyList : lvtToKeys.headMap(chosenTime).values()) {
            secondRoundKeys.addAll(keyList);
        }
        //Send Round 2 Requests
        Map<Cassandra.AsyncClient, List<ByteBuffer>> asyncClientToSecondRoundKeys = partitionByAsyncClients(secondRoundKeys);
        Queue<BlockingQueueCallback<multiget_slice_by_time_call>> secondRoundCallbacks = new LinkedList<BlockingQueueCallback<multiget_slice_by_time_call>>();
        for (Entry<Cassandra.AsyncClient, List<ByteBuffer>> entry : asyncClientToSecondRoundKeys.entrySet()) {
            Cassandra.AsyncClient asyncClient = entry.getKey();
            List<ByteBuffer> keysForThisClient = entry.getValue();
            BlockingQueueCallback<multiget_slice_by_time_call> callback = new BlockingQueueCallback<multiget_slice_by_time_call>();
            secondRoundCallbacks.add(callback);
            //if (logger.isTraceEnabled()) { logger.trace("round 2: get " + printKeys(keysForThisClient) + " from " + asyncClient); }
            asyncClient.multiget_slice_by_time(keysForThisClient, column_parent, predicate, consistencyLevel, chosenTime, LamportClock.sendTimestamp(), callback);
        }
        //Gather second round responses
        overallMaxEvt = Long.MIN_VALUE;
        overallMinLvt = Long.MAX_VALUE;
        for (BlockingQueueCallback<multiget_slice_by_time_call> callback : secondRoundCallbacks) {
            MultigetSliceResult result = callback.getResponseNoInterruption().getResult();
            LamportClock.updateTime(result.lts);
            substituteValidFirstRoundResults(result, keyToResult);
            for (Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : result.value.entrySet()) {
                ByteBuffer key = entry.getKey();
                List<ColumnOrSuperColumn> coscList = entry.getValue();
                //find the evt and lvt for the entire row
                EvtAndLvt evtAndLvt = ColumnOrSuperColumnHelper.extractEvtAndLvt(coscList);
                if (!lvtToKeys.containsKey(evtAndLvt.getLatestValidTime())) {
                    lvtToKeys.put(evtAndLvt.getLatestValidTime(), new LinkedList<ByteBuffer>());
                }
                lvtToKeys.get(evtAndLvt.getLatestValidTime()).add(key);
                //if (logger.isTraceEnabled()) { logger.trace("round 2 response for " + printKey(key) + " evt: " + evtAndLvt.getEarliestValidTime() + " lvt: " + evtAndLvt.getLatestValidTime()); }
                overallMaxEvt = Math.max(overallMaxEvt, evtAndLvt.getEarliestValidTime());
                overallMinLvt = Math.min(overallMinLvt, evtAndLvt.getLatestValidTime());
            }
            keyToResult.putAll(result.getValue());
        }
        assert overallMaxEvt < overallMinLvt : overallMaxEvt + " !< " + overallMinLvt;
    }
    //Add dependencies on anything returned and remove deleted columns
    for (Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : keyToResult.entrySet()) {
        ByteBuffer key = entry.getKey();
        List<ColumnOrSuperColumn> coscList = entry.getValue();
        for (Iterator<ColumnOrSuperColumn> cosc_it = coscList.iterator(); cosc_it.hasNext(); ) {
            ColumnOrSuperColumn cosc = cosc_it.next();
            try {
                clientContext.addDep(key, cosc);
            } catch (NotFoundException nfe) {
                //remove deleted results; it's okay for all results to be removed
                cosc_it.remove();
            }
        }
    }
    return keyToResult;
}
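The second-round cutoff above comes straight from the NavigableMap API. A toy, self-contained version of the chosenTime selection (String keys stand in for ByteBuffers; illustrative values only, assumes the usual java.util imports):

public static void chosenTimeDemo() {
    // lvt -> keys observed in round 1
    NavigableMap<Long, List<String>> lvtToKeys = new TreeMap<Long, List<String>>();
    lvtToKeys.put(30L, new LinkedList<String>(Arrays.asList("k1")));
    lvtToKeys.put(55L, new LinkedList<String>(Arrays.asList("k2")));
    long overallMaxEvt = 40L;  // a 2nd round is needed here: min lvt (30) < max evt (40)
    // smallest lvt strictly greater than maxEvt; note that higher() returns null when
    // every lvt <= maxEvt, which would NPE when unboxed into a long as in the code above
    Long chosenTime = lvtToKeys.navigableKeySet().higher(overallMaxEvt);  // 55
    // only keys whose lvt < chosenTime must be re-read, at exactly chosenTime
    List<String> secondRoundKeys = new LinkedList<String>();
    for (List<String> keyList : lvtToKeys.headMap(chosenTime).values()) {
        secondRoundKeys.addAll(keyList);  // ["k1"]
    }
}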
Use of org.apache.cassandra.utils.ColumnOrSuperColumnHelper.EvtAndLvt in project eiger by wlloyd.
The class ClientLibrary, method transactional_get_range_slices.
public List<KeySlice> transactional_get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range) throws Exception {
    //if (logger.isTraceEnabled()) {
    //    logger.trace("transactional_get_range_slices(column_parent = {}, predicate = {}, range = {})", new Object[]{column_parent, predicate, range});
    //}
    //turn the KeyRange into AbstractBounds that are easier to reason about
    AbstractBounds<Token> requestedRange;
    if (range.start_key == null) {
        Token.TokenFactory tokenFactory = partitioner.getTokenFactory();
        Token left = tokenFactory.fromString(range.start_token);
        Token right = tokenFactory.fromString(range.end_token);
        requestedRange = new Bounds<Token>(left, right, partitioner);
    } else {
        AbstractBounds<RowPosition> rowPositionBounds = new Bounds<RowPosition>(RowPosition.forKey(range.start_key, partitioner), RowPosition.forKey(range.end_key, partitioner));
        requestedRange = rowPositionBounds.toTokenBounds();
    }
    //Split up into one request for each server in the local cluster
    Map<Cassandra.AsyncClient, List<Range<Token>>> asyncClientToRanges = new HashMap<Cassandra.AsyncClient, List<Range<Token>>>();
    for (Entry<Range<Token>, InetAddress> entry : ringCache.getRangeMap().entries()) {
        Range<Token> serverRange = entry.getKey();
        InetAddress addr = entry.getValue();
        Cassandra.AsyncClient asyncClient = addressToAsyncClient.get(addr);
        if (asyncClient == null) {
            //this addr is not in the local datacenter
            continue;
        }
        //We want to restrict the range we ask for from each server to be
        //the intersection of its range and the requested range
        serverRange = intersect(serverRange, requestedRange);
        if (serverRange == null) {
            //no intersection, so nothing to request from this server
            continue;
        }
        if (!asyncClientToRanges.containsKey(asyncClient)) {
            asyncClientToRanges.put(asyncClient, new ArrayList<Range<Token>>());
        }
        asyncClientToRanges.get(asyncClient).add(serverRange);
    }
    //Need to merge the adjacent ranges into a single KeyRange to request from each local server
    Map<Cassandra.AsyncClient, KeyRange> asyncClientToKeyRange = new HashMap<Cassandra.AsyncClient, KeyRange>();
    for (Entry<Cassandra.AsyncClient, List<Range<Token>>> entry : asyncClientToRanges.entrySet()) {
        Cassandra.AsyncClient asyncClient = entry.getKey();
        List<Range<Token>> rangeList = entry.getValue();
        List<AbstractBounds<Token>> normalizedBounds = AbstractBounds.normalize(rangeList);
        assert normalizedBounds.size() == 1 : "All parts of a server's range should be adjacent : " + normalizedBounds;
        AbstractBounds<Token<String>> serverRange = new Bounds<Token<String>>(normalizedBounds.get(0).left, normalizedBounds.get(0).right, partitioner);
        //WL TODO: Should be a more elegant way to extract tokens
        String leftToken = serverRange.left.toString();
        String rightToken = serverRange.right.toString();
        //Remove brackets from tokens (they only show up when we have the ByteOrderPartitioner, I think)
        if (leftToken.indexOf("[") != -1) {
            leftToken = leftToken.substring(leftToken.indexOf("[") + 1, leftToken.indexOf("]"));
            rightToken = rightToken.substring(rightToken.indexOf("[") + 1, rightToken.indexOf("]"));
        }
        KeyRange rangeForThisClient = new KeyRange();
        rangeForThisClient.setStart_token(leftToken);
        rangeForThisClient.setEnd_token(rightToken);
        asyncClientToKeyRange.put(asyncClient, rangeForThisClient);
    }
    Queue<BlockingQueueCallback<get_range_slices_call>> firstRoundCallbacks = new LinkedList<BlockingQueueCallback<get_range_slices_call>>();
    for (Entry<Cassandra.AsyncClient, KeyRange> entry : asyncClientToKeyRange.entrySet()) {
        Cassandra.AsyncClient asyncClient = entry.getKey();
        KeyRange rangeForThisClient = entry.getValue();
        BlockingQueueCallback<get_range_slices_call> callback = new BlockingQueueCallback<get_range_slices_call>();
        firstRoundCallbacks.add(callback);
        asyncClient.get_range_slices(column_parent, predicate, rangeForThisClient, consistencyLevel, LamportClock.sendTimestamp(), callback);
    }
    //Gather responses, track both max_evt and min_lvt
    long overallMaxEvt = Long.MIN_VALUE;
    long overallMinLvt = Long.MAX_VALUE;
    //keyToColumns should be in sorted order; clients (at least some of my testing code) assume this
    SortedMap<ByteBuffer, List<ColumnOrSuperColumn>> keyToColumns = new TreeMap<ByteBuffer, List<ColumnOrSuperColumn>>();
    Map<ByteBuffer, Set<Dep>> keyToDeps = new HashMap<ByteBuffer, Set<Dep>>();
    //WL TODO: Add support for doing queries to secondary indices
    NavigableMap<Long, List<ByteBuffer>> lvtToKeys = new TreeMap<Long, List<ByteBuffer>>();
    for (BlockingQueueCallback<get_range_slices_call> callback : firstRoundCallbacks) {
        GetRangeSlicesResult result = callback.getResponseNoInterruption().getResult();
        LamportClock.updateTime(result.lts);
        for (KeySlice keySlice : result.value) {
            ByteBuffer key = keySlice.key;
            List<ColumnOrSuperColumn> coscList = keySlice.columns;
            //find the evt and lvt for the entire row
            EvtAndLvt evtAndLvt = ColumnOrSuperColumnHelper.extractEvtAndLvt(coscList);
            if (!lvtToKeys.containsKey(evtAndLvt.getLatestValidTime())) {
                lvtToKeys.put(evtAndLvt.getLatestValidTime(), new LinkedList<ByteBuffer>());
            }
            lvtToKeys.get(evtAndLvt.getLatestValidTime()).add(key);
            //if (logger.isTraceEnabled()) { logger.trace("round 1 response for " + printKey(key) + " evt: " + evtAndLvt.getEarliestValidTime() + " lvt: " + evtAndLvt.getLatestValidTime()); }
            overallMaxEvt = Math.max(overallMaxEvt, evtAndLvt.getEarliestValidTime());
            overallMinLvt = Math.min(overallMinLvt, evtAndLvt.getLatestValidTime());
            ClientContext tmpContext = new ClientContext();
            for (Iterator<ColumnOrSuperColumn> cosc_it = coscList.iterator(); cosc_it.hasNext(); ) {
                ColumnOrSuperColumn cosc = cosc_it.next();
                try {
                    tmpContext.addDep(key, cosc);
                } catch (NotFoundException nfe) {
                    //remove deleted results; it's okay for all results to be removed
                    cosc_it.remove();
                }
            }
            keyToColumns.put(key, coscList);
            keyToDeps.put(key, tmpContext.getDeps());
        }
    }
    //Execute 2nd round if necessary
    if (overallMinLvt < overallMaxEvt) {
        //get the smallest lvt > maxEvt
        long chosenTime = lvtToKeys.navigableKeySet().higher(overallMaxEvt);
        List<ByteBuffer> secondRoundKeys = new LinkedList<ByteBuffer>();
        for (List<ByteBuffer> keyList : lvtToKeys.headMap(chosenTime).values()) {
            secondRoundKeys.addAll(keyList);
        }
        //invalidate all results for second-round keys (sanity check, not strictly necessary)
        for (ByteBuffer key : secondRoundKeys) {
            keyToColumns.remove(key);
            keyToDeps.remove(key);
        }
        Set<ByteBuffer> allKnownKeys = keyToColumns.keySet();
        Map<Cassandra.AsyncClient, List<ByteBuffer>> asyncClientToKnownKeys = partitionByAsyncClients(allKnownKeys);
        Queue<BlockingQueueCallback<get_range_slices_by_time_call>> secondRoundCallbacks = new LinkedList<BlockingQueueCallback<get_range_slices_by_time_call>>();
        for (Entry<Cassandra.AsyncClient, KeyRange> entry : asyncClientToKeyRange.entrySet()) {
            Cassandra.AsyncClient asyncClient = entry.getKey();
            KeyRange rangeForThisClient = entry.getValue();
            BlockingQueueCallback<get_range_slices_by_time_call> callback = new BlockingQueueCallback<get_range_slices_by_time_call>();
            secondRoundCallbacks.add(callback);
            List<ByteBuffer> knownKeys = asyncClientToKnownKeys.get(asyncClient);
            if (knownKeys == null) {
                //knownKeys can't be null for thrift encoding
                knownKeys = new LinkedList<ByteBuffer>();
            }
            asyncClient.get_range_slices_by_time(column_parent, predicate, rangeForThisClient, knownKeys, consistencyLevel, chosenTime, LamportClock.sendTimestamp(), callback);
        }
        for (BlockingQueueCallback<get_range_slices_by_time_call> callback : secondRoundCallbacks) {
            GetRangeSlicesResult result = callback.getResponseNoInterruption().getResult();
            LamportClock.updateTime(result.lts);
            for (KeySlice keySlice : result.value) {
                ByteBuffer key = keySlice.key;
                List<ColumnOrSuperColumn> coscList = keySlice.columns;
                ClientContext tmpContext = new ClientContext();
                for (Iterator<ColumnOrSuperColumn> cosc_it = coscList.iterator(); cosc_it.hasNext(); ) {
                    ColumnOrSuperColumn cosc = cosc_it.next();
                    try {
                        tmpContext.addDep(key, cosc);
                    } catch (NotFoundException nfe) {
                        //remove deleted results; it's okay for all results to be removed
                        cosc_it.remove();
                    }
                }
                keyToColumns.put(key, coscList);
                keyToDeps.put(key, tmpContext.getDeps());
            }
        }
    }
    //Add dependencies from results we return
    for (Set<Dep> deps : keyToDeps.values()) {
        clientContext.addDeps(deps);
    }
    List<KeySlice> combinedResults = new ArrayList<KeySlice>();
    for (Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : keyToColumns.entrySet()) {
        ByteBuffer key = entry.getKey();
        List<ColumnOrSuperColumn> coscList = entry.getValue();
        combinedResults.add(new KeySlice(key, coscList));
    }
    return combinedResults;
}
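And a sketch of a possible call site for the range read, reusing the hypothetical clientLibrary, parent, and predicate from the first example. Per the branch at the top of the method, setting explicit start/end keys (rather than tokens) routes through the RowPosition bounds; the print helper is just for illustration.

// Hypothetical call site; empty start/end keys ask for the full range.
KeyRange range = new KeyRange();
range.setCount(100);  // at most 100 rows
range.setStart_key(ByteBufferUtil.EMPTY_BYTE_BUFFER);
range.setEnd_key(ByteBufferUtil.EMPTY_BYTE_BUFFER);
List<KeySlice> slices = clientLibrary.transactional_get_range_slices(parent, predicate, range);
for (KeySlice slice : slices) {
    // each KeySlice pairs a row key with its dependency-tracked columns
    System.out.println(ByteBufferUtil.bytesToHex(slice.key) + " -> " + slice.columns.size() + " columns");
}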