use of com.google.common.collect.AbstractIterator in project xtext-core by eclipse.
the class AbstractTrace method getAllTraceRegions.
protected Iterable<AbstractTraceRegion> getAllTraceRegions(final ITextRegion localRegion) {
    final AbstractTraceRegion left = findTraceRegionAtLeftOffset(localRegion.getOffset());
    final int end = localRegion.getOffset() + localRegion.getLength();
    if (left == null) {
        return Collections.emptyList();
    }
    return new Iterable<AbstractTraceRegion>() {

        @Override
        public Iterator<AbstractTraceRegion> iterator() {
            AbstractTraceRegion root = getRootTraceRegion();
            if (root == null)
                return ImmutableSet.<AbstractTraceRegion>of().iterator();
            final Iterator<AbstractTraceRegion> allLeafs = root.leafIterator();
            Iterator<AbstractTraceRegion> result = new AbstractIterator<AbstractTraceRegion>() {

                AbstractTraceRegion first;

                {
                    while (first == null && allLeafs.hasNext()) {
                        AbstractTraceRegion next = allLeafs.next();
                        if (next.getMyOffset() == left.getMyOffset()) {
                            this.first = next;
                            break;
                        }
                    }
                }

                @Override
                protected AbstractTraceRegion computeNext() {
                    if (first != null) {
                        AbstractTraceRegion result = first;
                        first = null;
                        return result;
                    }
                    if (!allLeafs.hasNext())
                        return endOfData();
                    AbstractTraceRegion candidate = allLeafs.next();
                    if (candidate.getMyOffset() >= end) {
                        return endOfData();
                    }
                    return candidate;
                }
            };
            return result;
        }
    };
}
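The snippet above relies on Guava's AbstractIterator contract: computeNext() either returns the next element or calls endOfData() to terminate the iteration. A stripped-down sketch of the same windowed-iteration idea, using hypothetical names (RangeIterator and an integer bound) rather than the xtext types:

import com.google.common.collect.AbstractIterator;
import java.util.Arrays;
import java.util.Iterator;

// Hypothetical RangeIterator: emits values from a source iterator until an
// exclusive upper bound is reached, mirroring the leaf-window logic above.
class RangeIterator extends AbstractIterator<Integer> {

    private final Iterator<Integer> source;
    private final int end;

    RangeIterator(Iterable<Integer> source, int end) {
        this.source = source.iterator();
        this.end = end;
    }

    @Override
    protected Integer computeNext() {
        if (!source.hasNext()) {
            // underlying input exhausted
            return endOfData();
        }
        Integer candidate = source.next();
        if (candidate >= end) {
            // past the window: stop even though more input remains
            return endOfData();
        }
        return candidate;
    }

    public static void main(String[] args) {
        // prints 1, 2, 3 and stops at the bound 5
        new RangeIterator(Arrays.asList(1, 2, 3, 7, 9), 5).forEachRemaining(System.out::println);
    }
}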
use of com.google.common.collect.AbstractIterator in project voldemort by voldemort.
the class AdminServiceBasicTest method testUpdateTimeBased.
@Test
public void testUpdateTimeBased() {
    final long baseTimeMs = System.currentTimeMillis();
    String storeName = "test-replication-persistent";
    final HashMap<ByteArray, byte[]> entries = ServerTestUtils.createRandomKeyValuePairs(5);
    final List<ByteArray> keys = new ArrayList<ByteArray>(entries.keySet());
    // Insert some data for the keys
    Store<ByteArray, byte[], byte[]> nodeStore = getStore(0, storeName);
    for (int i = 0; i < keys.size(); i++) {
        ByteArray key = keys.get(i);
        byte[] val = entries.get(key);
        long ts = 0;
        if (i == 0) {
            // have multiple conflicting versions: one with a lower ts and one
            // with a higher ts than the streaming version
            Versioned<byte[]> v1 = new Versioned<byte[]>(val, TestUtils.getClockWithTs(baseTimeMs - 1, 1));
            nodeStore.put(key, v1, null);
            Versioned<byte[]> v2 = new Versioned<byte[]>(val, TestUtils.getClockWithTs(baseTimeMs + 1, 2));
            nodeStore.put(key, v2, null);
        } else {
            if (i % 2 == 0) {
                // even keys : storage version wins (higher ts than the streaming put)
                ts = baseTimeMs + i;
            } else {
                // odd keys : streaming write wins (storage ts is lower)
                ts = baseTimeMs - i;
            }
            nodeStore.put(key, new Versioned<byte[]>(val, new VectorClock(ts)), null);
        }
    }
    Iterator<Pair<ByteArray, Versioned<byte[]>>> iterator = new AbstractIterator<Pair<ByteArray, Versioned<byte[]>>>() {

        final Iterator<ByteArray> keysItr = keys.iterator();

        int keyCount = 0;

        @Override
        protected Pair<ByteArray, Versioned<byte[]>> computeNext() {
            while (keysItr.hasNext()) {
                ByteArray key = keysItr.next();
                byte[] val = entries.get(key);
                long ts = 0;
                if (keyCount == 0) {
                    // streaming put will be in the middle of the two versions on storage
                    keyCount++;
                    return new Pair<ByteArray, Versioned<byte[]>>(key, new Versioned<byte[]>(val, new VectorClock(baseTimeMs)));
                } else {
                    if (keyCount % 2 == 0) {
                        // even keys : storage version wins (streaming ts is lower)
                        ts = baseTimeMs - keyCount;
                    } else {
                        // odd keys : streaming write wins (streaming ts is higher)
                        ts = baseTimeMs + keyCount;
                    }
                    keyCount++;
                    return new Pair<ByteArray, Versioned<byte[]>>(key, new Versioned<byte[]>(val, new VectorClock(ts)));
                }
            }
            return endOfData();
        }
    };
    getAdminClient().streamingOps.updateEntriesTimeBased(0, storeName, iterator, null);
    // check updated values
    for (int i = 0; i < keys.size(); i++) {
        ByteArray key = keys.get(i);
        List<Versioned<byte[]>> vals = nodeStore.get(key, null);
        if (i == 0) {
            assertEquals("Must contain exactly two versions", 2, vals.size());
            Set<Long> storageTimeSet = new HashSet<Long>();
            storageTimeSet.add(((VectorClock) vals.get(0).getVersion()).getTimestamp());
            storageTimeSet.add(((VectorClock) vals.get(1).getVersion()).getTimestamp());
            Set<Long> expectedTimeSet = new HashSet<Long>();
            expectedTimeSet.add(baseTimeMs - 1);
            expectedTimeSet.add(baseTimeMs + 1);
            assertEquals("Streaming put should have backed off since at least one version has a greater timestamp", expectedTimeSet, storageTimeSet);
        } else {
            assertEquals("Must contain exactly one version", 1, vals.size());
            assertEquals("Must contain the version with the maximum timestamp", baseTimeMs + i, ((VectorClock) vals.get(0).getVersion()).getTimestamp());
        }
    }
}
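For comparison: the anonymous AbstractIterator above is needed because the mapping keeps per-key state (keyCount). If every streamed entry carried the same timestamp, a stateless Guava transform would suffice. A hedged sketch using the same Voldemort types as the test (the package names below are assumptions):

import com.google.common.collect.Iterators;
import java.util.HashMap;
import java.util.Iterator;
import voldemort.utils.ByteArray;
import voldemort.utils.Pair;
import voldemort.versioning.VectorClock;
import voldemort.versioning.Versioned;

class FlatTimestampStream {

    // Builds a streaming iterator that stamps every entry with the same timestamp.
    static Iterator<Pair<ByteArray, Versioned<byte[]>>> stream(final HashMap<ByteArray, byte[]> entries, final long timestampMs) {
        return Iterators.transform(entries.entrySet().iterator(),
                entry -> new Pair<ByteArray, Versioned<byte[]>>(entry.getKey(),
                        new Versioned<byte[]>(entry.getValue(), new VectorClock(timestampMs))));
    }
}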
use of com.google.common.collect.AbstractIterator in project voldemort by voldemort.
the class VoldemortAdminTool method readEntriesBinary.
private static Iterator<Pair<ByteArray, Versioned<byte[]>>> readEntriesBinary(File inputDir, String storeName) throws IOException {
    File inputFile = new File(inputDir, storeName + ".entries");
    if (!inputFile.exists()) {
        throw new FileNotFoundException("File " + inputFile.getAbsolutePath() + " does not exist!");
    }
    final DataInputStream dis = new DataInputStream(new BufferedInputStream(new FileInputStream(inputFile)));
    return new AbstractIterator<Pair<ByteArray, Versioned<byte[]>>>() {

        @Override
        protected Pair<ByteArray, Versioned<byte[]>> computeNext() {
            try {
                int length = dis.readInt();
                byte[] keyBytes = new byte[length];
                ByteUtils.read(dis, keyBytes);
                length = dis.readInt();
                byte[] versionBytes = new byte[length];
                ByteUtils.read(dis, versionBytes);
                length = dis.readInt();
                byte[] valueBytes = new byte[length];
                ByteUtils.read(dis, valueBytes);
                ByteArray key = new ByteArray(keyBytes);
                VectorClock version = new VectorClock(versionBytes);
                Versioned<byte[]> value = new Versioned<byte[]>(valueBytes, version);
                return new Pair<ByteArray, Versioned<byte[]>>(key, value);
            } catch (EOFException e) {
                try {
                    dis.close();
                } catch (IOException ie) {
                    ie.printStackTrace();
                }
                return endOfData();
            } catch (IOException e) {
                try {
                    dis.close();
                } catch (IOException ie) {
                    ie.printStackTrace();
                }
                throw new VoldemortException("Error reading from input file ", e);
            }
        }
    };
}
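The reader above implies a simple length-prefixed layout: for each entry, the key bytes, the serialized vector clock, then the value bytes, each preceded by an int length. A minimal sketch of a matching writer (a hypothetical helper, not part of VoldemortAdminTool):

import java.io.DataOutputStream;
import java.io.IOException;

class EntryWriter {

    // Writes one entry in the length-prefixed layout the reader above expects:
    // key bytes, serialized vector clock, value bytes, each preceded by its length.
    static void writeEntryBinary(DataOutputStream dos, byte[] keyBytes, byte[] versionBytes, byte[] valueBytes) throws IOException {
        dos.writeInt(keyBytes.length);
        dos.write(keyBytes);
        dos.writeInt(versionBytes.length);
        dos.write(versionBytes);
        dos.writeInt(valueBytes.length);
        dos.write(valueBytes);
    }
}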
use of com.google.common.collect.AbstractIterator in project java-driver by datastax.
the class TokenAwarePolicy method newQueryPlan.
/**
 * {@inheritDoc}
 * <p/>
 * The returned plan will first return local replicas for the query (i.e.
 * replicas whose {@linkplain HostDistance distance} according to the child policy is {@code LOCAL}),
 * if it can determine them (i.e. mainly if the statement's
 * {@linkplain Statement#getRoutingKey(ProtocolVersion, CodecRegistry) routing key}
 * is not {@code null}), ordered according to the {@linkplain ReplicaOrdering ordering strategy}
 * specified at instantiation; it will then return the rest of the child policy's
 * original query plan.
*/
@Override
public Iterator<Host> newQueryPlan(final String loggedKeyspace, final Statement statement) {
    ByteBuffer partitionKey = statement.getRoutingKey(protocolVersion, codecRegistry);
    String keyspace = statement.getKeyspace();
    if (keyspace == null)
        keyspace = loggedKeyspace;
    if (partitionKey == null || keyspace == null)
        return childPolicy.newQueryPlan(keyspace, statement);
    final Set<Host> replicas = clusterMetadata.getReplicas(Metadata.quote(keyspace), partitionKey);
    if (replicas.isEmpty())
        return childPolicy.newQueryPlan(loggedKeyspace, statement);
    if (replicaOrdering == ReplicaOrdering.NEUTRAL) {
        final Iterator<Host> childIterator = childPolicy.newQueryPlan(keyspace, statement);
        return new AbstractIterator<Host>() {

            private List<Host> nonReplicas;

            private Iterator<Host> nonReplicasIterator;

            @Override
            protected Host computeNext() {
                while (childIterator.hasNext()) {
                    Host host = childIterator.next();
                    if (host.isUp() && replicas.contains(host) && childPolicy.distance(host) == HostDistance.LOCAL) {
                        // UP replicas should be prioritized, retaining order from childPolicy
                        return host;
                    } else {
                        // save for later
                        if (nonReplicas == null)
                            nonReplicas = new ArrayList<Host>();
                        nonReplicas.add(host);
                    }
                }
                // This should only engage if all local replicas are DOWN
                if (nonReplicas != null) {
                    if (nonReplicasIterator == null)
                        nonReplicasIterator = nonReplicas.iterator();
                    if (nonReplicasIterator.hasNext())
                        return nonReplicasIterator.next();
                }
                return endOfData();
            }
        };
    } else {
        final Iterator<Host> replicasIterator;
        if (replicaOrdering == ReplicaOrdering.RANDOM) {
            List<Host> replicasList = Lists.newArrayList(replicas);
            Collections.shuffle(replicasList);
            replicasIterator = replicasList.iterator();
        } else {
            replicasIterator = replicas.iterator();
        }
        return new AbstractIterator<Host>() {

            private Iterator<Host> childIterator;

            @Override
            protected Host computeNext() {
                while (replicasIterator.hasNext()) {
                    Host host = replicasIterator.next();
                    if (host.isUp() && childPolicy.distance(host) == HostDistance.LOCAL)
                        return host;
                }
                if (childIterator == null)
                    childIterator = childPolicy.newQueryPlan(loggedKeyspace, statement);
                while (childIterator.hasNext()) {
                    Host host = childIterator.next();
                    // Skip it if it was already a local replica
                    if (!replicas.contains(host) || childPolicy.distance(host) != HostDistance.LOCAL)
                        return host;
                }
                return endOfData();
            }
        };
    }
}
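A hedged usage sketch for wiring this policy into a cluster, assuming driver 3.x where TokenAwarePolicy takes a child policy and a ReplicaOrdering (the contact point and the ordering choice are illustrative):

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.DCAwareRoundRobinPolicy;
import com.datastax.driver.core.policies.TokenAwarePolicy;

class TokenAwareSetup {

    public static void main(String[] args) {
        // Wrap the datacenter-aware policy so replicas for the routing key are tried first,
        // shuffled among themselves (RANDOM ordering).
        Cluster cluster = Cluster.builder()
                .addContactPoint("127.0.0.1")
                .withLoadBalancingPolicy(new TokenAwarePolicy(
                        DCAwareRoundRobinPolicy.builder().build(),
                        TokenAwarePolicy.ReplicaOrdering.RANDOM))
                .build();
        cluster.close();
    }
}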
use of com.google.common.collect.AbstractIterator in project presto by prestodb.
the class TempStorageSingleStreamSpiller method closeWhenExhausted.
private static <T> Iterator<T> closeWhenExhausted(Iterator<T> iterator, Closeable resource) {
    requireNonNull(iterator, "iterator is null");
    requireNonNull(resource, "resource is null");
    return new AbstractIterator<T>() {

        @Override
        protected T computeNext() {
            if (iterator.hasNext()) {
                return iterator.next();
            }
            try {
                resource.close();
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
            return endOfData();
        }
    };
}
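The helper above is private to TempStorageSingleStreamSpiller. The same idiom sketched in isolation against plain JDK types (note the resource is only closed if the iterator is actually exhausted):

import com.google.common.collect.AbstractIterator;
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Iterator;

class CloseWhenExhaustedExample {

    // Same idiom as the spiller's helper: delegate until the source runs out, then close the resource.
    static <T> Iterator<T> closeWhenExhausted(Iterator<T> iterator, Closeable resource) {
        return new AbstractIterator<T>() {
            @Override
            protected T computeNext() {
                if (iterator.hasNext()) {
                    return iterator.next();
                }
                try {
                    resource.close();
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
                return endOfData();
            }
        };
    }

    public static void main(String[] args) throws IOException {
        // The reader is closed automatically once every line has been consumed.
        BufferedReader reader = Files.newBufferedReader(Paths.get("data.txt"));
        Iterator<String> lines = closeWhenExhausted(reader.lines().iterator(), reader);
        lines.forEachRemaining(System.out::println);
    }
}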