Use of org.apache.ignite.cache.affinity.Affinity in project ignite by apache.
The class CacheUtils, method sparseFold.
private static <K, V, A> A sparseFold(String cacheName, IgniteBiFunction<Cache.Entry<K, V>, A, A> folder,
    IgnitePredicate<K> keyFilter, BinaryOperator<A> accumulator, A zeroVal, V defVal, K defKey, long defValCnt,
    boolean isNilpotent) {
    A defRes = zeroVal;

    // If folding the default value is not a no-op, apply the folder to the default entry defValCnt times.
    if (!isNilpotent)
        for (int i = 0; i < defValCnt; i++)
            defRes = folder.apply(new CacheEntryImpl<>(defKey, defVal), defRes);

    Collection<A> totalRes = bcast(cacheName, () -> {
        Ignite ignite = Ignition.localIgnite();
        IgniteCache<K, V> cache = ignite.getOrCreateCache(cacheName);

        int partsCnt = ignite.affinity(cacheName).partitions();

        // Use affinity in the ScanQuery filter. Otherwise entries would be processed on every node, which is wrong.
        Affinity affinity = ignite.affinity(cacheName);
        ClusterNode localNode = ignite.cluster().localNode();

        A a = zeroVal;

        // Iterate over all partitions. Some of them are stored on this local node.
        for (int part = 0; part < partsCnt; part++) {
            int p = part;

            // The query returns an empty cursor if this partition is not stored on this node.
            for (Cache.Entry<K, V> entry : cache.query(new ScanQuery<K, V>(part,
                (k, v) -> affinity.mapPartitionToNode(p) == localNode && (keyFilter == null || keyFilter.apply(k)))))
                a = folder.apply(entry, a);
        }

        return a;
    });

    totalRes.add(defRes);

    return totalRes.stream().reduce(zeroVal, accumulator);
}
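Both sparseFold above and fold below delegate the per-node execution to a bcast helper that is not shown in these snippets. The following is a minimal sketch, under the assumption that the helper simply broadcasts the callable to the data nodes of the given cache and collects one partial result per node; the body is an assumption, not necessarily the exact code used in the project.

private static <A> Collection<A> bcast(String cacheName, IgniteCallable<A> call) {
    Ignite ignite = Ignition.localIgnite();

    // Assumed behavior: run the callable once on every node that holds data for the cache
    // and return the per-node results as a collection.
    return ignite.compute(ignite.cluster().forDataNodes(cacheName)).broadcast(call);
}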
Use of org.apache.ignite.cache.affinity.Affinity in project ignite by apache.
The class CacheUtils, method fold.
/**
 * <b>Currently fold supports only commutative operations.</b>
 *
 * @param cacheName Cache name.
 * @param folder Fold function operating over cache entries.
 * @param keyFilter Cache key filter; {@code null} means that all keys are accepted.
 * @param <K> Cache key object type.
 * @param <V> Cache value object type.
 * @param <A> Fold result type.
 * @return Fold operation result.
 */
public static <K, V, A> Collection<A> fold(String cacheName, IgniteBiFunction<CacheEntry<K, V>, A, A> folder,
    IgnitePredicate<K> keyFilter) {
    return bcast(cacheName, () -> {
        Ignite ignite = Ignition.localIgnite();
        IgniteCache<K, V> cache = ignite.getOrCreateCache(cacheName);

        int partsCnt = ignite.affinity(cacheName).partitions();

        // Use affinity in the ScanQuery filter. Otherwise entries would be processed on every node, which is wrong.
        Affinity affinity = ignite.affinity(cacheName);
        ClusterNode locNode = ignite.cluster().localNode();

        A a = null;

        // Iterate over all partitions. Some of them are stored on this local node.
        for (int part = 0; part < partsCnt; part++) {
            int p = part;

            // The query returns an empty cursor if this partition is not stored on this node.
            for (Cache.Entry<K, V> entry : cache.query(new ScanQuery<K, V>(part,
                (k, v) -> affinity.mapPartitionToNode(p) == locNode && (keyFilter == null || keyFilter.apply(k)))))
                a = folder.apply(new CacheEntry<>(entry, cache), a);
        }

        return a;
    });
}
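A hedged usage sketch of fold: assuming a cache named "testCache" with Integer keys and Double values (the name and types are illustrative only), the call below counts entries on each node and then sums the per-node partial counts on the caller.

// Illustrative only: count entries per node, then combine the per-node partial counts.
// The accumulator starts as null on each node, matching fold's initial value above.
Collection<Long> perNodeCnts = CacheUtils.<Integer, Double, Long>fold("testCache",
    (entry, acc) -> acc == null ? 1L : acc + 1,
    key -> true); // Accept every key; passing null has the same effect.

long totalCnt = perNodeCnts.stream().filter(Objects::nonNull).mapToLong(Long::longValue).sum();

Because only commutative operations are supported, the order in which nodes report their partial results does not affect the final count.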
Use of org.apache.ignite.cache.affinity.Affinity in project ignite by apache.
The class IgniteCacheConcurrentPutGetRemove, method putGetRemove.
/**
 * @param ccfg Cache configuration.
 * @throws Exception If failed.
 */
private void putGetRemove(final CacheConfiguration ccfg) throws Exception {
    ignite(0).createCache(ccfg);

    try {
        long stopTime = System.currentTimeMillis() + 30_000;

        int iter = 0;

        while (System.currentTimeMillis() < stopTime) {
            if (iter++ % 100 == 0)
                log.info("Iteration: " + iter);

            final AtomicInteger idx = new AtomicInteger();

            final int KEYS = 10;

            // Concurrently put/get/remove random values for the same small key set from all nodes.
            GridTestUtils.runMultiThreaded(new Callable<Void>() {
                @Override public Void call() throws Exception {
                    int nodeIdx = idx.getAndIncrement() % NODES;

                    IgniteCache<Object, Object> cache = ignite(nodeIdx).cache(ccfg.getName());

                    ThreadLocalRandom rnd = ThreadLocalRandom.current();

                    for (int i = 0; i < 10; i++) {
                        for (int k = 0; k < KEYS; k++) {
                            switch (rnd.nextInt(3)) {
                                case 0:
                                    cache.put(k, rnd.nextInt(10_000));
                                    break;

                                case 1:
                                    cache.get(k);
                                    break;

                                case 2:
                                    cache.remove(k);
                                    break;

                                default:
                                    fail();
                            }
                        }
                    }

                    return null;
                }
            }, NODES * 10, "update-thread");

            Affinity aff = ignite(0).affinity(ccfg.getName());

            // Verify that primary and backup copies agree for every key.
            for (int k = 0; k < KEYS; k++) {
                Collection<ClusterNode> nodes = aff.mapKeyToPrimaryAndBackups(k);

                // The first node returned by mapKeyToPrimaryAndBackups is the primary node for the key.
                Object expVal = grid(nodes.iterator().next()).cache(ccfg.getName()).get(k);

                for (int n = 0; n < NODES; n++) {
                    Ignite ignite = ignite(n);

                    IgniteCache<Object, Object> cache = ignite.cache(ccfg.getName());

                    if (nodes.contains(ignite.cluster().localNode()))
                        assertEquals(expVal, cache.localPeek(k));
                    else {
                        assertNull(cache.localPeek(k));

                        assertEquals(expVal, cache.get(k));
                    }
                }
            }
        }
    }
    finally {
        ignite(0).destroyCache(ccfg.getName());
    }
}
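putGetRemove is a helper rather than a test by itself; the test methods of the class pass in concrete cache configurations. A hypothetical caller, assuming a partitioned atomic cache with one backup (the method name and configuration values are illustrative, not taken from the actual test class), could look like this:

/**
 * @throws Exception If failed.
 */
public void testAtomicPartitioned() throws Exception {
    // Illustrative configuration: partitioned atomic cache with one backup.
    CacheConfiguration<Object, Object> ccfg = new CacheConfiguration<>("test-cache");

    ccfg.setCacheMode(CacheMode.PARTITIONED);
    ccfg.setAtomicityMode(CacheAtomicityMode.ATOMIC);
    ccfg.setBackups(1);

    putGetRemove(ccfg);
}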
Use of org.apache.ignite.cache.affinity.Affinity in project ignite by apache.
The class SplitCache, method localEntries.
/**
 * Returns local entries for keys corresponding to {@code featureIndexes}.
 *
 * @param featureIndexes Indexes of features.
 * @param affinity Affinity function.
 * @param trainingUUID UUID of training.
 * @return Local entries for keys corresponding to {@code featureIndexes}.
 */
public static Iterable<Cache.Entry<SplitKey, IgniteBiTuple<Integer, Double>>> localEntries(
    Set<Integer> featureIndexes,
    IgniteBiFunction<Integer, Ignite, Object> affinity,
    UUID trainingUUID) {
    Ignite ignite = Ignition.localIgnite();

    Set<SplitKey> keys = featureIndexes.stream()
        .map(fIdx -> new SplitKey(trainingUUID, affinity.apply(fIdx, ignite), fIdx))
        .collect(Collectors.toSet());

    // Keep only the keys that are mapped to the local node.
    Collection<SplitKey> locKeys = affinity().mapKeysToNodes(keys)
        .getOrDefault(ignite.cluster().localNode(), Collections.emptyList());

    // Lazily peek local values; the cache is resolved only when the Iterable is actually traversed.
    return () -> {
        Function<SplitKey, Cache.Entry<SplitKey, IgniteBiTuple<Integer, Double>>> f =
            k -> new CacheEntryImpl<>(k, getOrCreate(ignite).localPeek(k));

        return locKeys.stream().map(f).iterator();
    };
}
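A hedged usage sketch of localEntries, meant to run on a node that participates in the training. The identity affinity function (mapping each feature index to itself) and the freshly generated trainingUUID are assumptions for illustration only.

// Illustrative only: request local split entries for the first ten feature indexes.
UUID trainingUUID = UUID.randomUUID();

Set<Integer> featureIdxs = IntStream.range(0, 10).boxed().collect(Collectors.toSet());

// Assumed affinity function: use the feature index itself as the affinity key.
Iterable<Cache.Entry<SplitKey, IgniteBiTuple<Integer, Double>>> locEntries =
    SplitCache.localEntries(featureIdxs, (fIdx, ignite) -> fIdx, trainingUUID);

for (Cache.Entry<SplitKey, IgniteBiTuple<Integer, Double>> e : locEntries)
    System.out.println(e.getKey() + " -> " + e.getValue());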
Use of org.apache.ignite.cache.affinity.Affinity in project ignite by apache.
The class MLPGroupUpdateTrainerCacheInput, method batchSupplier.
/** {@inheritDoc} */
@Override public IgniteSupplier<IgniteBiTuple<Matrix, Matrix>> batchSupplier() {
    String cName = cache.getName();

    // Copy to a local variable so that 'this' is not captured in the serialization context of the lambda.
    int bs = batchSize;

    // Copy to a local variable so that 'this' is not captured in the serialization context of the lambda.
    Random r = rand;

    return () -> {
        Ignite ignite = Ignition.localIgnite();
        IgniteCache<Integer, LabeledVector<Vector, Vector>> cache = ignite.getOrCreateCache(cName);

        int total = cache.size();

        Affinity<Integer> affinity = ignite.affinity(cName);

        List<Integer> allKeys = IntStream.range(0, total).boxed().collect(Collectors.toList());
        List<Integer> keys = new ArrayList<>(affinity.mapKeysToNodes(allKeys).get(ignite.cluster().localNode()));

        int locKeysCnt = keys.size();

        // Sample a batch of distinct local keys.
        int[] selected = Utils.selectKDistinct(locKeysCnt, Math.min(bs, locKeysCnt), r);

        // Get dimensions of vectors in the cache. We assume that every feature vector has the same
        // dimension d1 and every label has the same dimension d2.
        LabeledVector<Vector, Vector> dimEntry = cache.get(keys.get(selected[0]));

        Matrix inputs = new DenseLocalOnHeapMatrix(dimEntry.features().size(), bs);
        Matrix groundTruth = new DenseLocalOnHeapMatrix(dimEntry.label().size(), bs);

        for (int i = 0; i < selected.length; i++) {
            LabeledVector<Vector, Vector> labeled = cache.get(keys.get(selected[i]));

            inputs.assignColumn(i, labeled.features());
            groundTruth.assignColumn(i, labeled.label());
        }

        return new IgniteBiTuple<>(inputs, groundTruth);
    };
}
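The returned supplier resolves the local Ignite instance itself, so it is meant to be invoked on a cluster node. A hedged sketch of consuming one batch, assuming trainerInput is an already constructed MLPGroupUpdateTrainerCacheInput (the variable name is hypothetical):

// Illustrative only: trainerInput is assumed to be built elsewhere.
IgniteSupplier<IgniteBiTuple<Matrix, Matrix>> batchSup = trainerInput.batchSupplier();

// Each call samples a fresh random batch from the keys stored on the local node.
IgniteBiTuple<Matrix, Matrix> batch = batchSup.get();

Matrix inputs = batch.get1();      // d1 x batchSize matrix of feature columns.
Matrix groundTruth = batch.get2(); // d2 x batchSize matrix of label columns.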