Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class AgentClusterDemo, method tryStart.
/**
 * Start ignite node with cacheEmployee and populate it with data.
 */
public static CountDownLatch tryStart() {
    if (initGuard.compareAndSet(false, true)) {
        log.info("DEMO: Starting embedded nodes for demo...");

        System.setProperty(IGNITE_NO_ASCII, "true");
        System.setProperty(IGNITE_QUIET, "false");
        System.setProperty(IGNITE_UPDATE_NOTIFIER, "false");
        System.setProperty(IGNITE_ATOMIC_CACHE_DELETE_HISTORY_SIZE, "20");
        System.setProperty(IGNITE_PERFORMANCE_SUGGESTIONS_DISABLED, "true");

        final AtomicInteger basePort = new AtomicInteger(60700);
        final AtomicInteger cnt = new AtomicInteger(-1);

        final ScheduledExecutorService execSrv = newScheduledThreadPool(1, "demo-nodes-start");

        execSrv.scheduleAtFixedRate(new Runnable() {
            @Override public void run() {
                int idx = cnt.incrementAndGet();
                int port = basePort.get();
                boolean first = idx == 0;

                try {
                    IgniteConfiguration cfg = igniteConfiguration(port, idx, false);

                    if (first) {
                        U.delete(Paths.get(cfg.getWorkDirectory()));
                        U.resolveWorkDirectory(cfg.getWorkDirectory(),
                            cfg.getDataStorageConfiguration().getStoragePath(), true);
                    }

                    Ignite ignite = Ignition.start(cfg);

                    if (first) {
                        ClusterNode node = ignite.cluster().localNode();

                        Collection<String> jettyAddrs = node.attribute(ATTR_REST_JETTY_ADDRS);

                        if (jettyAddrs == null) {
                            Ignition.stopAll(true);

                            throw new IgniteException("DEMO: Failed to start Jetty REST server on embedded node");
                        }

                        String jettyHost = jettyAddrs.iterator().next();
                        Integer jettyPort = node.attribute(ATTR_REST_JETTY_PORT);

                        if (F.isEmpty(jettyHost) || jettyPort == null)
                            throw new IgniteException("DEMO: Failed to start Jetty REST handler on embedded node");

                        log.info("DEMO: Started embedded node for demo purpose [TCP binary port={}, Jetty REST port={}]",
                            port, jettyPort);

                        demoUrl = String.format("http://%s:%d", jettyHost, jettyPort);

                        initLatch.countDown();
                    }
                }
                catch (Throwable e) {
                    if (first) {
                        basePort.getAndAdd(50);

                        log.warn("DEMO: Failed to start embedded node.", e);
                    }
                    else
                        log.error("DEMO: Failed to start embedded node.", e);
                }
                finally {
                    if (idx == NODE_CNT) {
                        Ignite ignite = Ignition.ignite(SRV_NODE_NAME + 0);

                        if (ignite != null) {
                            ignite.cluster().active(true);

                            deployServices(ignite.services(ignite.cluster().forServers()));
                        }

                        log.info("DEMO: All embedded nodes for demo successfully started");

                        execSrv.shutdown();
                    }
                }
            }
        }, 1, 5, TimeUnit.SECONDS);
    }

    return initLatch;
}
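A minimal caller sketch for the method above: tryStart() returns initLatch, so a client only has to await it before using the demo cluster. The timeout value and the surrounding error handling are illustrative, not part of the original class.

// Illustrative caller (timeout value is arbitrary); await(..) throws InterruptedException.
CountDownLatch latch = AgentClusterDemo.tryStart();

if (!latch.await(3, TimeUnit.MINUTES))
    throw new IllegalStateException("DEMO: Embedded nodes did not start in time");

// Once the latch is released, demoUrl points at the Jetty REST endpoint of the first node.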
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class CacheUtils, method update.
/**
 * @param cacheName Cache name.
 * @param ignite Ignite.
 * @param fun An operation that accepts a cache entry and processes it.
 * @param keysGen Keys generator.
 * @param <K> Cache key object type.
 * @param <V> Cache value object type.
 */
public static <K, V> void update(String cacheName, Ignite ignite, IgniteConsumer<Cache.Entry<K, V>> fun,
    IgniteSupplier<Set<K>> keysGen) {
    bcast(cacheName, ignite, () -> {
        Ignite ig = Ignition.localIgnite();
        IgniteCache<K, V> cache = ig.getOrCreateCache(cacheName);

        Affinity<K> affinity = ig.affinity(cacheName);
        ClusterNode locNode = ig.cluster().localNode();

        Collection<K> ks = affinity.mapKeysToNodes(keysGen.get()).get(locNode);

        if (ks == null)
            return;

        Map<K, V> m = new ConcurrentHashMap<>();

        for (K k : ks) {
            V v = cache.localPeek(k);

            fun.accept(new CacheEntryImpl<>(k, v));

            m.put(k, v);
        }

        cache.putAll(m);
    });
}
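A usage sketch for update, assuming a cache named "vectors" with Integer keys and double[] values whose keys are held in local memory on their primary nodes. The cache name, key set and the ignite instance are illustrative, not part of the CacheUtils API.

// Hypothetical usage: double every vector of the (assumed) "vectors" cache in place.
// 'ignite' is an Ignite instance obtained elsewhere, e.g. via Ignition.ignite().
Set<Integer> keys = new HashSet<>(Arrays.asList(1, 2, 3));

CacheUtils.<Integer, double[]>update("vectors", ignite,
    e -> {
        double[] v = e.getValue();

        for (int i = 0; i < v.length; i++)
            v[i] *= 2.0;
    },
    () -> keys);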
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class CacheUtils, method sparseFold.
/**
 * Sparse version of fold. This method is also applicable to sparse zeroes.
 *
 * @param cacheName Cache name.
 * @param folder Fold function operating over cache entries.
 * @param keyFilter Key filter.
 * @param accumulator Accumulator.
 * @param zeroValSupp Zero value supplier.
 * @param defVal Default value.
 * @param defKey Default key.
 * @param defValCnt Default value count.
 * @param isNilpotent Whether folding the default value is a no-op; if {@code true}, default entries are skipped.
 * @return Fold result.
 */
private static <K, V, A> A sparseFold(String cacheName, IgniteBiFunction<Cache.Entry<K, V>, A, A> folder,
    IgnitePredicate<K> keyFilter, BinaryOperator<A> accumulator, IgniteSupplier<A> zeroValSupp, V defVal, K defKey,
    long defValCnt, boolean isNilpotent) {
    A defRes = zeroValSupp.get();

    if (!isNilpotent)
        for (int i = 0; i < defValCnt; i++)
            defRes = folder.apply(new CacheEntryImpl<>(defKey, defVal), defRes);

    Collection<A> totalRes = bcast(cacheName, () -> {
        Ignite ignite = Ignition.localIgnite();
        IgniteCache<K, V> cache = ignite.getOrCreateCache(cacheName);

        int partsCnt = ignite.affinity(cacheName).partitions();

        // Use the affinity check in the ScanQuery filter; otherwise entries would be processed on every node, which is wrong.
        Affinity affinity = ignite.affinity(cacheName);
        ClusterNode locNode = ignite.cluster().localNode();

        A a = zeroValSupp.get();

        // Iterate over all partitions. Some of them will be stored on this local node.
        for (int part = 0; part < partsCnt; part++) {
            int p = part;

            // The query returns an empty cursor if this partition is not stored on this node.
            for (Cache.Entry<K, V> entry : cache.query(new ScanQuery<K, V>(part,
                (k, v) -> affinity.mapPartitionToNode(p) == locNode && (keyFilter == null || keyFilter.apply(k)))))
                a = folder.apply(entry, a);
        }

        return a;
    });

    return totalRes.stream().reduce(defRes, accumulator);
}
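Since sparseFold is private, a caller would go through a public wrapper. The hypothetical sparseSum below shows how the arguments fit together for a sparse sum in which absent keys count as 0.0: adding zero is a no-op, so the wrapper passes isNilpotent = true and the default entries are never folded. The method name, parameters and cache types are assumptions for illustration only.

// Hypothetical wrapper inside CacheUtils: sum of a sparse Double-valued cache,
// treating the (totalKeys - presentKeys) absent entries as 0.0.
public static double sparseSum(String cacheName, long totalKeys, long presentKeys) {
    return CacheUtils.<Integer, Double, Double>sparseFold(cacheName,
        (entry, acc) -> acc + entry.getValue(), // fold one present entry into the partial sum
        null,                                   // no key filter
        Double::sum,                            // combine per-node partial sums
        () -> 0.0,                              // zero value supplier
        0.0,                                    // default value of an absent key
        null,                                   // default key (unused, folding defaults is skipped)
        totalKeys - presentKeys,                // number of absent entries
        true);                                  // adding 0.0 is a no-op, so defaults are skipped
}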
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class CacheUtils, method fold.
/**
 * <b>Currently fold supports only commutative operations.</b>
 *
 * @param cacheName Cache name.
 * @param folder Fold function operating over cache entries.
 * @param keyFilter Key filter.
 * @param <K> Cache key object type.
 * @param <V> Cache value object type.
 * @param <A> Fold result type.
 * @return Fold operation results (one per node).
 */
public static <K, V, A> Collection<A> fold(String cacheName, IgniteBiFunction<CacheEntry<K, V>, A, A> folder,
    IgnitePredicate<K> keyFilter) {
    return bcast(cacheName, () -> {
        Ignite ignite = Ignition.localIgnite();
        IgniteCache<K, V> cache = ignite.getOrCreateCache(cacheName);

        int partsCnt = ignite.affinity(cacheName).partitions();

        // Use the affinity check in the ScanQuery filter; otherwise entries would be processed on every node, which is wrong.
        Affinity affinity = ignite.affinity(cacheName);
        ClusterNode locNode = ignite.cluster().localNode();

        A a = null;

        // Iterate over all partitions. Some of them will be stored on this local node.
        for (int part = 0; part < partsCnt; part++) {
            int p = part;

            // The query returns an empty cursor if this partition is not stored on this node.
            for (Cache.Entry<K, V> entry : cache.query(new ScanQuery<K, V>(part,
                (k, v) -> affinity.mapPartitionToNode(p) == locNode && (keyFilter == null || keyFilter.apply(k)))))
                a = folder.apply(new CacheEntry<>(entry, cache), a);
        }

        return a;
    });
}
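A usage sketch for fold, assuming a cache named "points" with Integer keys and Double values: it counts cache entries, producing one partial count per node and combining them on the caller (counting is commutative, as the method requires). Names and types are illustrative.

// Hypothetical usage: count the entries of the (assumed) "points" cache.
// fold returns one partial result per node; null means a node held no matching entries.
Collection<Long> partials = CacheUtils.<Integer, Double, Long>fold(
    "points",
    (entry, acc) -> (acc == null ? 0L : acc) + 1,
    null); // no key filter

long total = partials.stream()
    .filter(p -> p != null)
    .mapToLong(Long::longValue)
    .sum();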
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Class HadoopTestRoundRobinMrPlanner, method preparePlan.
/**
 * {@inheritDoc}
 */
@Override public HadoopMapReducePlan preparePlan(HadoopJob job, Collection<ClusterNode> top,
    @Nullable HadoopMapReducePlan oldPlan) throws IgniteCheckedException {
    if (top.isEmpty())
        throw new IllegalArgumentException("Topology is empty");

    // Has at least one element.
    Iterator<ClusterNode> it = top.iterator();

    Map<UUID, Collection<HadoopInputSplit>> mappers = new HashMap<>();

    for (HadoopInputSplit block : job.input()) {
        ClusterNode node = it.next();

        Collection<HadoopInputSplit> nodeBlocks = mappers.get(node.id());

        if (nodeBlocks == null) {
            nodeBlocks = new ArrayList<>();

            mappers.put(node.id(), nodeBlocks);
        }

        nodeBlocks.add(block);

        if (!it.hasNext())
            it = top.iterator();
    }

    int[] rdc = new int[job.reducers()];

    for (int i = 0; i < rdc.length; i++)
        rdc[i] = i;

    return new HadoopDefaultMapReducePlan(mappers, Collections.singletonMap(it.next().id(), rdc));
}
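The planner is plain round-robin: walk the topology iterator, assign the next input split to the next node, and restart the iterator once it is exhausted (all reducers then go to the next node in the rotation). The self-contained sketch below mirrors that loop with strings instead of ClusterNode and HadoopInputSplit; all names are illustrative.

import java.util.*;

// Standalone illustration of the round-robin assignment loop (names are illustrative).
public class RoundRobinSketch {
    public static Map<String, List<String>> assign(List<String> nodes, List<String> splits) {
        Map<String, List<String>> plan = new LinkedHashMap<>();
        Iterator<String> it = nodes.iterator();

        for (String split : splits) {
            String node = it.next();

            plan.computeIfAbsent(node, k -> new ArrayList<>()).add(split);

            // Restart the iterator once every node has received a split in this round.
            if (!it.hasNext())
                it = nodes.iterator();
        }

        return plan;
    }

    public static void main(String[] args) {
        System.out.println(assign(List.of("node-1", "node-2"), List.of("s1", "s2", "s3", "s4", "s5")));
        // Prints: {node-1=[s1, s3, s5], node-2=[s2, s4]}
    }
}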