Use of org.apache.ignite.lang.IgniteBiPredicate in project ignite by apache: class RenameIndexTreeTest, method testRenameFromTask.
/**
* Checking the correctness of {@link DurableBackgroundCleanupIndexTreeTaskV2#findIndexRootPages}
* and {@link DurableBackgroundCleanupIndexTreeTaskV2#renameIndexRootPages}.
*
* @throws Exception If failed.
*/
@Test
public void testRenameFromTask() throws Exception {
    IgniteEx n = startGrid(0);

    IgniteCache<Integer, Person> cache = n.cache(DEFAULT_CACHE_NAME);

    populate(cache, 100);

    String idxName = "IDX0";
    createIdx(cache, idxName);

    SortedIndexDefinition idxDef = indexDefinition(index(n, cache, idxName));

    GridCacheContext<Integer, Person> cctx = cacheContext(cache);

    String oldTreeName = idxDef.treeName();
    int segments = idxDef.segments();

    assertExistIndexRoot(cache, oldTreeName, segments, true);

    // Root pages found directly must match the ones found by the task.
    Map<Integer, RootPage> rootPages0 = findIndexRoots(cache, oldTreeName, segments);
    Map<Integer, RootPage> rootPages1 = findIndexRootPages(cctx.group(), cctx.name(), oldTreeName, segments);

    assertEqualsCollections(toPageIds(rootPages0), toPageIds(rootPages1));

    long currSegIdx = walMgr(n).currentSegment();

    // Rename the index root pages: the old tree name must disappear, the new one must appear.
    String newTreeName = UUID.randomUUID().toString();

    renameIndexRootPages(cctx.group(), cctx.name(), oldTreeName, newTreeName, segments);

    assertExistIndexRoot(cache, oldTreeName, segments, false);
    assertExistIndexRoot(cache, newTreeName, segments, true);

    assertTrue(findIndexRootPages(cctx.group(), cctx.name(), oldTreeName, segments).isEmpty());

    rootPages0 = findIndexRoots(cache, newTreeName, segments);
    rootPages1 = findIndexRootPages(cctx.group(), cctx.name(), newTreeName, segments);

    assertEqualsCollections(toPageIds(rootPages0), toPageIds(rootPages1));

    // Exactly one rename record must have been written to the WAL since the rename started.
    WALPointer start = new WALPointer(currSegIdx, 0, 0);

    IgniteBiPredicate<WALRecord.RecordType, WALPointer> pred = (t, p) -> t == INDEX_ROOT_PAGE_RENAME_RECORD;

    try (WALIterator it = walMgr(n).replay(start, pred)) {
        List<WALRecord> records = stream(it.spliterator(), false).map(IgniteBiTuple::get2).collect(toList());

        assertEquals(1, records.size());

        IndexRenameRootPageRecord record = (IndexRenameRootPageRecord) records.get(0);

        assertEquals(cctx.cacheId(), record.cacheId());
        assertEquals(oldTreeName, record.oldTreeName());
        assertEquals(newTreeName, record.newTreeName());
        assertEquals(segments, record.segments());
    }
}
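The WAL filtering shown above is simply the IgniteBiPredicate-based replay filter. Below is a minimal standalone sketch, not taken from the test: it assumes the started IgniteEx instance n from above with persistence enabled and a surrounding method that declares throws Exception; IgniteWriteAheadLogManager, WALRecord and WALIterator are Ignite internal API, so the exact accessors may differ between versions.

// Sketch (not from the test): assumes a started IgniteEx "n" with persistence; internal WAL API.
IgniteWriteAheadLogManager wal = n.context().cache().context().wal();

// Start replaying from the beginning of the current WAL segment.
WALPointer from = new WALPointer(wal.currentSegment(), 0, 0);

// Deserialize and return only index root page rename records.
IgniteBiPredicate<WALRecord.RecordType, WALPointer> onlyRenames =
    (type, ptr) -> type == WALRecord.RecordType.INDEX_ROOT_PAGE_RENAME_RECORD;

try (WALIterator it = wal.replay(from, onlyRenames)) {
    for (IgniteBiTuple<WALPointer, WALRecord> rec : it)
        System.out.println(rec.get1() + " -> " + rec.get2().type());
}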
Use of org.apache.ignite.lang.IgniteBiPredicate in project ignite by apache: class SocketStreamerSelfTest, method test.
/**
* @param converter Converter.
* @param delim Delimiter.
* @param r Runnable.
* @param oneMessagePerTuple Whether one tuple is extracted per message.
* @throws Exception If failed.
*/
private void test(@Nullable SocketMessageConverter<Message> converter, @Nullable byte[] delim, Runnable r, boolean oneMessagePerTuple) throws Exception {
    SocketStreamer<Message, Integer, String> sockStmr = null;

    Ignite ignite = grid(0);

    IgniteCache<Integer, String> cache = ignite.cache(DEFAULT_CACHE_NAME);

    cache.clear();

    try (IgniteDataStreamer<Integer, String> stmr = ignite.dataStreamer(DEFAULT_CACHE_NAME)) {
        stmr.allowOverwrite(true);
        stmr.autoFlushFrequency(10);

        sockStmr = new SocketStreamer<>();

        sockStmr.setIgnite(ignite);
        sockStmr.setStreamer(stmr);
        sockStmr.setPort(port);
        sockStmr.setDelimiter(delim);

        if (oneMessagePerTuple) {
            sockStmr.setSingleTupleExtractor(new StreamSingleTupleExtractor<Message, Integer, String>() {
                @Override public Map.Entry<Integer, String> extract(Message msg) {
                    return new IgniteBiTuple<>(msg.key, msg.val);
                }
            });
        }
        else {
            sockStmr.setMultipleTupleExtractor(new StreamMultipleTupleExtractor<Message, Integer, String>() {
                @Override public Map<Integer, String> extract(Message msg) {
                    Map<Integer, String> answer = new HashMap<>();

                    for (int value : msg.values) {
                        answer.put(value, Integer.toString(value));
                    }

                    return answer;
                }
            });
        }

        if (converter != null)
            sockStmr.setConverter(converter);

        final CountDownLatch latch = new CountDownLatch(CNT);

        final GridConcurrentHashSet<CacheEvent> evts = new GridConcurrentHashSet<>();

        // Local listener counting every put event fired on the nodes that host the cache.
        IgniteBiPredicate<UUID, CacheEvent> locLsnr = new IgniteBiPredicate<UUID, CacheEvent>() {
            @Override public boolean apply(UUID uuid, CacheEvent evt) {
                evts.add(evt);

                latch.countDown();

                return true;
            }
        };

        ignite.events(ignite.cluster().forCacheNodes(DEFAULT_CACHE_NAME)).remoteListen(locLsnr, null, EVT_CACHE_OBJECT_PUT);

        sockStmr.start();

        r.run();

        latch.await();

        for (int i = 0; i < CNT; i++) {
            Object val = cache.get(i);
            String exp = Integer.toString(i);

            if (!exp.equals(val))
                log.error("Unexpected cache value [key=" + i + ", exp=" + exp + ", val=" + val + ", evts=" + evts + ']');

            assertEquals(exp, val);
        }

        assertEquals(CNT, cache.size(CachePeekMode.PRIMARY));
    }
    finally {
        if (sockStmr != null)
            sockStmr.stop();
    }
}
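Outside of the streamer test, the same IgniteBiPredicate usage is just a remote event subscription with a local callback. A minimal sketch, assuming a running node reachable via Ignition.ignite(), an illustrative cache name "myCache", EVT_CACHE_OBJECT_PUT enabled through IgniteConfiguration#setIncludeEventTypes, and imports omitted:

// Sketch: "myCache" and the latch count are illustrative; put events must be enabled in the node config.
Ignite ignite = Ignition.ignite();

CountDownLatch latch = new CountDownLatch(10);

// Local callback invoked for every put event fired on the nodes hosting the cache.
IgniteBiPredicate<UUID, CacheEvent> locLsnr = (nodeId, evt) -> {
    System.out.println("Put on node " + nodeId + ": key=" + evt.key());

    latch.countDown();

    return true; // Keep listening.
};

ignite.events(ignite.cluster().forCacheNodes("myCache")).remoteListen(locLsnr, null, EventType.EVT_CACHE_OBJECT_PUT);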
Use of org.apache.ignite.lang.IgniteBiPredicate in project ignite by apache: class IgniteCacheAbstractQuerySelfTest, method testScanQueryEvents.
/**
* @throws Exception If failed.
*/
@Test
public void testScanQueryEvents() throws Exception {
    final Map<Integer, Integer> map = new ConcurrentHashMap<>();
    final IgniteCache<Integer, Integer> cache = jcache(Integer.class, Integer.class);
    final boolean evtsDisabled = cache.getConfiguration(CacheConfiguration.class).isEventsDisabled();

    final CountDownLatch latch = new CountDownLatch(evtsDisabled ? 0 : 10);
    final CountDownLatch execLatch = new CountDownLatch(evtsDisabled ? 0 : cacheMode() == REPLICATED ? 1 : gridCount());

    IgnitePredicate[] objReadLsnrs = new IgnitePredicate[gridCount()];
    IgnitePredicate[] qryExecLsnrs = new IgnitePredicate[gridCount()];

    for (int i = 0; i < gridCount(); i++) {
        // Object-read listener: collects every key/value returned by the scan.
        IgnitePredicate<Event> pred = new IgnitePredicate<Event>() {
            @Override public boolean apply(Event evt) {
                assert evt instanceof CacheQueryReadEvent;

                if (evtsDisabled)
                    fail("Cache events are disabled");

                CacheQueryReadEvent<Integer, Integer> qe = (CacheQueryReadEvent<Integer, Integer>) evt;

                assertEquals(SCAN.name(), qe.queryType());
                assertEquals(cache.getName(), qe.cacheName());

                assertNull(qe.className());
                assertNull(qe.clause());
                assertNotNull(qe.scanQueryFilter());
                assertNull(qe.continuousQueryFilter());
                assertNull(qe.arguments());

                map.put(qe.key(), qe.value());

                latch.countDown();

                return true;
            }
        };

        grid(i).events().localListen(pred, EVT_CACHE_QUERY_OBJECT_READ);

        objReadLsnrs[i] = pred;

        // Query-executed listener: fires once per node that runs the scan.
        IgnitePredicate<Event> execPred = new IgnitePredicate<Event>() {
            @Override public boolean apply(Event evt) {
                assert evt instanceof CacheQueryExecutedEvent;

                if (evtsDisabled)
                    fail("Cache events are disabled");

                CacheQueryExecutedEvent qe = (CacheQueryExecutedEvent) evt;

                assertEquals(SCAN.name(), qe.queryType());
                assertEquals(cache.getName(), qe.cacheName());

                assertNull(qe.className());
                assertNull(qe.clause());
                assertNotNull(qe.scanQueryFilter());
                assertNull(qe.continuousQueryFilter());
                assertNull(qe.arguments());

                execLatch.countDown();

                return true;
            }
        };

        grid(i).events().localListen(execPred, EVT_CACHE_QUERY_EXECUTED);

        qryExecLsnrs[i] = execPred;
    }

    try {
        for (int i = 0; i < 20; i++)
            cache.put(i, i);

        // Scan filter: only entries with key >= 10 pass, so exactly 10 read events are expected.
        IgniteBiPredicate<Integer, Integer> filter = new IgniteBiPredicate<Integer, Integer>() {
            @Override public boolean apply(Integer k, Integer v) {
                return k >= 10;
            }
        };

        QueryCursor<Cache.Entry<Integer, Integer>> q = cache.query(new ScanQuery<>(filter));

        q.getAll();

        assert latch.await(1000, MILLISECONDS);
        assert execLatch.await(1000, MILLISECONDS);

        if (!evtsDisabled) {
            assertEquals(10, map.size());

            for (int i = 10; i < 20; i++)
                assertEquals(i, map.get(i).intValue());
        }
    }
    finally {
        for (int i = 0; i < gridCount(); i++) {
            grid(i).events().stopLocalListen(objReadLsnrs[i]);
            grid(i).events().stopLocalListen(qryExecLsnrs[i]);
        }
    }
}
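The scan filter itself can also be written as a lambda, since IgniteBiPredicate is a functional interface; note that the read and execution events checked above are only fired when they are enabled via IgniteConfiguration#setIncludeEventTypes. A minimal sketch of the filtered scan, where the cache name "myCache" and the Integer key/value types are illustrative assumptions:

// Sketch: "myCache" is an illustrative cache name on an already running node.
IgniteCache<Integer, Integer> cache = Ignition.ignite().cache("myCache");

// Server-side filter: only entries with key >= 10 are sent back to the caller.
IgniteBiPredicate<Integer, Integer> filter = (k, v) -> k >= 10;

try (QueryCursor<Cache.Entry<Integer, Integer>> cur = cache.query(new ScanQuery<>(filter))) {
    for (Cache.Entry<Integer, Integer> e : cur)
        System.out.println(e.getKey() + " -> " + e.getValue());
}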
Use of org.apache.ignite.lang.IgniteBiPredicate in project ignite by apache: class IgniteQueryDedicatedPoolTest, method testScanQueryUsesDedicatedThreadPool.
/**
* Tests that scan queries are executed in the dedicated query pool.
*
* @throws Exception If failed.
*/
@Test
public void testScanQueryUsesDedicatedThreadPool() throws Exception {
    startGrid("server");

    try (Ignite client = startClientGrid("client")) {
        IgniteCache<Integer, Integer> cache = client.cache(CACHE_NAME);

        cache.put(0, 0);

        // The filter returns true only if it is executed in the dedicated query pool.
        QueryCursor<Cache.Entry<Object, Object>> cursor = cache.query(new ScanQuery<>(new IgniteBiPredicate<Object, Object>() {
            @Override public boolean apply(Object o, Object o2) {
                return F.eq(GridIoManager.currentPolicy(), GridIoPolicy.QUERY_POOL);
            }
        }));

        assertEquals(1, cursor.getAll().size());

        cursor.close();
    }
}
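The dedicated pool checked by the filter is the query thread pool. If its size needs tuning, it can be set when the node is configured; a minimal sketch, where the instance name and pool size are arbitrary examples:

IgniteConfiguration cfg = new IgniteConfiguration()
    .setIgniteInstanceName("server") // Arbitrary instance name.
    .setQueryThreadPoolSize(8); // Size of the query pool that scan query filters run in.

Ignite server = Ignition.start(cfg);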
Use of org.apache.ignite.lang.IgniteBiPredicate in project ignite by apache: class ComputeUtils, method getData.
/**
* Extracts partition {@code data} from the local storage; if it's not found in the local storage, recovers this
* {@code data} from a partition {@code upstream} and {@code context}. Be aware that this method should be called
* from the node where the partition is placed.
*
* @param ignite Ignite instance.
* @param upstreamCacheName Name of an {@code upstream} cache.
* @param filter Filter for {@code upstream} data.
* @param transformerBuilder Builder of upstream transformers.
* @param datasetCacheName Name of a partition {@code context} cache.
* @param datasetId Dataset ID.
* @param partDataBuilder Partition data builder.
* @param env Learning environment.
* @param isKeepBinary Whether the {@code upstream} cache should be accessed in binary mode ({@code withKeepBinary()}).
* @param <K> Type of a key in {@code upstream} data.
* @param <V> Type of a value in {@code upstream} data.
* @param <C> Type of a partition {@code context}.
* @param <D> Type of a partition {@code data}.
* @return Partition {@code data}.
*/
public static <K, V, C extends Serializable, D extends AutoCloseable> D getData(Ignite ignite,
    String upstreamCacheName, IgniteBiPredicate<K, V> filter, UpstreamTransformerBuilder transformerBuilder,
    String datasetCacheName, UUID datasetId, PartitionDataBuilder<K, V, C, D> partDataBuilder,
    LearningEnvironment env, boolean isKeepBinary) {
    PartitionDataStorage dataStorage = (PartitionDataStorage) ignite.cluster().nodeLocalMap()
        .computeIfAbsent(String.format(DATA_STORAGE_KEY_TEMPLATE, datasetId), key -> new PartitionDataStorage());

    final int part = env.partition();

    return dataStorage.computeDataIfAbsent(part, () -> {
        IgniteCache<Integer, C> learningCtxCache = ignite.cache(datasetCacheName);

        C ctx = learningCtxCache.get(part);

        IgniteCache<K, V> upstreamCache = ignite.cache(upstreamCacheName);

        if (isKeepBinary)
            upstreamCache = upstreamCache.withKeepBinary();

        // Local, partition-scoped scan of the upstream cache, filtered by the user-provided IgniteBiPredicate.
        ScanQuery<K, V> qry = new ScanQuery<>();

        qry.setLocal(true);
        qry.setPartition(part);
        qry.setFilter(filter);

        UpstreamTransformer transformer = transformerBuilder.build(env);
        UpstreamTransformer transformerCp = Utils.copy(transformer);

        long cnt = computeCount(upstreamCache, qry, transformer);

        if (cnt > 0) {
            try (QueryCursor<UpstreamEntry<K, V>> cursor = upstreamCache.query(qry, e -> new UpstreamEntry<>(e.getKey(), e.getValue()))) {
                Iterator<UpstreamEntry<K, V>> it = cursor.iterator();

                Stream<UpstreamEntry> transformedStream = transformerCp.transform(Utils.asStream(it, cnt).map(x -> (UpstreamEntry) x));

                it = Utils.asStream(transformedStream.iterator()).map(x -> (UpstreamEntry<K, V>) x).iterator();

                Iterator<UpstreamEntry<K, V>> iter = new IteratorWithConcurrentModificationChecker<>(it, cnt,
                    "Cache expected to be not modified during dataset data building [partition=" + part + ']');

                return partDataBuilder.build(env, iter, cnt, ctx);
            }
        }

        return null;
    });
}
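The core of the recovery path above is a partition-local, filtered ScanQuery executed on the node that owns the partition. A standalone sketch of that pattern using only the public API; the cache name "upstream", the value type, the partition number and the filter are illustrative assumptions, and imports are omitted:

// Sketch: cache name "upstream", value type and partition number are illustrative.
Ignite ignite = Ignition.ignite();

int part = 0;

// Run the closure on the primary node for the partition so that the local scan sees its data.
ignite.compute().affinityRun(Collections.singletonList("upstream"), part, () -> {
    IgniteCache<Integer, double[]> upstream = Ignition.localIgnite().cache("upstream");

    // Upstream filter, analogous to the filter argument of getData.
    IgniteBiPredicate<Integer, double[]> filter = (k, v) -> v != null && v.length > 0;

    ScanQuery<Integer, double[]> qry = new ScanQuery<>();

    qry.setLocal(true);
    qry.setPartition(part);
    qry.setFilter(filter);

    try (QueryCursor<Cache.Entry<Integer, double[]>> cur = upstream.query(qry)) {
        for (Cache.Entry<Integer, double[]> e : cur)
            System.out.println("key=" + e.getKey());
    }
});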