Use of javax.cache.event.CacheEntryEventFilter in project ignite by apache.
The class CacheContinuousQueryOperationP2PTest, method testContinuousQuery.
/**
* @param ccfg Cache configuration.
* @param isClient {@code True} to run the query from the client node.
* @throws Exception If failed.
*/
protected void testContinuousQuery(CacheConfiguration<Object, Object> ccfg, boolean isClient) throws Exception {
ignite(0).createCache(ccfg);
ThreadLocalRandom rnd = ThreadLocalRandom.current();
QueryCursor<?> cur = null;
final Class<Factory<CacheEntryEventFilter>> evtFilterFactory = (Class<Factory<CacheEntryEventFilter>>) getExternalClassLoader().loadClass("org.apache.ignite.tests.p2p.CacheDeploymentEntryEventFilterFactory");
final CountDownLatch latch = new CountDownLatch(10);
ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();
TestLocalListener localLsnr = new TestLocalListener() {
@Override
public void onEvent(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) throws CacheEntryListenerException {
for (CacheEntryEvent<? extends Integer, ? extends Integer> evt : evts) {
latch.countDown();
log.info("Received event: " + evt);
}
}
};
MutableCacheEntryListenerConfiguration<Integer, Integer> lsnrCfg = new MutableCacheEntryListenerConfiguration<>(new FactoryBuilder.SingletonFactory<>(localLsnr), (Factory<? extends CacheEntryEventFilter<? super Integer, ? super Integer>>) (Object) evtFilterFactory.newInstance(), true, true);
qry.setLocalListener(localLsnr);
qry.setRemoteFilterFactory((Factory<? extends CacheEntryEventFilter<Integer, Integer>>) (Object) evtFilterFactory.newInstance());
IgniteCache<Integer, Integer> cache = null;
try {
if (isClient)
cache = grid(NODES - 1).cache(ccfg.getName());
else
cache = grid(rnd.nextInt(NODES - 1)).cache(ccfg.getName());
cur = cache.query(qry);
cache.registerCacheEntryListener(lsnrCfg);
for (int i = 0; i < 10; i++) cache.put(i, i);
assertTrue(latch.await(3, TimeUnit.SECONDS));
} finally {
if (cur != null)
cur.close();
if (cache != null)
cache.deregisterCacheEntryListener(lsnrCfg);
}
}
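The filter factory above is loaded through an external class loader, so its class reaches the grid only via peer class loading. The actual test class is not shown here; a minimal sketch of what such a factory could look like (the name SampleEntryEventFilterFactory and the pass-all condition are assumptions, not the real CacheDeploymentEntryEventFilterFactory):

import javax.cache.configuration.Factory;
import javax.cache.event.CacheEntryEvent;
import javax.cache.event.CacheEntryEventFilter;
import javax.cache.event.CacheEntryListenerException;

/** Hypothetical sketch of a P2P-deployed filter factory; the real test class may differ. */
public class SampleEntryEventFilterFactory implements Factory<CacheEntryEventFilter<Integer, Integer>> {
    /** {@inheritDoc} */
    @Override public CacheEntryEventFilter<Integer, Integer> create() {
        return new CacheEntryEventFilter<Integer, Integer>() {
            /** Passes every event; the test above expects all 10 puts to reach the listener. */
            @Override public boolean evaluate(CacheEntryEvent<? extends Integer, ? extends Integer> evt)
                throws CacheEntryListenerException {
                return true;
            }
        };
    }
}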
Use of javax.cache.event.CacheEntryEventFilter in project ignite by apache.
The class CacheContinuousQueryHandler, method register.
/**
* {@inheritDoc}
*/
@Override
public RegisterStatus register(final UUID nodeId, final UUID routineId, final GridKernalContext ctx) throws IgniteCheckedException {
assert nodeId != null;
assert routineId != null;
assert ctx != null;
initLocalListener(locLsnr, ctx);
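// Remote filter and transformer initialization is chained on the P2P unmarshalling future, so it
// runs only once the corresponding classes have been deployed and unmarshalled on this node.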
if (initFut == null) {
initFut = p2pUnmarshalFut.chain((fut) -> {
try {
fut.get();
initRemoteFilter(getEventFilter0(), ctx);
IgniteClosure trans = getTransformer0();
if (trans != null)
ctx.resource().injectGeneric(trans);
} catch (IgniteCheckedException | ExceptionInInitializerError e) {
throw new IgniteException("Failed to initialize a continuous query.", e);
}
return null;
});
}
if (initFut.error() != null)
throw new IgniteCheckedException("Failed to initialize a continuous query.", initFut.error());
entryBufs = new ConcurrentHashMap<>();
ackBuf = new CacheContinuousQueryAcknowledgeBuffer();
rcvs = new ConcurrentHashMap<>();
this.nodeId = nodeId;
this.routineId = routineId;
this.ctx = ctx;
final boolean loc = nodeId.equals(ctx.localNodeId());
assert !skipPrimaryCheck || loc;
log = ctx.log(CU.CONTINUOUS_QRY_LOG_CATEGORY);
CacheContinuousQueryListener<K, V> lsnr = new CacheContinuousQueryListener<K, V>() {
@Override
public void onBeforeRegister() {
GridCacheContext<K, V> cctx = cacheContext(ctx);
if (cctx != null && !cctx.isLocal())
cctx.topology().readLock();
}
@Override
public void onAfterRegister() {
GridCacheContext<K, V> cctx = cacheContext(ctx);
if (cctx != null && !cctx.isLocal())
cctx.topology().readUnlock();
}
@Override
public void onRegister() {
GridCacheContext<K, V> cctx = cacheContext(ctx);
if (cctx != null && !cctx.isLocal())
locInitUpdCntrs = toCountersMap(cctx.topology().localUpdateCounters(false));
}
@Override
public boolean keepBinary() {
return keepBinary;
}
@Override
public void onEntryUpdated(final CacheContinuousQueryEvent<K, V> evt, boolean primary, final boolean recordIgniteEvt, GridDhtAtomicAbstractUpdateFuture fut) {
if (ignoreExpired && evt.getEventType() == EventType.EXPIRED)
return;
if (log.isDebugEnabled())
log.debug("Entry updated on affinity node [evt=" + evt + ", primary=" + primary + ']');
final GridCacheContext<K, V> cctx = cacheContext(ctx);
// A null cache context means the cache has been stopped.
if (cctx == null)
return;
if (!needNotify(false, cctx, -1, -1, evt))
return;
// skipPrimaryCheck is set only when listening locally for replicated cache events.
assert !skipPrimaryCheck || (cctx.isReplicated() && ctx.localNodeId().equals(nodeId));
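// When asynchronous callbacks are configured, filtering and notification are offloaded to the
// async callback pool, keyed by the event's partition id.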
if (asyncCb) {
ContinuousQueryAsyncClosure clsr = new ContinuousQueryAsyncClosure(primary, evt, recordIgniteEvt, fut);
ctx.pools().asyncCallbackPool().execute(clsr, evt.partitionId());
} else {
final boolean notify = filter(evt);
if (log.isDebugEnabled())
log.debug("Filter invoked for event [evt=" + evt + ", primary=" + primary + ", notify=" + notify + ']');
if (primary || skipPrimaryCheck)
onEntryUpdate(evt, notify, loc, recordIgniteEvt);
else
handleBackupEntry(cctx, evt.entry());
}
}
@Override
public void onUnregister() {
try {
CacheEntryEventFilter filter = getEventFilter();
if (filter instanceof PlatformContinuousQueryFilter)
((PlatformContinuousQueryFilter) filter).onQueryUnregister();
} catch (IgniteCheckedException e) {
if (log.isDebugEnabled()) {
log.debug("Failed to execute the onUnregister callback " + "on the continuoue query listener. " + "[nodeId=" + nodeId + ", routineId=" + routineId + ", cacheName=" + cacheName + ", err=" + e + "]");
}
}
}
@Override
public void cleanupOnAck(Map<Integer, Long> updateCntrs) {
for (Map.Entry<Integer, Long> e : updateCntrs.entrySet()) {
CacheContinuousQueryEventBuffer buf = entryBufs.get(e.getKey());
if (buf != null)
buf.cleanupOnAck(e.getValue());
}
}
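// On partition map exchange completion, buffered entries are flushed and forwarded to the
// subscriber node as backup notifications.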
@Override
public void flushOnExchangeDone(GridKernalContext ctx, AffinityTopologyVersion topVer) {
assert topVer != null;
try {
GridCacheContext<K, V> cctx = cacheContext(ctx);
ClusterNode node = ctx.discovery().node(nodeId);
for (Map.Entry<Integer, CacheContinuousQueryEventBuffer> bufE : entryBufs.entrySet()) {
CacheContinuousQueryEventBuffer buf = bufE.getValue();
Collection<CacheContinuousQueryEntry> entries = buf.flushOnExchange((cntr, filtered) -> createFilteredEntry(cctx.cacheId(), bufE.getKey(), topVer, cntr, filtered));
if (entries == null || node == null)
continue;
for (CacheContinuousQueryEntry e : entries) {
e.markBackup();
if (!e.isFiltered())
prepareEntry(cctx, nodeId, e);
}
ctx.continuous().addBackupNotification(nodeId, routineId, entries, topic);
}
} catch (IgniteCheckedException e) {
U.error(ctx.log(CU.CONTINUOUS_QRY_LOG_CATEGORY), "Failed to send backup event notification to node: " + nodeId, e);
}
}
@Override
public void acknowledgeBackupOnTimeout(GridKernalContext ctx) {
sendBackupAcknowledge(ackBuf.acknowledgeOnTimeout(), routineId, ctx);
}
@Override
public void skipUpdateEvent(CacheContinuousQueryEvent<K, V> evt, AffinityTopologyVersion topVer, boolean primary) {
assert evt != null;
CacheContinuousQueryEntry e = evt.entry();
e.markFiltered();
onEntryUpdated(evt, primary, false, null);
}
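// Invoked when an update produces no delivered event on this node: the update counter is still
// recorded and a filtered entry may be buffered or sent so the counter sequence stays contiguous.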
@Override
public CounterSkipContext skipUpdateCounter(final GridCacheContext cctx, @Nullable CounterSkipContext skipCtx, int part, long cntr, AffinityTopologyVersion topVer, boolean primary) {
if (skipCtx == null)
skipCtx = new CounterSkipContext(part, cntr, topVer);
if (!needNotify(true, cctx, part, cntr, null))
return skipCtx;
if (loc) {
assert !locOnly;
final Collection<CacheEntryEvent<? extends K, ? extends V>> evts = handleEvent(ctx, skipCtx.entry());
if (!evts.isEmpty()) {
if (asyncCb) {
ctx.pools().asyncCallbackPool().execute(new Runnable() {
@Override
public void run() {
try {
notifyLocalListener(evts, getTransformer());
} catch (IgniteCheckedException ex) {
U.error(ctx.log(CU.CONTINUOUS_QRY_LOG_CATEGORY), "Failed to notify local listener.", ex);
}
}
}, part);
} else
skipCtx.addProcessClosure(new Runnable() {
@Override
public void run() {
try {
notifyLocalListener(evts, getTransformer());
} catch (IgniteCheckedException ex) {
U.error(ctx.log(CU.CONTINUOUS_QRY_LOG_CATEGORY), "Failed to notify local listener.", ex);
}
}
});
}
return skipCtx;
}
CacheContinuousQueryEventBuffer buf = partitionBuffer(cctx, part);
final Object entryOrList = buf.processEntry(skipCtx.entry(), !primary);
if (entryOrList != null) {
skipCtx.addProcessClosure(new Runnable() {
@Override
public void run() {
try {
ctx.continuous().addNotification(nodeId, routineId, entryOrList, topic, false, true);
} catch (ClusterTopologyCheckedException ex) {
if (log.isDebugEnabled())
log.debug("Failed to send event notification to node, node left cluster " + "[node=" + nodeId + ", err=" + ex + ']');
} catch (IgniteCheckedException ex) {
U.error(ctx.log(CU.CONTINUOUS_QRY_LOG_CATEGORY), "Failed to send event notification to node: " + nodeId, ex);
}
}
});
}
return skipCtx;
}
@Override
public void onPartitionEvicted(int part) {
entryBufs.remove(part);
}
@Override
public boolean oldValueRequired() {
return oldValRequired;
}
@Override
public boolean notifyExisting() {
return notifyExisting;
}
private String taskName() {
return ctx.security().enabled() ? ctx.task().resolveTaskName(taskHash) : null;
}
@Override
public boolean isPrimaryOnly() {
return locOnly && !skipPrimaryCheck;
}
/**
* Checks whether listeners need to be notified.
*
* @param skipEvt {@code True} if this is a skip counter event.
* @param cctx Cache context.
* @param part Partition id.
* @param cntr Update counter.
* @param evt CQ event.
* @return {@code True} if notification should happen immediately, or {@code false} if it should be delayed.
*/
private boolean needNotify(boolean skipEvt, GridCacheContext cctx, int part, long cntr, CacheContinuousQueryEvent evt) {
assert !skipEvt || evt == null;
// part == -1 && cntr == -1 means skip counter.
assert skipEvt || part == -1 && cntr == -1;
if (!cctx.mvccEnabled() || cctx.isLocal())
return true;
assert locInitUpdCntrs != null;
cntr = skipEvt ? cntr : evt.getPartitionUpdateCounter();
part = skipEvt ? part : evt.partitionId();
T2<Long, Long> initCntr = locInitUpdCntrs.get(part);
// Do not notify listener if entry was updated before the query is started.
return initCntr == null || cntr >= initCntr.get2();
}
};
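// Register the listener with the cache's continuous query manager; if the cache is not started yet,
// the registration is delayed.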
CacheContinuousQueryManager mgr = manager(ctx);
if (mgr == null)
return RegisterStatus.DELAYED;
RegisterStatus regStatus = mgr.registerListener(routineId, lsnr, internal);
if (regStatus == RegisterStatus.REGISTERED)
initFut.listen(res -> sendQueryExecutedEvent());
return regStatus;
}
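The transformer retrieved via getTransformer0() above corresponds to the public ContinuousQueryWithTransformer API. A minimal, hedged sketch of how a caller might configure one (the cache name and the string-formatting transformation are illustrative assumptions, not taken from the handler code):

import javax.cache.configuration.Factory;
import javax.cache.event.CacheEntryEvent;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.ContinuousQueryWithTransformer;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.lang.IgniteClosure;

public class TransformerQuerySketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("myCache"); // assumed cache name

            ContinuousQueryWithTransformer<Integer, String, String> qry = new ContinuousQueryWithTransformer<>();

            // The transformer is executed on the remote node; only the transformed value travels to the subscriber.
            Factory<IgniteClosure<CacheEntryEvent<? extends Integer, ? extends String>, String>> transformerFactory =
                () -> evt -> "key=" + evt.getKey() + ", val=" + evt.getValue();

            qry.setRemoteTransformerFactory(transformerFactory);

            // The local listener receives the already transformed values.
            qry.setLocalListener(values -> values.forEach(v -> System.out.println("Transformed event: " + v)));

            try (QueryCursor<?> cur = cache.query(qry)) {
                cache.put(1, "one");
            }
        }
    }
}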
Use of javax.cache.event.CacheEntryEventFilter in project ignite by apache.
The class UsingContinuousQueries, method remoteFilterExample.
public static void remoteFilterExample() {
try (Ignite ignite = Ignition.start()) {
IgniteCache<Integer, String> cache = ignite.getOrCreateCache("myCache");
// tag::remoteFilter[]
ContinuousQuery<Integer, String> qry = new ContinuousQuery<>();
qry.setLocalListener(events -> events.forEach(event -> System.out.format("Entry: key=[%s] value=[%s]\n", event.getKey(), event.getValue())));
qry.setRemoteFilterFactory(new Factory<CacheEntryEventFilter<Integer, String>>() {
@Override
public CacheEntryEventFilter<Integer, String> create() {
return new CacheEntryEventFilter<Integer, String>() {
@Override
public boolean evaluate(CacheEntryEvent<? extends Integer, ? extends String> e) {
System.out.format("the value for key [%s] was updated from [%s] to [%s]\n", e.getKey(), e.getOldValue(), e.getValue());
return true;
}
};
}
});
// end::remoteFilter[]
cache.query(qry);
cache.put(1, "1");
}
}
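Because Factory and CacheEntryEventFilter are both single-method interfaces, the same wiring can be written more compactly with lambdas. A sketch under the same imports, plus javax.cache.configuration.Factory (the even-key condition is an arbitrary illustration):

// Compact lambda-based variant of the remote filter factory above (illustrative condition).
ContinuousQuery<Integer, String> qry = new ContinuousQuery<>();

qry.setLocalListener(events -> events.forEach(
    event -> System.out.format("Entry: key=[%s] value=[%s]\n", event.getKey(), event.getValue())));

// The factory itself is what gets serialized and shipped to remote nodes; the filter is created there.
Factory<CacheEntryEventFilter<Integer, String>> filterFactory =
    () -> evt -> evt.getKey() % 2 == 0;

qry.setRemoteFilterFactory(filterFactory);

As with the anonymous-class variant, the filter runs on the node where the update happens, so only matching events cross the network.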
Use of javax.cache.event.CacheEntryEventFilter in project ignite by apache.
The class CacheContinuousQueryExample, method main.
/**
* Executes example.
*
* @param args Command line arguments, none required.
* @throws Exception If example execution failed.
*/
public static void main(String[] args) throws Exception {
try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
System.out.println();
System.out.println(">>> Cache continuous query example started.");
// Auto-close cache at the end of the example.
try (IgniteCache<Integer, String> cache = ignite.getOrCreateCache(CACHE_NAME)) {
int keyCnt = 20;
// These entries will be returned by the initial query's scan predicate.
for (int i = 0; i < keyCnt; i++) cache.put(i, Integer.toString(i));
// Create new continuous query.
ContinuousQuery<Integer, String> qry = new ContinuousQuery<>();
qry.setInitialQuery(new ScanQuery<>(new IgniteBiPredicate<Integer, String>() {
@Override
public boolean apply(Integer key, String val) {
return key > 10;
}
}));
// Callback that is called locally when update notifications are received.
qry.setLocalListener(new CacheEntryUpdatedListener<Integer, String>() {
@Override
public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends String>> evts) {
for (CacheEntryEvent<? extends Integer, ? extends String> e : evts) System.out.println("Updated entry [key=" + e.getKey() + ", val=" + e.getValue() + ']');
}
});
// This filter will be evaluated remotely on all nodes.
// Entries that pass this filter will be sent to the caller.
qry.setRemoteFilterFactory(new Factory<CacheEntryEventFilter<Integer, String>>() {
@Override
public CacheEntryEventFilter<Integer, String> create() {
return new CacheEntryEventFilter<Integer, String>() {
@Override
public boolean evaluate(CacheEntryEvent<? extends Integer, ? extends String> e) {
return e.getKey() > 10;
}
};
}
});
// Execute query.
try (QueryCursor<Cache.Entry<Integer, String>> cur = cache.query(qry)) {
// Iterate through existing data.
for (Cache.Entry<Integer, String> e : cur) System.out.println("Queried existing entry [key=" + e.getKey() + ", val=" + e.getValue() + ']');
// Add a few more keys and watch more query notifications.
for (int i = keyCnt; i < keyCnt + 10; i++) cache.put(i, Integer.toString(i));
// Wait a while for the callback to be notified about the remaining puts.
Thread.sleep(2000);
}
} finally {
// A distributed cache can be removed from the cluster only by a #destroyCache() call.
ignite.destroyCache(CACHE_NAME);
}
}
}
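Beyond the initial query and the remote filter, ContinuousQuery exposes a few batching knobs. A brief sketch with illustrative values (by default each event is sent immediately):

// Optional batching settings on ContinuousQuery (values here are illustrative).
ContinuousQuery<Integer, String> qry = new ContinuousQuery<>();

// Group notifications into pages of up to 10 entries instead of sending them one by one.
qry.setPageSize(10);

// Flush a partially filled page at least every 500 ms.
qry.setTimeInterval(500);

// Keep remote filters registered even if the node that started the query leaves the cluster.
qry.setAutoUnsubscribe(false);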
Use of javax.cache.event.CacheEntryEventFilter in project ignite by apache.
The class GridP2PContinuousDeploymentClientDisconnectTest, method testContinuousQueryRemoteFilterFactory.
/**
* The test starts 1 server node and 1 client node. The class-loading request for the {@link #P2P_TEST_OBJ_RSRC_NAME}
* resource is blocked on the client node. The client node tries to deploy a continuous query with a remote filter
* factory for the cache {@link #DEFAULT_CACHE_NAME}.
* It is expected that an exception with the 'Failed to initialize a continuous query.' error message is thrown
* and that the server node does not fail.
*
* @throws Exception If failed.
*/
@Test
public void testContinuousQueryRemoteFilterFactory() throws Exception {
final Class<Factory<? extends CacheEntryEventFilter<Integer, Integer>>> rmtFilterFactoryCls = (Class<Factory<? extends CacheEntryEventFilter<Integer, Integer>>>) getExternalClassLoader().loadClass(REMOTE_FILTER_FACTORY_CLS_NAME);
AbstractContinuousQuery<Integer, Integer> qry = new ContinuousQuery<Integer, Integer>().setLocalListener(evts -> {
// No-op.
}).setRemoteFilterFactory(rmtFilterFactoryCls.newInstance());
IgniteEx client = grid(1);
LogListener lsnr = LogListener.matches("Failed to initialize a continuous query.").build();
testLog.registerListener(lsnr);
IgniteCache<Integer, Integer> cache = client.cache(DEFAULT_CACHE_NAME);
cache.query(qry);
assertTrue(lsnr.check());
// Check that the failure handler was not called.
assertFalse(failure.get());
}
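These P2P deployment scenarios only work when peer class loading is enabled on the nodes. A minimal sketch of the relevant configuration (an assumed standalone setup, not the test's own config):

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;

public class PeerClassLoadingSketch {
    public static void main(String[] args) {
        IgniteConfiguration cfg = new IgniteConfiguration();

        // Allow classes such as remote filter factories to be fetched from peer nodes on demand.
        cfg.setPeerClassLoadingEnabled(true);

        try (Ignite ignite = Ignition.start(cfg)) {
            // Continuous queries deployed from other nodes can now load their filter factory classes via P2P.
        }
    }
}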