Use of org.apache.ignite.events.CacheEvent in project ignite by apache.
The class GridEventConsumeHandler, method register.
/** {@inheritDoc} */
@Override public RegisterStatus register(final UUID nodeId, final UUID routineId, final GridKernalContext ctx)
    throws IgniteCheckedException {
    assert nodeId != null;
    assert routineId != null;
    assert ctx != null;

    if (cb != null)
        ctx.resource().injectGeneric(cb);

    if (filter != null)
        ctx.resource().injectGeneric(filter);

    if (filter instanceof PlatformEventFilterListener)
        ((PlatformEventFilterListener)filter).initialize(ctx);

    final boolean loc = nodeId.equals(ctx.localNodeId());

    lsnr = new GridLocalEventListener() {
        /** Node ID, routine ID, event. */
        private final Queue<T3<UUID, UUID, Event>> notificationQueue = new LinkedList<>();

        private boolean notificationInProgress;

        @Override public void onEvent(Event evt) {
            if (filter != null && !filter.apply(evt))
                return;

            if (loc) {
                if (!cb.apply(nodeId, evt))
                    ctx.continuous().stopRoutine(routineId);
            }
            else {
                if (ctx.discovery().node(nodeId) == null)
                    return;

                synchronized (notificationQueue) {
                    notificationQueue.add(new T3<>(nodeId, routineId, evt));

                    if (!notificationInProgress) {
                        ctx.getSystemExecutorService().execute(new Runnable() {
                            @Override public void run() {
                                if (!ctx.continuous().lockStopping())
                                    return;

                                try {
                                    while (true) {
                                        T3<UUID, UUID, Event> t3;

                                        synchronized (notificationQueue) {
                                            t3 = notificationQueue.poll();

                                            if (t3 == null) {
                                                notificationInProgress = false;

                                                return;
                                            }
                                        }

                                        try {
                                            Event evt = t3.get3();

                                            EventWrapper wrapper = new EventWrapper(evt);

                                            if (evt instanceof CacheEvent) {
                                                String cacheName = ((CacheEvent)evt).cacheName();

                                                ClusterNode node = ctx.discovery().node(t3.get1());

                                                if (node == null)
                                                    continue;

                                                if (ctx.config().isPeerClassLoadingEnabled()) {
                                                    GridCacheContext cctx = ctx.cache().internalCache(cacheName).context();

                                                    if (cctx.deploymentEnabled() && ctx.discovery().cacheNode(node, cacheName)) {
                                                        wrapper.p2pMarshal(ctx.config().getMarshaller());

                                                        wrapper.cacheName = cacheName;

                                                        cctx.deploy().prepare(wrapper);
                                                    }
                                                }
                                            }

                                            ctx.continuous().addNotification(t3.get1(), t3.get2(), wrapper, null, false, false);
                                        }
                                        catch (ClusterTopologyCheckedException ignored) {
                                            // No-op.
                                        }
                                        catch (Throwable e) {
                                            U.error(ctx.log(GridEventConsumeHandler.class),
                                                "Failed to send event notification to node: " + nodeId, e);
                                        }
                                    }
                                }
                                finally {
                                    ctx.continuous().unlockStopping();
                                }
                            }
                        });

                        notificationInProgress = true;
                    }
                }
            }
        }
    };

    if (F.isEmpty(types))
        types = EVTS_ALL;

    ctx.event().addLocalEventListener(lsnr, types);

    return RegisterStatus.REGISTERED;
}
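GridEventConsumeHandler is the internal handler behind the public IgniteEvents.remoteListen(...) API; a user-level subscription ends up registering a handler like the one above on each node. The following is a minimal sketch of such a subscription, not taken from the project sources: the cache name and event type are illustrative, and it assumes cache events have been enabled on all nodes via IgniteConfiguration.setIncludeEventTypes(...).

import java.util.UUID;

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.events.CacheEvent;
import org.apache.ignite.events.EventType;
import org.apache.ignite.lang.IgniteBiPredicate;
import org.apache.ignite.lang.IgnitePredicate;

public class RemoteCacheEventListenerExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Local callback: runs on this node for every event that passed the remote filter.
            IgniteBiPredicate<UUID, CacheEvent> locLsnr = (nodeId, evt) -> {
                System.out.println("Cache event [name=" + evt.name() + ", key=" + evt.key() + ']');

                return true; // Return false to unsubscribe.
            };

            // Remote filter: evaluated on the node where the event is recorded.
            IgnitePredicate<CacheEvent> rmtFilter = evt -> "myCache".equals(evt.cacheName());

            ignite.events().remoteListen(locLsnr, rmtFilter, EventType.EVT_CACHE_OBJECT_PUT);
        }
    }
}

The split between the local callback and the remote filter mirrors the loc branch and the filter check in onEvent() above.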
Use of org.apache.ignite.events.CacheEvent in project ignite by apache.
The class GridDhtPartitionsExchangeFuture, method warnNoAffinityNodes.
/**
 *
 */
private void warnNoAffinityNodes() {
    List<String> cachesWithoutNodes = null;

    for (String name : cctx.cache().cacheNames()) {
        if (discoCache.cacheAffinityNodes(name).isEmpty()) {
            if (cachesWithoutNodes == null)
                cachesWithoutNodes = new ArrayList<>();

            cachesWithoutNodes.add(name);

            // Fire event even if there is no client cache started.
            if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                Event evt = new CacheEvent(name, cctx.localNode(), cctx.localNode(),
                    "All server nodes have left the cluster.", EventType.EVT_CACHE_NODES_LEFT, 0, false,
                    null, null, null, null, false, null, false, null, null, null);

                cctx.gridEvents().record(evt);
            }
        }
    }

    if (cachesWithoutNodes != null) {
        StringBuilder sb = new StringBuilder("All server nodes for the following caches have left the cluster: ");

        for (int i = 0; i < cachesWithoutNodes.size(); i++) {
            String cache = cachesWithoutNodes.get(i);

            sb.append('\'').append(cache).append('\'');

            if (i != cachesWithoutNodes.size() - 1)
                sb.append(", ");
        }

        U.quietAndWarn(log, sb.toString());
        U.quietAndWarn(log, "Must have server nodes for caches to operate.");
    }
}
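The EVT_CACHE_NODES_LEFT event recorded above can be observed from application code with an ordinary local listener. A minimal sketch, assuming a started Ignite node and the event type enabled via IgniteConfiguration.setIncludeEventTypes(...):

import org.apache.ignite.Ignite;
import org.apache.ignite.events.CacheEvent;
import org.apache.ignite.events.EventType;
import org.apache.ignite.lang.IgnitePredicate;

public class CacheNodesLeftListenerExample {
    /** Registers a local listener for the event fired by warnNoAffinityNodes(). */
    public static void register(Ignite ignite) {
        IgnitePredicate<CacheEvent> lsnr = evt -> {
            System.out.println("No affinity nodes left for cache: " + evt.cacheName());

            return true; // Keep listening.
        };

        ignite.events().localListen(lsnr, EventType.EVT_CACHE_NODES_LEFT);
    }
}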
Use of org.apache.ignite.events.CacheEvent in project ignite by apache.
The class PlatformContextImpl, method writeEvent.
/** {@inheritDoc} */
@Override public void writeEvent(BinaryRawWriterEx writer, Event evt) {
    assert writer != null;

    if (evt == null) {
        writer.writeInt(-1);

        return;
    }

    EventAdapter evt0 = (EventAdapter)evt;

    if (evt0 instanceof CacheEvent) {
        writer.writeInt(2);

        writeCommonEventData(writer, evt0);

        CacheEvent event0 = (CacheEvent)evt0;

        writer.writeString(event0.cacheName());
        writer.writeInt(event0.partition());
        writer.writeBoolean(event0.isNear());
        writeNode(writer, event0.eventNode());
        writer.writeObject(event0.key());
        writer.writeObject(event0.xid());
        writer.writeObject(event0.newValue());
        writer.writeObject(event0.oldValue());
        writer.writeBoolean(event0.hasOldValue());
        writer.writeBoolean(event0.hasNewValue());
        writer.writeUuid(event0.subjectId());
        writer.writeString(event0.closureClassName());
        writer.writeString(event0.taskName());
    }
    else if (evt0 instanceof CacheQueryExecutedEvent) {
        writer.writeInt(3);

        writeCommonEventData(writer, evt0);

        CacheQueryExecutedEvent event0 = (CacheQueryExecutedEvent)evt0;

        writer.writeString(event0.queryType());
        writer.writeString(event0.cacheName());
        writer.writeString(event0.className());
        writer.writeString(event0.clause());
        writer.writeUuid(event0.subjectId());
        writer.writeString(event0.taskName());
    }
    else if (evt0 instanceof CacheQueryReadEvent) {
        writer.writeInt(4);

        writeCommonEventData(writer, evt0);

        CacheQueryReadEvent event0 = (CacheQueryReadEvent)evt0;

        writer.writeString(event0.queryType());
        writer.writeString(event0.cacheName());
        writer.writeString(event0.className());
        writer.writeString(event0.clause());
        writer.writeUuid(event0.subjectId());
        writer.writeString(event0.taskName());
        writer.writeObject(event0.key());
        writer.writeObject(event0.value());
        writer.writeObject(event0.oldValue());
        writer.writeObject(event0.row());
    }
    else if (evt0 instanceof CacheRebalancingEvent) {
        writer.writeInt(5);

        writeCommonEventData(writer, evt0);

        CacheRebalancingEvent event0 = (CacheRebalancingEvent)evt0;

        writer.writeString(event0.cacheName());
        writer.writeInt(event0.partition());
        writeNode(writer, event0.discoveryNode());
        writer.writeInt(event0.discoveryEventType());
        writer.writeString(event0.discoveryEventName());
        writer.writeLong(event0.discoveryTimestamp());
    }
    else if (evt0 instanceof CheckpointEvent) {
        writer.writeInt(6);

        writeCommonEventData(writer, evt0);

        CheckpointEvent event0 = (CheckpointEvent)evt0;

        writer.writeString(event0.key());
    }
    else if (evt0 instanceof DiscoveryEvent) {
        writer.writeInt(7);

        writeCommonEventData(writer, evt0);

        DiscoveryEvent event0 = (DiscoveryEvent)evt0;

        writeNode(writer, event0.eventNode());
        writer.writeLong(event0.topologyVersion());
        writeNodes(writer, event0.topologyNodes());
    }
    else if (evt0 instanceof JobEvent) {
        writer.writeInt(8);

        writeCommonEventData(writer, evt0);

        JobEvent event0 = (JobEvent)evt0;

        writer.writeString(event0.taskName());
        writer.writeString(event0.taskClassName());
        writer.writeObject(event0.taskSessionId());
        writer.writeObject(event0.jobId());
        writeNode(writer, event0.taskNode());
        writer.writeUuid(event0.taskSubjectId());
    }
    else if (evt0 instanceof TaskEvent) {
        writer.writeInt(10);

        writeCommonEventData(writer, evt0);

        TaskEvent event0 = (TaskEvent)evt0;

        writer.writeString(event0.taskName());
        writer.writeString(event0.taskClassName());
        writer.writeObject(event0.taskSessionId());
        writer.writeBoolean(event0.internal());
        writer.writeUuid(event0.subjectId());
    }
    else
        throw new IgniteException("Unsupported event: " + evt);
}
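On the Java side, the CacheEvent fields that writeEvent() serializes for the platform (.NET/C++) bridge are available directly as getters on the event. A minimal sketch of a local listener that reads the same fields, assuming a started node with cache events enabled:

import org.apache.ignite.Ignite;
import org.apache.ignite.events.CacheEvent;
import org.apache.ignite.events.EventType;
import org.apache.ignite.lang.IgnitePredicate;

public class CacheEventFieldsExample {
    /** Logs the fields written by PlatformContextImpl.writeEvent() for CacheEvent (type code 2). */
    public static void register(Ignite ignite) {
        IgnitePredicate<CacheEvent> lsnr = evt -> {
            System.out.println("cache=" + evt.cacheName() +
                ", part=" + evt.partition() +
                ", near=" + evt.isNear() +
                ", key=" + evt.key() +
                ", newVal=" + evt.newValue() +
                ", oldVal=" + evt.oldValue() +
                ", hasNew=" + evt.hasNewValue() +
                ", hasOld=" + evt.hasOldValue());

            return true;
        };

        ignite.events().localListen(lsnr, EventType.EVT_CACHE_OBJECT_PUT, EventType.EVT_CACHE_OBJECT_REMOVED);
    }
}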
Use of org.apache.ignite.events.CacheEvent in project ignite by apache.
The class GridCachePartitionedUnloadEventsSelfTest, method checkObjectUnloadEvents.
/**
 * @param evts Events.
 * @param g Grid.
 * @param keys Keys.
 */
private void checkObjectUnloadEvents(Collection<Event> evts, Ignite g, Collection<?> keys) {
    assertEquals(keys.size(), evts.size());

    for (Event evt : evts) {
        CacheEvent cacheEvt = (CacheEvent)evt;

        assertEquals(EVT_CACHE_REBALANCE_OBJECT_UNLOADED, cacheEvt.type());
        assertEquals(g.cache(DEFAULT_CACHE_NAME).getName(), cacheEvt.cacheName());
        assertEquals(g.cluster().localNode().id(), cacheEvt.node().id());
        assertEquals(g.cluster().localNode().id(), cacheEvt.eventNode().id());
        assertTrue("Unexpected key: " + cacheEvt.key(), keys.contains(cacheEvt.key()));
    }
}
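The evts collection checked here can be gathered in several ways; one option is to query events already recorded on the local node. A sketch only, with a made-up helper name, assuming EVT_CACHE_REBALANCE_OBJECT_UNLOADED is enabled in the node configuration:

import java.util.Collection;

import org.apache.ignite.Ignite;
import org.apache.ignite.events.Event;
import org.apache.ignite.events.EventType;
import org.apache.ignite.lang.IgnitePredicate;

public class UnloadEventQueryExample {
    /** Returns all object-unload events recorded so far on the local node. */
    public static Collection<Event> unloadEvents(Ignite g) {
        IgnitePredicate<Event> all = evt -> true;

        return g.events().localQuery(all, EventType.EVT_CACHE_REBALANCE_OBJECT_UNLOADED);
    }
}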
Use of org.apache.ignite.events.CacheEvent in project ignite by apache.
The class IgniteSourceConnectorTest, method doTest.
/**
 * Tests the source with the specified source configurations.
 *
 * @param srcProps Source properties.
 * @param conditioned Flag indicating whether filtering is enabled.
 * @throws Exception If failed.
 */
private void doTest(Map<String, String> srcProps, boolean conditioned) throws Exception {
    FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(new Callback<Herder.Created<ConnectorInfo>>() {
        @Override public void onCompletion(Throwable error, Herder.Created<ConnectorInfo> info) {
            if (error != null)
                throw new RuntimeException("Failed to create a job!", error);
        }
    });

    herder.putConnectorConfig(srcProps.get(ConnectorConfig.NAME_CONFIG), srcProps, true, cb);

    cb.get();

    // Ugh! To be sure Kafka Connect's worker thread is properly started...
    Thread.sleep(5000);

    final CountDownLatch latch = new CountDownLatch(EVENT_CNT);

    final IgnitePredicate<CacheEvent> locLsnr = new IgnitePredicate<CacheEvent>() {
        @Override public boolean apply(CacheEvent evt) {
            assert evt != null;

            latch.countDown();

            return true;
        }
    };

    grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).localListen(locLsnr, EVT_CACHE_OBJECT_PUT);

    IgniteCache<String, String> cache = grid.cache(CACHE_NAME);

    assertEquals(0, cache.size(CachePeekMode.PRIMARY));

    Map<String, String> keyValMap = new HashMap<>(EVENT_CNT);

    keyValMap.putAll(sendData());

    // Check that all events are processed.
    assertTrue(latch.await(10, TimeUnit.SECONDS));

    grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).stopLocalListen(locLsnr);

    assertEquals(EVENT_CNT, cache.size(CachePeekMode.PRIMARY));

    // Check that the events are transferred to the Kafka broker.
    if (conditioned)
        checkDataDelivered(EVENT_CNT * TOPICS.length / 2);
    else
        checkDataDelivered(EVENT_CNT * TOPICS.length);
}
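Listeners such as locLsnr above only fire if the corresponding event types are enabled in the node configuration, since Ignite records no events by default. A minimal configuration sketch (class name is illustrative, not part of the test):

import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.events.EventType;

public class EventConfigExample {
    /** Enables recording of cache put events so that EVT_CACHE_OBJECT_PUT listeners can fire. */
    public static IgniteConfiguration config() {
        IgniteConfiguration cfg = new IgniteConfiguration();

        cfg.setIncludeEventTypes(EventType.EVT_CACHE_OBJECT_PUT);

        return cfg;
    }
}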