Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in project ignite by apache.
The class GridEventConsumeHandler, method register.
/** {@inheritDoc} */
@Override
public RegisterStatus register(final UUID nodeId, final UUID routineId, final GridKernalContext ctx) throws IgniteCheckedException {
    assert nodeId != null;
    assert routineId != null;
    assert ctx != null;

    if (cb != null)
        ctx.resource().injectGeneric(cb);

    final boolean loc = nodeId.equals(ctx.localNodeId());

    lsnr = new GridLocalEventListener() {
        /** Pending notifications: (node ID, routine ID, event). */
        private final Queue<T3<UUID, UUID, Event>> notificationQueue = new LinkedList<>();

        /** Whether a queue-draining task is currently scheduled. */
        private boolean notificationInProgress;

        @Override
        public void onEvent(Event evt) {
            if (filter != null && !filter.apply(evt))
                return;

            if (loc) {
                if (!cb.apply(nodeId, evt))
                    ctx.continuous().stopRoutine(routineId);
            }
            else {
                if (ctx.discovery().node(nodeId) == null)
                    return;

                synchronized (notificationQueue) {
                    notificationQueue.add(new T3<>(nodeId, routineId, evt));

                    if (!notificationInProgress) {
                        ctx.pools().getSystemExecutorService().execute(new Runnable() {
                            @Override
                            public void run() {
                                if (!ctx.continuous().lockStopping())
                                    return;

                                try {
                                    while (true) {
                                        T3<UUID, UUID, Event> t3;

                                        synchronized (notificationQueue) {
                                            t3 = notificationQueue.poll();

                                            if (t3 == null) {
                                                notificationInProgress = false;

                                                return;
                                            }
                                        }

                                        try {
                                            Event evt = t3.get3();

                                            EventWrapper wrapper = new EventWrapper(evt);

                                            if (evt instanceof CacheEvent) {
                                                String cacheName = ((CacheEvent)evt).cacheName();

                                                ClusterNode node = ctx.discovery().node(t3.get1());

                                                if (node == null)
                                                    continue;

                                                if (ctx.config().isPeerClassLoadingEnabled() && ctx.discovery().cacheNode(node, cacheName)) {
                                                    GridCacheAdapter cache = ctx.cache().internalCache(cacheName);

                                                    if (cache != null && cache.context().deploymentEnabled()) {
                                                        wrapper.p2pMarshal(ctx.config().getMarshaller());

                                                        wrapper.cacheName = cacheName;

                                                        cache.context().deploy().prepare(wrapper);
                                                    }
                                                }
                                            }

                                            ctx.continuous().addNotification(t3.get1(), t3.get2(), wrapper, null, false, false);
                                        }
                                        catch (ClusterTopologyCheckedException ignored) {
                                            // No-op.
                                        }
                                        catch (Throwable e) {
                                            U.error(ctx.log(GridEventConsumeHandler.class), "Failed to send event notification to node: " + nodeId, e);
                                        }
                                    }
                                }
                                finally {
                                    ctx.continuous().unlockStopping();
                                }
                            }
                        });

                        notificationInProgress = true;
                    }
                }
            }
        }
    };

    if (F.isEmpty(types))
        types = EVTS_ALL;

    p2pUnmarshalFut.listen((fut) -> {
        if (fut.error() == null) {
            try {
                initFilter(filter, ctx);
            }
            catch (IgniteCheckedException e) {
                throw F.wrap(e);
            }

            ctx.event().addLocalEventListener(lsnr, types);
        }
    });

    return RegisterStatus.REGISTERED;
}
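The handler above backs Ignite's public remote event API: registration ultimately runs through GridEventConsumeHandler.register() when user code calls IgniteEvents.remoteListen. A minimal, hedged sketch of that entry point (the event type and listener bodies are illustrative, not taken from the snippet):

import java.util.UUID;
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.events.EventType;

// Sketch: the public API whose registration path runs through the handler above.
Ignite ignite = Ignition.ignite();

UUID routineId = ignite.events().remoteListen(
    // Local callback; returning false stops the routine, mirroring
    // ctx.continuous().stopRoutine(routineId) in the handler above.
    (nodeId, evt) -> {
        System.out.println("Event from " + nodeId + ": " + evt.name());
        return true;
    },
    // Remote filter, applied on the nodes where events originate.
    evt -> true,
    EventType.EVT_CACHE_OBJECT_PUT);

Note that the listened-to event types must also be enabled on the cluster, e.g. via IgniteConfiguration#setIncludeEventTypes; otherwise no events are recorded to deliver.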
Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in project ignite by apache.
The class GridEventConsumeHandler, method notifyCallback.
/**
 * @param nodeId Node ID.
 * @param routineId Routine ID.
 * @param objs Notification objects.
 * @param ctx Kernal context.
 */
@Override
public void notifyCallback(UUID nodeId, UUID routineId, Collection<?> objs, GridKernalContext ctx) {
    assert nodeId != null;
    assert routineId != null;
    assert objs != null;
    assert ctx != null;

    for (Object obj : objs) {
        assert obj instanceof EventWrapper;

        EventWrapper wrapper = (EventWrapper)obj;

        if (wrapper.bytes != null) {
            assert ctx.config().isPeerClassLoadingEnabled();

            GridCacheAdapter cache = ctx.cache().internalCache(wrapper.cacheName);

            ClassLoader ldr = null;

            try {
                if (cache != null) {
                    GridCacheDeploymentManager depMgr = cache.context().deploy();

                    GridDeploymentInfo depInfo = wrapper.depInfo;

                    if (depInfo != null)
                        depMgr.p2pContext(nodeId, depInfo.classLoaderId(), depInfo.userVersion(), depInfo.deployMode(), depInfo.participants());

                    ldr = depMgr.globalLoader();
                }
                else {
                    U.warn(ctx.log(getClass()), "Received cache event for cache that is not configured locally " +
                        "when peer class loading is enabled: " + wrapper.cacheName + ". Will try to unmarshal " +
                        "with default class loader.");
                }

                wrapper.p2pUnmarshal(ctx.config().getMarshaller(), U.resolveClassLoader(ldr, ctx.config()));
            }
            catch (IgniteCheckedException e) {
                U.error(ctx.log(getClass()), "Failed to unmarshal event.", e);
            }
        }

        if (!cb.apply(nodeId, wrapper.evt)) {
            ctx.continuous().stopRoutine(routineId);

            break;
        }
    }
}
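On the delivery side, cb.apply(...) returning false is the internal equivalent of the user cancelling the subscription. A hedged one-line sketch, assuming the ignite instance and routineId from the earlier remoteListen sketch:

// Sketch: stopping the routine from user code; this reaches the same
// ctx.continuous().stopRoutine(routineId) path as a false-returning callback.
ignite.events().stopRemoteListen(routineId);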
Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in project ignite by apache.
The class GridContinuousProcessor, method processStartRequestV2.
/**
 * @param topVer Current topology version.
 * @param snd Sender.
 * @param msg Start request.
 */
private void processStartRequestV2(final AffinityTopologyVersion topVer, final ClusterNode snd, final StartRoutineDiscoveryMessageV2 msg) {
    StartRequestDataV2 reqData = msg.startRequestData();

    ContinuousRoutineInfo routineInfo = new ContinuousRoutineInfo(snd.id(), msg.routineId(), reqData.handlerBytes(),
        reqData.nodeFilterBytes(), reqData.bufferSize(), reqData.interval(), reqData.autoUnsubscribe());

    routinesInfo.addRoutineInfo(routineInfo);

    final DiscoCache discoCache = ctx.discovery().discoCache(topVer);

    // Should not use marshaller and send messages from discovery thread.
    ctx.pools().getSystemExecutorService().execute(new Runnable() {
        @Override
        public void run() {
            if (snd.id().equals(ctx.localNodeId())) {
                StartFuture fut = startFuts.get(msg.routineId());

                if (fut != null)
                    fut.initRemoteNodes(discoCache);

                return;
            }

            StartRequestDataV2 reqData = msg.startRequestData();

            Exception err = null;

            IgnitePredicate<ClusterNode> nodeFilter = null;

            byte[] cntrs = null;

            if (reqData.nodeFilterBytes() != null) {
                try {
                    if (ctx.config().isPeerClassLoadingEnabled() && reqData.className() != null) {
                        String clsName = reqData.className();

                        GridDeploymentInfo depInfo = reqData.deploymentInfo();

                        GridDeployment dep = ctx.deploy().getGlobalDeployment(depInfo.deployMode(), clsName, clsName,
                            depInfo.userVersion(), snd.id(), depInfo.classLoaderId(), depInfo.participants(), null);

                        if (dep == null)
                            throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName);

                        nodeFilter = U.unmarshal(marsh, reqData.nodeFilterBytes(), U.resolveClassLoader(dep.classLoader(), ctx.config()));
                    }
                    else
                        nodeFilter = U.unmarshal(marsh, reqData.nodeFilterBytes(), U.resolveClassLoader(ctx.config()));

                    if (nodeFilter != null)
                        ctx.resource().injectGeneric(nodeFilter);
                }
                catch (Exception e) {
                    err = e;

                    U.error(log, "Failed to unmarshal continuous routine filter [routineId=" + msg.routineId +
                        ", srcNodeId=" + snd.id() + ']', e);
                }
            }

            boolean register = err == null && (nodeFilter == null || nodeFilter.apply(ctx.discovery().localNode()));

            if (register) {
                try {
                    GridContinuousHandler hnd = U.unmarshal(marsh, reqData.handlerBytes(), U.resolveClassLoader(ctx.config()));

                    if (ctx.config().isPeerClassLoadingEnabled())
                        hnd.p2pUnmarshal(snd.id(), ctx);

                    if (msg.keepBinary()) {
                        assert hnd instanceof CacheContinuousQueryHandler : hnd;

                        ((CacheContinuousQueryHandler)hnd).keepBinary(true);
                    }

                    registerHandler(snd.id(), msg.routineId, hnd, reqData.bufferSize(), reqData.interval(), reqData.autoUnsubscribe(), false);

                    if (hnd.isQuery()) {
                        GridCacheProcessor proc = ctx.cache();

                        if (proc != null) {
                            GridCacheAdapter cache = ctx.cache().internalCache(hnd.cacheName());

                            if (cache != null && !cache.isLocal() && cache.context().userCache()) {
                                CachePartitionPartialCountersMap cntrsMap = cache.context().topology().localUpdateCounters(false);

                                cntrs = U.marshal(marsh, cntrsMap);
                            }
                        }
                    }
                }
                catch (Exception e) {
                    err = e;

                    U.error(log, "Failed to register continuous routine handler [routineId=" + msg.routineId +
                        ", srcNodeId=" + snd.id() + ']', e);
                }
            }

            sendMessageStartResult(snd, msg.routineId(), cntrs, err);
        }
    });
}
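processStartRequestV2 handles the discovery-phase start of any continuous routine, including continuous queries: in the hnd.isQuery() branch it looks up the GridCacheAdapter and marshals the cache's local partition update counters back to the sender. A hedged sketch of a continuous query that would arrive at this method on each server node (the cache name is illustrative, and the ignite instance is assumed to be in scope):

import javax.cache.Cache;
import javax.cache.event.CacheEntryEvent;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.ContinuousQuery;
import org.apache.ignite.cache.query.QueryCursor;

// Sketch: starting a continuous query; its start request is processed by
// processStartRequestV2 on every node, and the hnd.isQuery() branch there
// returns the local partition update counters to the initiator.
IgniteCache<Integer, String> cache = ignite.getOrCreateCache("myCache"); // illustrative cache name

ContinuousQuery<Integer, String> qry = new ContinuousQuery<>();

qry.setLocalListener(evts -> {
    for (CacheEntryEvent<? extends Integer, ? extends String> e : evts)
        System.out.println("Updated: " + e.getKey() + " -> " + e.getValue());
});

try (QueryCursor<Cache.Entry<Integer, String>> cur = cache.query(qry)) {
    cache.put(1, "one"); // triggers a notification through the registered handler
}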
Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in project ignite by apache.
The class IgniteTxPessimisticOriginatingNodeFailureAbstractSelfTest, method testTxOriginatingNodeFails.
/**
 * @param keys Keys to update.
 * @param fullFailure Flag indicating whether to simulate rollback state.
 * @throws Exception If failed.
 */
protected void testTxOriginatingNodeFails(Collection<Integer> keys, final boolean fullFailure) throws Exception {
    assertFalse(keys.isEmpty());

    final Collection<IgniteKernal> grids = new ArrayList<>();

    ClusterNode txNode = grid(originatingNode()).localNode();

    for (int i = 1; i < gridCount(); i++)
        grids.add((IgniteKernal)grid(i));

    failingNodeId = grid(0).localNode().id();

    final Map<Integer, String> map = new HashMap<>();

    final String initVal = "initialValue";

    for (Integer key : keys) {
        grid(originatingNode()).cache(DEFAULT_CACHE_NAME).put(key, initVal);

        map.put(key, String.valueOf(key));
    }

    Map<Integer, Collection<ClusterNode>> nodeMap = new HashMap<>();

    info("Node being checked: " + grid(1).localNode().id());

    for (Integer key : keys) {
        Collection<ClusterNode> nodes = new ArrayList<>();

        nodes.addAll(grid(1).affinity(DEFAULT_CACHE_NAME).mapKeyToPrimaryAndBackups(key));

        nodes.remove(txNode);

        nodeMap.put(key, nodes);
    }

    info("Starting tx [values=" + map + ", topVer=" + ((IgniteKernal)grid(1)).context().discovery().topologyVersion() + ']');

    if (fullFailure)
        ignoreMessages(ignoreMessageClasses(), F.asList(grid(1).localNode().id()));

    final IgniteEx originatingNodeGrid = grid(originatingNode());

    GridTestUtils.runAsync(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            IgniteCache<Integer, String> cache = originatingNodeGrid.cache(DEFAULT_CACHE_NAME);

            assertNotNull(cache);

            Transaction tx = originatingNodeGrid.transactions().txStart();

            assertEquals(PESSIMISTIC, tx.concurrency());

            try {
                cache.putAll(map);

                info("Before commitAsync");

                IgniteFuture<?> fut = tx.commitAsync();

                info("Got future for commitAsync().");

                fut.get(3, TimeUnit.SECONDS);
            }
            catch (IgniteFutureTimeoutException ignored) {
                info("Failed to wait for commit future completion [fullFailure=" + fullFailure + ']');
            }

            return null;
        }
    }).get();

    info(">>> Stopping originating node " + txNode);

    G.stop(grid(originatingNode()).name(), true);

    ignoreMessages(Collections.<Class<?>>emptyList(), Collections.<UUID>emptyList());

    info(">>> Stopped originating node: " + txNode.id());

    boolean txFinished = GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            for (IgniteKernal g : grids) {
                GridCacheAdapter<?, ?> cache = g.internalCache(DEFAULT_CACHE_NAME);

                IgniteTxManager txMgr = cache.isNear() ? ((GridNearCacheAdapter)cache).dht().context().tm() : cache.context().tm();

                int txNum = txMgr.idMapSize();

                if (txNum != 0)
                    return false;
            }

            return true;
        }
    }, 10000);

    assertTrue(txFinished);

    info("Transactions finished.");

    for (Map.Entry<Integer, Collection<ClusterNode>> e : nodeMap.entrySet()) {
        final Integer key = e.getKey();
        final String val = map.get(key);

        assertFalse(e.getValue().isEmpty());

        for (ClusterNode node : e.getValue()) {
            final UUID checkNodeId = node.id();

            compute(G.ignite(checkNodeId).cluster().forNode(node)).call(new IgniteCallable<Void>() {
                /** Auto-injected Ignite instance. */
                @IgniteInstanceResource
                private Ignite ignite;

                @Override
                public Void call() throws Exception {
                    IgniteCache<Integer, String> cache = ignite.cache(DEFAULT_CACHE_NAME);

                    assertNotNull(cache);

                    if (atomicityMode() != TRANSACTIONAL_SNAPSHOT)
                        assertEquals("Failed to check entry value on node: " + checkNodeId, fullFailure ? initVal : val, cache.localPeek(key));

                    return null;
                }
            });
        }
    }

    awaitPartitionMapExchange();

    for (Map.Entry<Integer, String> e : map.entrySet()) {
        long cntr0 = -1;

        for (Ignite g : G.allGrids()) {
            Integer key = e.getKey();

            assertEquals(fullFailure ? initVal : e.getValue(), g.cache(DEFAULT_CACHE_NAME).get(key));

            if (g.affinity(DEFAULT_CACHE_NAME).isPrimaryOrBackup(((IgniteEx)g).localNode(), key)) {
                long nodeCntr = updateCoutner(g, key);

                if (cntr0 == -1)
                    cntr0 = nodeCntr;

                assertEquals(cntr0, nodeCntr);
            }
        }
    }
}
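The waitForCondition block above is how the test confirms that no transactions remain pending: it reaches the internal IgniteTxManager through GridCacheAdapter, unwrapping a near cache to its DHT context first. The same check extracted as a standalone helper (a sketch; the method name is ours, not Ignite's):

// Sketch: does this node still track any transactions for the given cache?
// Mirrors the condition body above; 'g' is a test node's IgniteKernal.
private static boolean hasPendingTxs(IgniteKernal g, String cacheName) {
    GridCacheAdapter<?, ?> cache = g.internalCache(cacheName);

    // A near cache keeps its tx manager in the underlying DHT cache context.
    IgniteTxManager tm = cache.isNear()
        ? ((GridNearCacheAdapter<?, ?>)cache).dht().context().tm()
        : cache.context().tm();

    return tm.idMapSize() != 0;
}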
Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in project ignite by apache.
The class PartitionEvictionOrderTest, method testSyncCachesEvictedAtFirst.
/**
 * Tests that {@link CacheRebalanceMode#SYNC} caches are evicted at first.
 */
@Test
@WithSystemProperty(key = IgniteSystemProperties.IGNITE_EVICTION_PERMITS, value = "1")
@WithSystemProperty(key = IGNITE_PDS_WAL_REBALANCE_THRESHOLD, value = "500_000")
public void testSyncCachesEvictedAtFirst() throws Exception {
    IgniteEx node0 = startGrid(0);

    node0.cluster().state(ACTIVE);

    IgniteEx node1 = startGrid(1);

    node0.cluster().setBaselineTopology(node1.cluster().topologyVersion());

    GridCacheAdapter<Object, Object> utilCache0 = grid(0).context().cache().internalCache(CU.UTILITY_CACHE_NAME);

    IgniteCache<Object, Object> cache = node0.getOrCreateCache(DEFAULT_CACHE_NAME);

    for (int i = 0; i < 1000; i++) {
        utilCache0.put(i, i);

        cache.put(i, i);
    }

    awaitPartitionMapExchange();

    stopGrid(0);

    GridCacheAdapter<Object, Object> utilCache1 = grid(1).context().cache().internalCache(CU.UTILITY_CACHE_NAME);

    IgniteInternalCache<Object, Object> cache2 = grid(1).context().cache().cache(DEFAULT_CACHE_NAME);

    for (int i = 0; i < 2000; i++) {
        try {
            cache2.put(i, i + 1);

            utilCache1.put(i, i + 1);
        }
        catch (IgniteCheckedException e) {
            e.printStackTrace();
        }
    }

    List<T2<Integer, Integer>> evictionOrder = Collections.synchronizedList(new ArrayList<>());

    TestDependencyResolver rslvr = new TestDependencyResolver(new DependencyResolver() {
        @Override
        public <T> T resolve(T instance) {
            if (instance instanceof GridDhtPartitionTopologyImpl) {
                GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl)instance;

                top.partitionFactory((ctx, grp, id, recovery) -> new GridDhtLocalPartition(ctx, grp, id, recovery) {
                    @Override
                    public long clearAll(EvictionContext evictionCtx) throws NodeStoppingException {
                        evictionOrder.add(new T2<>(grp.groupId(), id));

                        return super.clearAll(evictionCtx);
                    }
                });
            }

            return instance;
        }
    });

    startGrid(0, rslvr);

    awaitPartitionMapExchange(true, true, null);

    assertEquals(utilCache0.affinity().partitions() + grid(0).cachex(DEFAULT_CACHE_NAME).affinity().partitions(), evictionOrder.size());

    for (int i = 0; i < utilCache0.affinity().partitions(); i++)
        assertEquals(CU.UTILITY_CACHE_GROUP_ID, evictionOrder.get(i).get1().intValue());
}
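The SYNC-first ordering the test asserts hinges on the utility cache's rebalance mode. A user cache opts into the same behavior through its configuration; a minimal hedged sketch (the cache name is illustrative, and an ignite instance is assumed to be in scope):

import org.apache.ignite.cache.CacheRebalanceMode;
import org.apache.ignite.configuration.CacheConfiguration;

// Sketch: configuring a cache with SYNC rebalance mode, the property the
// test above relies on for eviction ordering.
CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<>("syncCache"); // illustrative name

ccfg.setRebalanceMode(CacheRebalanceMode.SYNC);

ignite.getOrCreateCache(ccfg);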