Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in project ignite by apache.
The class IgniteDrDataStreamerCacheUpdater, method receive().
/**
* {@inheritDoc}
*/
@Override
public void receive(IgniteCache<KeyCacheObject, CacheObject> cache0, Collection<Map.Entry<KeyCacheObject, CacheObject>> col) {
try {
String cacheName = cache0.getConfiguration(CacheConfiguration.class).getName();
GridKernalContext ctx = ((IgniteKernal) cache0.unwrap(Ignite.class)).context();
IgniteLogger log = ctx.log(IgniteDrDataStreamerCacheUpdater.class);
GridCacheAdapter internalCache = ctx.cache().internalCache(cacheName);
CacheOperationContext opCtx = ((IgniteCacheProxy) cache0).context().operationContextPerCall();
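// Preserve any per-call operation context (e.g. keepBinary) by wrapping the internal cache in a proxy.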
IgniteInternalCache cache = opCtx != null ? new GridCacheProxyImpl(internalCache.context(), internalCache, opCtx) : internalCache;
assert !F.isEmpty(col);
if (log.isDebugEnabled())
log.debug("Running DR put job [nodeId=" + ctx.localNodeId() + ", cacheName=" + cacheName + ']');
CacheObjectContext cacheObjCtx = cache.context().cacheObjectContext();
for (Map.Entry<KeyCacheObject, CacheObject> entry0 : col) {
GridCacheRawVersionedEntry entry = (GridCacheRawVersionedEntry) entry0;
entry.unmarshal(cacheObjCtx, ctx.config().getMarshaller());
KeyCacheObject key = entry.getKey();
// Ensure that the receiver does not receive special-purpose values for TTL and expire time.
assert entry.ttl() != CU.TTL_NOT_CHANGED && entry.ttl() != CU.TTL_ZERO && entry.ttl() >= 0;
assert entry.expireTime() != CU.EXPIRE_TIME_CALCULATE && entry.expireTime() >= 0;
CacheObject cacheVal = entry.getValue();
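// A null value means the entry should be removed; otherwise build conflict info, adding expiration data when a concrete TTL is set.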
GridCacheDrInfo val = cacheVal != null
    ? (entry.ttl() != CU.TTL_ETERNAL
        ? new GridCacheDrExpirationInfo(cacheVal, entry.version(), entry.ttl(), entry.expireTime())
        : new GridCacheDrInfo(cacheVal, entry.version()))
    : null;
if (val == null)
cache.removeAllConflict(Collections.singletonMap(key, entry.version()));
else
cache.putAllConflict(Collections.singletonMap(key, val));
}
if (log.isDebugEnabled())
log.debug("DR put job finished [nodeId=" + ctx.localNodeId() + ", cacheName=" + cacheName + ']');
} catch (IgniteCheckedException e) {
throw U.convertException(e);
}
}
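For context, a StreamReceiver such as the updater above runs on the receiving side of an IgniteDataStreamer. The DR updater itself is installed internally by Ignite's data-center replication code, but the attachment point is the same public API. A minimal sketch, not taken from the original source, with "myCache" and the trivial visitor used purely as placeholders:
// Minimal sketch: attaching a custom StreamReceiver to a data streamer.
try (Ignite ignite = Ignition.start()) {
    ignite.getOrCreateCache("myCache"); // Placeholder cache.
    try (IgniteDataStreamer<Integer, String> streamer = ignite.dataStreamer("myCache")) {
        streamer.allowOverwrite(true); // Entries are passed through the configured receiver.
        streamer.receiver(StreamVisitor.from((cache, e) -> cache.put(e.getKey(), e.getValue())));
        streamer.addData(1, "value-1");
    }
}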
Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in project ignite by apache.
The class GridCommandHandlerConsistencyTest, method fillCache().
/**
*/
private void fillCache(String name, Ignite filtered, boolean incVal) throws Exception {
for (Ignite node : G.allGrids()) {
if (node.equals(filtered))
continue;
// Waiting for cache internals to init.
while (((IgniteEx) node).cachex(name) == null)
    U.sleep(1);
}
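// Shared version manager (obtained via grid(1)), used to produce incremental versions for the seeded entries.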
GridCacheVersionManager mgr = ((GridCacheAdapter) (grid(1)).cachex(name).cache()).context().shared().versions();
for (int key = 0; key < PARTITIONS; key++) {
List<Ignite> nodes = new ArrayList<>();
nodes.add(primaryNode(key, name));
nodes.addAll(backupNodes(key, name));
Collections.shuffle(nodes);
int val = key;
Object obj;
for (Ignite node : nodes) {
IgniteInternalCache cache = ((IgniteEx) node).cachex(name);
GridCacheAdapter adapter = ((GridCacheAdapter) cache.cache());
GridCacheEntryEx entry = adapter.entryEx(key);
val = incVal ? ++val : val;
if (binaryCache()) {
BinaryObjectBuilder builder = node.binary().builder("org.apache.ignite.TestValue");
builder.setField("val", val);
obj = builder.build();
} else
obj = val;
boolean init = entry.initialValue(
    new CacheObjectImpl(obj, null), // Incremental or same value.
    mgr.next(entry.context().kernalContext().discovery().topologyVersion()), // Incremental version.
    0, 0, false, AffinityTopologyVersion.NONE, GridDrType.DR_NONE, false, false);
assertTrue("iterableKey " + key + " already inited", init);
}
}
}
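The essential trick in the test above is writing through the internal entry API so that each replica can be seeded with its own value and version. A condensed sketch of that pattern, reusing the same internal calls as the snippet (imports omitted, as elsewhere on this page; node, name, key and val are the test's variables):
// Condensed sketch of the seeding pattern (internal, test-only API).
IgniteInternalCache cache = ((IgniteEx) node).cachex(name);
GridCacheAdapter adapter = (GridCacheAdapter) cache.cache();
GridCacheVersionManager versions = adapter.context().shared().versions();
GridCacheEntryEx entry = adapter.entryEx(key);
// initialValue(...) succeeds only for an entry that has not been initialized yet (the test asserts this),
// which is what lets divergent values and versions be planted on primary and backups.
boolean inited = entry.initialValue(new CacheObjectImpl(val, null),
    versions.next(entry.context().kernalContext().discovery().topologyVersion()),
    0, 0, false, AffinityTopologyVersion.NONE, GridDrType.DR_NONE, false, false);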
Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in project ignite by apache.
The class DataStreamProcessor, method localUpdate().
/**
* @param nodeId Node id.
* @param req Request.
* @param updater Updater.
* @param topic Topic.
*/
private void localUpdate(final UUID nodeId, final DataStreamerRequest req, final StreamReceiver<K, V> updater, final Object topic) {
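// The internal IsolatedUpdater receiver implies allowOverwrite == false; any other receiver allows overwrites.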
final boolean allowOverwrite = !(updater instanceof DataStreamerImpl.IsolatedUpdater);
try {
GridCacheAdapter cache = ctx.cache().internalCache(req.cacheName());
if (cache == null) {
throw new IgniteCheckedException("Cache not created or already destroyed: " + req.cacheName());
}
GridCacheContext cctx = cache.context();
DataStreamerUpdateJob job = null;
GridFutureAdapter waitFut = null;
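// For isolated (no-overwrite) updates, hold the topology read lock while validating the request's topology version.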
if (!allowOverwrite)
cctx.topology().readLock();
GridDhtTopologyFuture topWaitFut = null;
try {
Exception remapErr = null;
AffinityTopologyVersion streamerFutTopVer = null;
if (!allowOverwrite) {
GridDhtTopologyFuture topFut = cctx.topologyVersionFuture();
AffinityTopologyVersion topVer = topFut.isDone() ? topFut.topologyVersion() : topFut.initialVersion();
if (topVer.compareTo(req.topologyVersion()) > 0) {
remapErr = new ClusterTopologyCheckedException("DataStreamer will retry " + "data transfer at stable topology [reqTop=" + req.topologyVersion() + ", topVer=" + topFut.initialVersion() + ", node=remote]");
} else if (!topFut.isDone())
topWaitFut = topFut;
else
streamerFutTopVer = topFut.topologyVersion();
}
if (remapErr != null) {
sendResponse(nodeId, topic, req.requestId(), remapErr, req.forceLocalDeployment());
return;
} else if (topWaitFut == null) {
job = new DataStreamerUpdateJob(ctx, log, req.cacheName(), req.entries(), req.ignoreDeploymentOwnership(), req.skipStore(), req.keepBinary(), updater);
waitFut = allowOverwrite ? null : cctx.mvcc().addDataStreamerFuture(streamerFutTopVer);
}
} finally {
if (!allowOverwrite)
cctx.topology().readUnlock();
}
if (topWaitFut != null) {
// Need to call 'listen' after the topology read lock is released.
topWaitFut.listen(new IgniteInClosure<IgniteInternalFuture<AffinityTopologyVersion>>() {
@Override
public void apply(IgniteInternalFuture<AffinityTopologyVersion> e) {
localUpdate(nodeId, req, updater, topic);
}
});
return;
}
try {
job.call();
sendResponse(nodeId, topic, req.requestId(), null, req.forceLocalDeployment());
} finally {
if (waitFut != null)
waitFut.onDone();
}
} catch (Throwable e) {
sendResponse(nodeId, topic, req.requestId(), e, req.forceLocalDeployment());
if (e instanceof Error)
throw (Error) e;
}
}
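When the receiver is the isolated updater (allowOverwrite == false), the method above resolves to one of three outcomes. A condensed outline of that decision, reusing the method's own calls:
// Condensed outline of the isolated-update path in localUpdate().
GridDhtTopologyFuture topFut = cctx.topologyVersionFuture();
AffinityTopologyVersion topVer = topFut.isDone() ? topFut.topologyVersion() : topFut.initialVersion();
if (topVer.compareTo(req.topologyVersion()) > 0) {
    // 1. The sender's topology is stale: reply with ClusterTopologyCheckedException so the streamer remaps.
}
else if (!topFut.isDone()) {
    // 2. An exchange is in progress: listen on the topology future and re-run localUpdate() when it completes
    //    (the listener is added only after the topology read lock is released).
}
else {
    // 3. The topology is stable: run DataStreamerUpdateJob and register a data streamer future in MVCC,
    //    completing it in the finally block once the job has been executed.
}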
Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in project ignite by apache.
The class DataStreamerImpl, method load0().
/**
* @param entries Entries.
* @param resFut Result future.
* @param activeKeys Active keys.
* @param remaps Remaps count.
* @param remapNode Node for remap. If an update with {@code allowOverwrite() == false} fails on one node,
* we don't need to send the update request to all affinity nodes again, provided the topology version has not changed.
* @param remapTopVer Topology version.
*/
private void load0(Collection<? extends DataStreamerEntry> entries, final GridFutureAdapter<Object> resFut, @Nullable final Collection<KeyCacheObjectWrapper> activeKeys, final int remaps, ClusterNode remapNode, AffinityTopologyVersion remapTopVer) {
try {
assert entries != null;
final boolean remap = remaps > 0;
if (!remap) {
// Failed data should be processed prior to new data.
acquireRemapSemaphore();
}
if (!isWarningPrinted) {
synchronized (this) {
if (!allowOverwrite() && !isWarningPrinted) {
U.warn(log, "Data streamer will not overwrite existing cache entries for better performance " + "(to change, set allowOverwrite to true)");
}
isWarningPrinted = true;
}
}
Map<ClusterNode, Collection<DataStreamerEntry>> mappings = new HashMap<>();
boolean initPda = ctx.deploy().enabled() && jobPda == null;
GridCacheAdapter cache = ctx.cache().internalCache(cacheName);
if (cache == null)
throw new IgniteCheckedException("Cache not created or already destroyed.");
GridCacheContext cctx = cache.context();
GridCacheGateway gate = null;
AffinityTopologyVersion topVer;
if (!cctx.isLocal()) {
GridDhtPartitionsExchangeFuture exchFut = ctx.cache().context().exchange().lastTopologyFuture();
if (!exchFut.isDone()) {
ExchangeActions acts = exchFut.exchangeActions();
if (acts != null && acts.cacheStopped(CU.cacheId(cacheName)))
throw new CacheStoppedException(cacheName);
}
// It is safe to block here even if the cache gate is acquired.
topVer = exchFut.get();
} else
topVer = ctx.cache().context().exchange().readyAffinityVersion();
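// Resolve per-partition affinity assignments for the selected topology version.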
List<List<ClusterNode>> assignments = cctx.affinity().assignments(topVer);
if (!allowOverwrite() && !cctx.isLocal()) {
// Cases where cctx is required.
gate = cctx.gate();
gate.enter();
}
try {
for (DataStreamerEntry entry : entries) {
List<ClusterNode> nodes;
try {
KeyCacheObject key = entry.getKey();
assert key != null;
if (initPda) {
if (cacheObjCtx.addDeploymentInfo())
jobPda = new DataStreamerPda(key.value(cacheObjCtx, false), entry.getValue() != null ? entry.getValue().value(cacheObjCtx, false) : null, rcvr);
else if (rcvr != null)
jobPda = new DataStreamerPda(rcvr);
initPda = false;
}
if (key.partition() == -1)
key.partition(cctx.affinity().partition(key, false));
if (!allowOverwrite() && remapNode != null && F.eq(topVer, remapTopVer))
nodes = Collections.singletonList(remapNode);
else
nodes = nodes(key, topVer, cctx);
} catch (IgniteCheckedException e) {
resFut.onDone(e);
return;
}
if (F.isEmpty(nodes)) {
resFut.onDone(new ClusterTopologyException("Failed to map key to node " + "(no nodes with cache found in topology) [infos=" + entries.size() + ", cacheName=" + cacheName + ']'));
return;
}
for (ClusterNode node : nodes) {
Collection<DataStreamerEntry> col = mappings.get(node);
if (col == null)
mappings.put(node, col = new ArrayList<>());
col.add(entry);
}
}
for (final Map.Entry<ClusterNode, Collection<DataStreamerEntry>> e : mappings.entrySet()) {
final ClusterNode node = e.getKey();
final UUID nodeId = e.getKey().id();
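// Reuse (or lazily create) the per-node buffer that batches entries destined for this node.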
Buffer buf = bufMappings.get(nodeId);
if (buf == null) {
Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey()));
if (old != null)
buf = old;
}
final Collection<DataStreamerEntry> entriesForNode = e.getValue();
IgniteInClosure<IgniteInternalFuture<?>> lsnr = new IgniteInClosure<IgniteInternalFuture<?>>() {
@Override
public void apply(IgniteInternalFuture<?> t) {
try {
t.get();
if (activeKeys != null) {
for (DataStreamerEntry e : entriesForNode) activeKeys.remove(new KeyCacheObjectWrapper(e.getKey()));
if (activeKeys.isEmpty())
resFut.onDone();
} else {
assert entriesForNode.size() == 1;
// That was a single key, so complete the result future right away.
resFut.onDone();
}
} catch (IgniteClientDisconnectedCheckedException e1) {
if (log.isDebugEnabled())
log.debug("Future finished with disconnect error [nodeId=" + nodeId + ", err=" + e1 + ']');
resFut.onDone(e1);
} catch (IgniteCheckedException e1) {
if (log.isDebugEnabled())
log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']');
if (cancelled) {
resFut.onDone(new IgniteCheckedException("Data streamer has been cancelled: " + DataStreamerImpl.this, e1));
} else if (remaps + 1 > maxRemapCnt) {
resFut.onDone(new IgniteCheckedException("Failed to finish operation (too many remaps): " + remaps, e1));
} else if (X.hasCause(e1, IgniteClusterReadOnlyException.class)) {
resFut.onDone(new IgniteClusterReadOnlyException("Failed to finish operation. Cluster in read-only mode!", e1));
} else {
try {
remapSem.acquire();
final Runnable r = new Runnable() {
@Override
public void run() {
try {
if (cancelled)
closedException();
load0(entriesForNode, resFut, activeKeys, remaps + 1, node, topVer);
} catch (Throwable ex) {
resFut.onDone(new IgniteCheckedException("DataStreamer remapping failed. ", ex));
} finally {
remapSem.release();
}
}
};
dataToRemap.add(r);
if (!remapOwning.get() && remapOwning.compareAndSet(false, true)) {
ctx.closure().callLocalSafe(new GPC<Boolean>() {
@Override
public Boolean call() {
boolean locked = true;
while (locked || !dataToRemap.isEmpty()) {
if (!locked && !remapOwning.compareAndSet(false, true))
return false;
try {
Runnable r = dataToRemap.poll();
if (r != null)
r.run();
} finally {
if (!dataToRemap.isEmpty())
locked = true;
else {
remapOwning.set(false);
locked = false;
}
}
}
return true;
}
}, true);
}
} catch (InterruptedException e2) {
resFut.onDone(e2);
}
}
}
}
};
GridCompoundFuture opFut = new SilentCompoundFuture();
opFut.listen(lsnr);
final List<GridFutureAdapter<?>> futs;
try {
futs = buf.update(entriesForNode, topVer, assignments, opFut, remap);
opFut.markInitialized();
} catch (IgniteInterruptedCheckedException e1) {
resFut.onDone(e1);
return;
}
if (ctx.discovery().node(nodeId) == null) {
if (bufMappings.remove(nodeId, buf)) {
final Buffer buf0 = buf;
waitAffinityAndRun(new GridPlainRunnable() {
@Override
public void run() {
buf0.onNodeLeft();
if (futs != null) {
Throwable ex = new ClusterTopologyCheckedException("Failed to wait for request completion (node has left): " + nodeId);
for (int i = 0; i < futs.size(); i++) futs.get(i).onDone(ex);
}
}
}, ctx.discovery().topologyVersion(), false);
}
}
}
} finally {
if (gate != null)
gate.leave();
}
} catch (Exception ex) {
resFut.onDone(new IgniteCheckedException("DataStreamer data loading failed.", ex));
}
}
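At the core of load0() is grouping entries into per-node batches according to affinity. A condensed sketch of just that step, reusing the method's calls (computeIfAbsent stands in for the explicit get/putIfAbsent in the original):
// Condensed sketch of the per-node batching performed in load0().
Map<ClusterNode, Collection<DataStreamerEntry>> mappings = new HashMap<>();
for (DataStreamerEntry entry : entries) {
    KeyCacheObject key = entry.getKey();
    if (key.partition() == -1)
        key.partition(cctx.affinity().partition(key, false)); // Cache the partition on the key.
    // nodes(...) resolves the target affinity nodes for the key on the pinned topology version
    // (or the single remap node when retrying on an unchanged topology).
    for (ClusterNode n : nodes(key, topVer, cctx))
        mappings.computeIfAbsent(n, x -> new ArrayList<>()).add(entry);
}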
Use of org.apache.ignite.internal.processors.cache.GridCacheAdapter in project ignite by apache.
The class GridContinuousProcessor, method processStartRequest().
/**
* @param node Sender.
* @param req Start request.
*/
private void processStartRequest(ClusterNode node, StartRoutineDiscoveryMessage req) {
if (node.id().equals(ctx.localNodeId()))
return;
UUID routineId = req.routineId();
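// If the request carries a deserialization error relevant to this node, record the error and skip handler registration.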
if (req.deserializationException() != null && checkNodeFilter(req)) {
IgniteCheckedException err = new IgniteCheckedException(req.deserializationException());
req.addError(node.id(), err);
U.error(log, "Failed to register handler [nodeId=" + node.id() + ", routineId=" + routineId + ']', err);
return;
}
StartRequestData data = req.startRequestData();
GridContinuousHandler hnd = data.handler();
if (req.keepBinary()) {
assert hnd instanceof CacheContinuousQueryHandler;
((CacheContinuousQueryHandler) hnd).keepBinary(true);
}
IgniteCheckedException err = null;
try {
if (ctx.config().isPeerClassLoadingEnabled()) {
String clsName = data.className();
if (clsName != null) {
GridDeploymentInfo depInfo = data.deploymentInfo();
GridDeployment dep = ctx.deploy().getGlobalDeployment(depInfo.deployMode(), clsName, clsName, depInfo.userVersion(), node.id(), depInfo.classLoaderId(), depInfo.participants(), null);
if (dep == null)
throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName);
data.p2pUnmarshal(marsh, U.resolveClassLoader(dep.classLoader(), ctx.config()));
}
}
} catch (IgniteCheckedException e) {
err = e;
U.error(log, "Failed to register handler [nodeId=" + node.id() + ", routineId=" + routineId + ']', e);
}
if (node.isClient()) {
Map<UUID, LocalRoutineInfo> clientRoutineMap = clientInfos.get(node.id());
if (clientRoutineMap == null) {
clientRoutineMap = new HashMap<>();
Map<UUID, LocalRoutineInfo> old = clientInfos.put(node.id(), clientRoutineMap);
assert old == null;
}
clientRoutineMap.put(routineId, new LocalRoutineInfo(node.id(), data.projectionPredicate(), hnd, data.bufferSize(), data.interval(), data.autoUnsubscribe()));
}
if (err == null) {
try {
IgnitePredicate<ClusterNode> prjPred = data.projectionPredicate();
if (prjPred != null)
ctx.resource().injectGeneric(prjPred);
if ((prjPred == null || prjPred.apply(ctx.discovery().node(ctx.localNodeId()))) && !locInfos.containsKey(routineId)) {
if (ctx.config().isPeerClassLoadingEnabled())
hnd.p2pUnmarshal(node.id(), ctx);
registerHandler(node.id(), routineId, hnd, data.bufferSize(), data.interval(), data.autoUnsubscribe(), false);
// Load partition counters.
if (err == null && hnd.isQuery()) {
GridCacheProcessor proc = ctx.cache();
if (proc != null) {
GridCacheAdapter cache = ctx.cache().internalCache(hnd.cacheName());
if (cache != null && !cache.isLocal() && cache.context().userCache())
req.addUpdateCounters(ctx.localNodeId(), hnd.updateCounters());
}
}
}
if (!data.autoUnsubscribe())
// Register routine locally.
locInfos.putIfAbsent(routineId, new LocalRoutineInfo(node.id(), prjPred, hnd, data.bufferSize(), data.interval(), data.autoUnsubscribe()));
} catch (IgniteCheckedException e) {
err = e;
U.error(log, "Failed to register handler [nodeId=" + node.id() + ", routineId=" + routineId + ']', e);
}
}
if (err != null)
req.addError(ctx.localNodeId(), err);
}
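For context, the start request handled above is produced when a node registers a continuous query. A minimal public-API sketch of that trigger, not taken from the original source; the cache name, the ignite instance and the listener are placeholders:
// Minimal sketch: registering a continuous query, which is what causes
// a StartRoutineDiscoveryMessage to be processed on the other nodes.
IgniteCache<Integer, String> cache = ignite.getOrCreateCache("myCache");
ContinuousQuery<Integer, String> qry = new ContinuousQuery<>();
qry.setLocalListener(evts -> {
    for (CacheEntryEvent<? extends Integer, ? extends String> e : evts)
        System.out.println("Updated: " + e.getKey() + " -> " + e.getValue());
});
try (QueryCursor<Cache.Entry<Integer, String>> cur = cache.query(qry)) {
    cache.put(1, "one"); // Triggers the local listener on the subscribing node.
}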