Use of org.apache.ignite.events.DiscoveryEvent in project ignite by apache.
The class GridCacheDistributedQueryManager, method start0().
/** {@inheritDoc} */
@Override
public void start0() throws IgniteCheckedException {
    super.start0();

    assert cctx.config().getCacheMode() != LOCAL;

    cctx.io().addHandler(cctx.cacheId(), GridCacheQueryRequest.class, new CI2<UUID, GridCacheQueryRequest>() {
        @Override
        public void apply(UUID nodeId, GridCacheQueryRequest req) {
            processQueryRequest(nodeId, req);
        }
    });

    lsnr = new GridLocalEventListener() {
        @Override
        public void onEvent(Event evt) {
            DiscoveryEvent discoEvt = (DiscoveryEvent) evt;

            for (GridCacheDistributedQueryFuture fut : futs.values())
                fut.onNodeLeft(discoEvt.eventNode().id());
        }
    };

    cctx.events().addListener(lsnr, EVT_NODE_LEFT, EVT_NODE_FAILED);
}
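The snippet above uses Ignite's internal GridLocalEventListener API to cancel distributed query futures when a node departs. For reference, a minimal sketch of the same node-left handling through the public events API might look as follows; NodeLeftListenerExample is an illustrative class name, and note that discovery event types are disabled by default and must be enabled via setIncludeEventTypes before they are delivered to listeners.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.events.EventType;
import org.apache.ignite.lang.IgnitePredicate;

public class NodeLeftListenerExample {
    public static void main(String[] args) {
        IgniteConfiguration cfg = new IgniteConfiguration();

        // Events are disabled by default; enable the discovery events we want delivered.
        cfg.setIncludeEventTypes(EventType.EVT_NODE_LEFT, EventType.EVT_NODE_FAILED);

        Ignite ignite = Ignition.start(cfg);

        // React locally whenever a node leaves or fails.
        ignite.events().localListen((IgnitePredicate<DiscoveryEvent>) evt -> {
            System.out.println("Node left topology: " + evt.eventNode().id());

            return true; // Keep the listener registered.
        }, EventType.EVT_NODE_LEFT, EventType.EVT_NODE_FAILED);
    }
}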
Use of org.apache.ignite.events.DiscoveryEvent in project ignite by apache.
The class IgfsDataManager, method start0().
/** {@inheritDoc} */
@Override
protected void start0() throws IgniteCheckedException {
    dataCacheStartLatch = new CountDownLatch(1);

    String igfsName = igfsCtx.configuration().getName();

    topic = F.isEmpty(igfsName) ? TOPIC_IGFS : TOPIC_IGFS.topic(igfsName);

    igfsCtx.kernalContext().io().addMessageListener(topic, new GridMessageListener() {
        @Override
        public void onMessage(UUID nodeId, Object msg) {
            if (msg instanceof IgfsBlocksMessage)
                processBlocksMessage(nodeId, (IgfsBlocksMessage) msg);
            else if (msg instanceof IgfsAckMessage)
                processAckMessage(nodeId, (IgfsAckMessage) msg);
        }
    });

    igfsCtx.kernalContext().event().addLocalEventListener(new GridLocalEventListener() {
        @Override
        public void onEvent(Event evt) {
            assert evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT;

            DiscoveryEvent discoEvt = (DiscoveryEvent) evt;

            if (igfsCtx.igfsNode(discoEvt.eventNode())) {
                for (WriteCompletionFuture future : pendingWrites.values()) {
                    future.onError(discoEvt.eventNode().id(),
                        new ClusterTopologyCheckedException("Node left grid before write completed: " + evt.node().id()));
                }
            }
        }
    }, EVT_NODE_LEFT, EVT_NODE_FAILED);

    delWorker = new AsyncDeleteWorker(igfsCtx.kernalContext().igniteInstanceName(),
        "igfs-" + igfsName + "-delete-worker", log);

    dataCacheName = igfsCtx.configuration().getDataCacheConfiguration().getName();
}
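The pattern above, failing pending writes when the node expected to acknowledge them disappears, can be sketched outside of IGFS internals as well. The helper below is hypothetical (PendingWriteTracker and register are illustrative names), keeps a single pending write per node for simplicity, and uses CompletableFuture in place of the internal WriteCompletionFuture.

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.ignite.Ignite;
import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.events.EventType;
import org.apache.ignite.lang.IgnitePredicate;

public class PendingWriteTracker {
    /** Pending writes keyed by the node expected to acknowledge them. */
    private final Map<UUID, CompletableFuture<Void>> pendingWrites = new ConcurrentHashMap<>();

    public PendingWriteTracker(Ignite ignite) {
        // Fail the pending write targeting a node that left or failed.
        ignite.events().localListen((IgnitePredicate<DiscoveryEvent>) evt -> {
            CompletableFuture<Void> fut = pendingWrites.remove(evt.eventNode().id());

            if (fut != null)
                fut.completeExceptionally(new IllegalStateException(
                    "Node left grid before write completed: " + evt.eventNode().id()));

            return true; // Keep the listener registered.
        }, EventType.EVT_NODE_LEFT, EventType.EVT_NODE_FAILED);
    }

    /** Registers a pending write that must be acknowledged by the given node. */
    public CompletableFuture<Void> register(UUID nodeId) {
        return pendingWrites.computeIfAbsent(nodeId, id -> new CompletableFuture<>());
    }
}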
Use of org.apache.ignite.events.DiscoveryEvent in project ignite by apache.
The class PlatformContextImpl, method writeEvent().
/** {@inheritDoc} */
@Override
public void writeEvent(BinaryRawWriterEx writer, Event evt) {
    assert writer != null;

    if (evt == null) {
        writer.writeInt(-1);
        return;
    }

    EventAdapter evt0 = (EventAdapter) evt;

    if (evt0 instanceof CacheEvent) {
        writer.writeInt(2);
        writeCommonEventData(writer, evt0);

        CacheEvent event0 = (CacheEvent) evt0;

        writer.writeString(event0.cacheName());
        writer.writeInt(event0.partition());
        writer.writeBoolean(event0.isNear());
        writeNode(writer, event0.eventNode());
        writer.writeObject(event0.key());
        writer.writeObject(event0.xid());
        writer.writeObject(event0.newValue());
        writer.writeObject(event0.oldValue());
        writer.writeBoolean(event0.hasOldValue());
        writer.writeBoolean(event0.hasNewValue());
        writer.writeUuid(event0.subjectId());
        writer.writeString(event0.closureClassName());
        writer.writeString(event0.taskName());
    } else if (evt0 instanceof CacheQueryExecutedEvent) {
        writer.writeInt(3);
        writeCommonEventData(writer, evt0);

        CacheQueryExecutedEvent event0 = (CacheQueryExecutedEvent) evt0;

        writer.writeString(event0.queryType());
        writer.writeString(event0.cacheName());
        writer.writeString(event0.className());
        writer.writeString(event0.clause());
        writer.writeUuid(event0.subjectId());
        writer.writeString(event0.taskName());
    } else if (evt0 instanceof CacheQueryReadEvent) {
        writer.writeInt(4);
        writeCommonEventData(writer, evt0);

        CacheQueryReadEvent event0 = (CacheQueryReadEvent) evt0;

        writer.writeString(event0.queryType());
        writer.writeString(event0.cacheName());
        writer.writeString(event0.className());
        writer.writeString(event0.clause());
        writer.writeUuid(event0.subjectId());
        writer.writeString(event0.taskName());
        writer.writeObject(event0.key());
        writer.writeObject(event0.value());
        writer.writeObject(event0.oldValue());
        writer.writeObject(event0.row());
    } else if (evt0 instanceof CacheRebalancingEvent) {
        writer.writeInt(5);
        writeCommonEventData(writer, evt0);

        CacheRebalancingEvent event0 = (CacheRebalancingEvent) evt0;

        writer.writeString(event0.cacheName());
        writer.writeInt(event0.partition());
        writeNode(writer, event0.discoveryNode());
        writer.writeInt(event0.discoveryEventType());
        writer.writeString(event0.discoveryEventName());
        writer.writeLong(event0.discoveryTimestamp());
    } else if (evt0 instanceof CheckpointEvent) {
        writer.writeInt(6);
        writeCommonEventData(writer, evt0);

        CheckpointEvent event0 = (CheckpointEvent) evt0;

        writer.writeString(event0.key());
    } else if (evt0 instanceof DiscoveryEvent) {
        writer.writeInt(7);
        writeCommonEventData(writer, evt0);

        DiscoveryEvent event0 = (DiscoveryEvent) evt0;

        writeNode(writer, event0.eventNode());
        writer.writeLong(event0.topologyVersion());
        writeNodes(writer, event0.topologyNodes());
    } else if (evt0 instanceof JobEvent) {
        writer.writeInt(8);
        writeCommonEventData(writer, evt0);

        JobEvent event0 = (JobEvent) evt0;

        writer.writeString(event0.taskName());
        writer.writeString(event0.taskClassName());
        writer.writeObject(event0.taskSessionId());
        writer.writeObject(event0.jobId());
        writeNode(writer, event0.taskNode());
        writer.writeUuid(event0.taskSubjectId());
    } else if (evt0 instanceof TaskEvent) {
        writer.writeInt(10);
        writeCommonEventData(writer, evt0);

        TaskEvent event0 = (TaskEvent) evt0;

        writer.writeString(event0.taskName());
        writer.writeString(event0.taskClassName());
        writer.writeObject(event0.taskSessionId());
        writer.writeBoolean(event0.internal());
        writer.writeUuid(event0.subjectId());
    } else
        throw new IgniteException("Unsupported event: " + evt);
}
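The DiscoveryEvent branch above (type code 7) serializes exactly three event-specific fields for the platform side: the node that caused the event, the resulting topology version, and the topology snapshot. A small sketch showing the same public DiscoveryEvent accessors; DiscoveryEventPrinter is an illustrative name.

import java.util.Collection;

import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.events.DiscoveryEvent;

public class DiscoveryEventPrinter {
    public static void print(DiscoveryEvent evt) {
        ClusterNode evtNode = evt.eventNode();                   // Node that caused the event.
        long topVer = evt.topologyVersion();                     // Topology version after the change.
        Collection<ClusterNode> topNodes = evt.topologyNodes();  // Snapshot of the topology.

        System.out.println("Discovery event [node=" + evtNode.id() +
            ", topVer=" + topVer + ", topSize=" + topNodes.size() + ']');
    }
}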
Use of org.apache.ignite.events.DiscoveryEvent in project ignite by apache.
The class PlatformDataStreamer, method processInLongOutLong().
/** {@inheritDoc} */
@Override
public long processInLongOutLong(int type, final long val) throws IgniteCheckedException {
    switch (type) {
        case OP_SET_ALLOW_OVERWRITE:
            ldr.allowOverwrite(val == TRUE);
            return TRUE;

        case OP_SET_PER_NODE_BUFFER_SIZE:
            ldr.perNodeBufferSize((int) val);
            return TRUE;

        case OP_SET_SKIP_STORE:
            ldr.skipStore(val == TRUE);
            return TRUE;

        case OP_SET_PER_NODE_PARALLEL_OPS:
            ldr.perNodeParallelOperations((int) val);
            return TRUE;

        case OP_LISTEN_TOPOLOGY: {
            lsnr = new GridLocalEventListener() {
                @Override
                public void onEvent(Event evt) {
                    DiscoveryEvent discoEvt = (DiscoveryEvent) evt;

                    long topVer = discoEvt.topologyVersion();
                    int topSize = platformCtx.kernalContext().discovery()
                        .cacheNodes(cacheName, new AffinityTopologyVersion(topVer)).size();

                    platformCtx.gateway().dataStreamerTopologyUpdate(val, topVer, topSize);
                }
            };

            platformCtx.kernalContext().event().addLocalEventListener(lsnr, EVT_NODE_JOINED, EVT_NODE_FAILED, EVT_NODE_LEFT);

            GridDiscoveryManager discoMgr = platformCtx.kernalContext().discovery();

            AffinityTopologyVersion topVer = discoMgr.topologyVersionEx();
            int topSize = discoMgr.cacheNodes(cacheName, topVer).size();

            platformCtx.gateway().dataStreamerTopologyUpdate(val, topVer.topologyVersion(), topSize);

            return TRUE;
        }

        case OP_ALLOW_OVERWRITE:
            return ldr.allowOverwrite() ? TRUE : FALSE;

        case OP_PER_NODE_BUFFER_SIZE:
            return ldr.perNodeBufferSize();

        case OP_SKIP_STORE:
            return ldr.skipStore() ? TRUE : FALSE;

        case OP_PER_NODE_PARALLEL_OPS:
            return ldr.perNodeParallelOperations();
    }

    return super.processInLongOutLong(type, val);
}
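The OP_LISTEN_TOPOLOGY branch above subscribes to node join, leave, and failure events so the platform gateway always sees the current topology version and the number of nodes holding the cache. A rough public-API analogue of that tracking, using a hypothetical TopologyTracker helper, can read the same topology version from the DiscoveryEvent itself; unlike the snippet, it counts all nodes in the topology snapshot rather than only the data nodes of a particular cache.

import org.apache.ignite.Ignite;
import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.events.EventType;
import org.apache.ignite.lang.IgnitePredicate;

public class TopologyTracker {
    public static void track(Ignite ignite) {
        ignite.events().localListen((IgnitePredicate<DiscoveryEvent>) evt -> {
            long topVer = evt.topologyVersion();
            int topSize = evt.topologyNodes().size();

            System.out.println("Topology changed [ver=" + topVer + ", size=" + topSize + ']');

            return true; // Keep the listener registered.
        }, EventType.EVT_NODE_JOINED, EventType.EVT_NODE_LEFT, EventType.EVT_NODE_FAILED);
    }
}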
Use of org.apache.ignite.events.DiscoveryEvent in project ignite by apache.
The class GridTaskCommandHandler, method requestTaskResult().
/**
 * @param resHolderId Result holder.
 * @param taskId Task ID.
 * @return Response from task holder.
 */
private IgniteBiTuple<String, GridTaskResultResponse> requestTaskResult(final UUID resHolderId, IgniteUuid taskId) {
    ClusterNode taskNode = ctx.discovery().node(resHolderId);

    if (taskNode == null)
        return F.t("Task result holder has left grid: " + resHolderId, null);

    // Tuple: error message-response.
    final IgniteBiTuple<String, GridTaskResultResponse> t = new IgniteBiTuple<>();

    final Lock lock = new ReentrantLock();
    final Condition cond = lock.newCondition();

    GridMessageListener msgLsnr = new GridMessageListener() {
        @Override
        public void onMessage(UUID nodeId, Object msg) {
            String err = null;
            GridTaskResultResponse res = null;

            if (!(msg instanceof GridTaskResultResponse))
                err = "Received unexpected message: " + msg;
            else if (!nodeId.equals(resHolderId))
                err = "Received task result response from unexpected node [resHolderId=" + resHolderId + ", nodeId=" + nodeId + ']';
            else {
                // Sender and message type are fine.
                res = (GridTaskResultResponse) msg;

                try {
                    res.result(U.unmarshal(ctx, res.resultBytes(), U.resolveClassLoader(ctx.config())));
                } catch (IgniteCheckedException e) {
                    U.error(log, "Failed to unmarshal task result: " + res, e);
                }
            }

            lock.lock();

            try {
                if (t.isEmpty()) {
                    t.set(err, res);
                    cond.signalAll();
                }
            } finally {
                lock.unlock();
            }
        }
    };

    GridLocalEventListener discoLsnr = new GridLocalEventListener() {
        @Override
        public void onEvent(Event evt) {
            assert evt instanceof DiscoveryEvent && (evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT) : "Unexpected event: " + evt;

            DiscoveryEvent discoEvt = (DiscoveryEvent) evt;

            if (resHolderId.equals(discoEvt.eventNode().id())) {
                lock.lock();

                try {
                    if (t.isEmpty()) {
                        t.set("Node that originated task execution has left grid: " + resHolderId, null);
                        cond.signalAll();
                    }
                } finally {
                    lock.unlock();
                }
            }
        }
    };

    // 1. Create unique topic name and register listener.
    Object topic = TOPIC_REST.topic("task-result", topicIdGen.getAndIncrement());

    try {
        ctx.io().addMessageListener(topic, msgLsnr);

        // 2. Send message.
        try {
            byte[] topicBytes = U.marshal(ctx, topic);

            ctx.io().sendToGridTopic(taskNode, TOPIC_REST, new GridTaskResultRequest(taskId, topic, topicBytes), SYSTEM_POOL);
        } catch (IgniteCheckedException e) {
            String errMsg = "Failed to send task result request [resHolderId=" + resHolderId + ", err=" + e.getMessage() + ']';

            if (log.isDebugEnabled())
                log.debug(errMsg);

            return F.t(errMsg, null);
        }

        // 3. Listen to discovery events.
        ctx.event().addLocalEventListener(discoLsnr, EVT_NODE_FAILED, EVT_NODE_LEFT);

        // 4. Check whether node has left before disco listener has been installed.
        taskNode = ctx.discovery().node(resHolderId);

        if (taskNode == null)
            return F.t("Task result holder has left grid: " + resHolderId, null);

        // 5. Wait for result.
        lock.lock();

        try {
            long netTimeout = ctx.config().getNetworkTimeout();

            if (t.isEmpty())
                cond.await(netTimeout, MILLISECONDS);

            if (t.isEmpty())
                t.set1("Timed out waiting for task result (consider increasing 'networkTimeout' " +
                    "configuration property) [resHolderId=" + resHolderId + ", netTimeout=" + netTimeout + ']');

            // Return result.
            return t;
        } catch (InterruptedException ignored) {
            Thread.currentThread().interrupt();

            return F.t("Interrupted while waiting for task result.", null);
        } finally {
            lock.unlock();
        }
    } finally {
        ctx.io().removeMessageListener(topic, msgLsnr);
        ctx.event().removeLocalEventListener(discoLsnr);
    }
}
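Steps 3 and 4 above guard against a race: the result holder can leave the grid between sending the request and installing the discovery listener, so the node is re-checked after the listener is added and the caller then waits with a timeout. A condensed sketch of that "listen, then re-check, then wait" pattern over the public API; WaitForNodeResult and awaitNodeLeft are illustrative names, not part of Ignite.

import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.ignite.Ignite;
import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.events.EventType;
import org.apache.ignite.lang.IgnitePredicate;

public class WaitForNodeResult {
    /** Returns true if the result holder left the grid (or was already gone) within the timeout. */
    public static boolean awaitNodeLeft(Ignite ignite, UUID resHolderId, long timeoutMs) throws InterruptedException {
        CountDownLatch nodeLeft = new CountDownLatch(1);

        IgnitePredicate<DiscoveryEvent> lsnr = evt -> {
            if (resHolderId.equals(evt.eventNode().id()))
                nodeLeft.countDown();

            return true;
        };

        // 1. Install the discovery listener first.
        ignite.events().localListen(lsnr, EventType.EVT_NODE_LEFT, EventType.EVT_NODE_FAILED);

        try {
            // 2. Re-check: the node may have left before the listener was installed.
            if (ignite.cluster().node(resHolderId) == null)
                return true;

            // 3. Wait for the node-left signal or the timeout.
            return nodeLeft.await(timeoutMs, TimeUnit.MILLISECONDS);
        } finally {
            ignite.events().stopLocalListen(lsnr);
        }
    }
}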