Use of org.apache.ignite.events.DiscoveryEvent in project ignite by apache.
Class IgniteTxManager, method start0.
/** {@inheritDoc} */
@Override protected void start0() throws IgniteCheckedException {
    txFinishSync = new GridCacheTxFinishSync<>(cctx);

    txHnd = new IgniteTxHandler(cctx);

    deferredAckMsgSnd = new GridDeferredAckMessageSender<GridCacheVersion>(cctx.time(), cctx.kernalContext().closure()) {
        @Override public int getTimeout() {
            return DEFERRED_ONE_PHASE_COMMIT_ACK_REQUEST_TIMEOUT;
        }

        @Override public int getBufferSize() {
            return DEFERRED_ONE_PHASE_COMMIT_ACK_REQUEST_BUFFER_SIZE;
        }

        @Override public void finish(UUID nodeId, Collection<GridCacheVersion> vers) {
            GridDhtTxOnePhaseCommitAckRequest ackReq = new GridDhtTxOnePhaseCommitAckRequest(vers);

            cctx.kernalContext().gateway().readLock();

            try {
                cctx.io().send(nodeId, ackReq, GridIoPolicy.SYSTEM_POOL);
            }
            catch (ClusterTopologyCheckedException ignored) {
                if (log.isDebugEnabled())
                    log.debug("Failed to send one phase commit ack to backup node because it left grid: " + nodeId);
            }
            catch (IgniteCheckedException e) {
                log.error("Failed to send one phase commit ack to backup node [backup=" + nodeId + ']', e);
            }
            finally {
                cctx.kernalContext().gateway().readUnlock();
            }
        }
    };

    cctx.gridEvents().addLocalEventListener(new GridLocalEventListener() {
        @Override public void onEvent(Event evt) {
            assert evt instanceof DiscoveryEvent;
            assert evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT;

            DiscoveryEvent discoEvt = (DiscoveryEvent)evt;

            UUID nodeId = discoEvt.eventNode().id();

            // Wait some time in case there are unprocessed messages from the failed node.
            cctx.time().addTimeoutObject(new NodeFailureTimeoutObject(nodeId));

            if (txFinishSync != null)
                txFinishSync.onNodeLeft(nodeId);

            for (TxDeadlockFuture fut : deadlockDetectFuts.values())
                fut.onNodeLeft(nodeId);

            for (Map.Entry<GridCacheVersion, Object> entry : completedVersHashMap.entrySet()) {
                Object obj = entry.getValue();

                if (obj instanceof GridCacheReturnCompletableWrapper &&
                    nodeId.equals(((GridCacheReturnCompletableWrapper)obj).nodeId()))
                    removeTxReturn(entry.getKey());
            }
        }
    }, EVT_NODE_FAILED, EVT_NODE_LEFT);

    this.txDeadlockDetection = new TxDeadlockDetection(cctx);

    cctx.gridIO().addMessageListener(TOPIC_TX, new DeadlockDetectionListener());

    this.logTxRecords = IgniteSystemProperties.getBoolean(IGNITE_WAL_LOG_TX_RECORDS, false);
}
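For context, a minimal sketch of the public-API analogue of the internal listener above: reacting to EVT_NODE_LEFT/EVT_NODE_FAILED discovery events through ignite.events().localListen(). The class name and configuration are illustrative assumptions; note that these event types must be enabled explicitly, since Ignite disables event notification by default.

import java.util.UUID;

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.events.EventType;

public class NodeLeftListenerExample {
    public static void main(String[] args) {
        IgniteConfiguration cfg = new IgniteConfiguration();

        // Events are disabled by default; enable the ones we listen for.
        cfg.setIncludeEventTypes(EventType.EVT_NODE_LEFT, EventType.EVT_NODE_FAILED);

        try (Ignite ignite = Ignition.start(cfg)) {
            ignite.events().localListen(evt -> {
                DiscoveryEvent discoEvt = (DiscoveryEvent)evt;

                UUID nodeId = discoEvt.eventNode().id();

                System.out.println("Node left or failed: " + nodeId);

                return true; // Keep listening.
            }, EventType.EVT_NODE_LEFT, EventType.EVT_NODE_FAILED);
        }
    }
}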
Use of org.apache.ignite.events.DiscoveryEvent in project ignite by apache.
Class AbstractAffinityFunctionSelfTest, method checkNodeRemoved.
/**
 * @throws Exception If failed.
 */
protected void checkNodeRemoved(int backups, int neighborsPerHost, int neighborsPeriod) throws Exception {
    AffinityFunction aff = affinityFunction();

    int nodesCnt = 50;

    List<ClusterNode> nodes = new ArrayList<>(nodesCnt);

    List<List<ClusterNode>> prev = null;

    for (int i = 0; i < nodesCnt; i++) {
        info("======================================");
        info("Assigning partitions: " + i);
        info("======================================");

        ClusterNode node = new GridTestNode(UUID.randomUUID());

        if (neighborsPerHost > 0)
            node.attribute(MAC_PREF + ((i / neighborsPeriod) % (nodesCnt / neighborsPerHost)));

        nodes.add(node);

        DiscoveryEvent discoEvt = new DiscoveryEvent(node, "", EventType.EVT_NODE_JOINED, node);

        GridAffinityFunctionContextImpl ctx =
            new GridAffinityFunctionContextImpl(nodes, prev, discoEvt, new AffinityTopologyVersion(i), backups);

        List<List<ClusterNode>> assignment = aff.assignPartitions(ctx);

        info("Assigned.");

        verifyAssignment(assignment, backups, aff.partitions(), nodes.size());

        prev = assignment;
    }

    info("======================================");
    info("Will remove nodes.");
    info("======================================");

    for (int i = 0; i < nodesCnt - 1; i++) {
        info("======================================");
        info("Assigning partitions: " + i);
        info("======================================");

        ClusterNode rmv = nodes.remove(nodes.size() - 1);

        DiscoveryEvent discoEvt = new DiscoveryEvent(rmv, "", EventType.EVT_NODE_LEFT, rmv);

        List<List<ClusterNode>> assignment = aff.assignPartitions(
            new GridAffinityFunctionContextImpl(nodes, prev, discoEvt, new AffinityTopologyVersion(i), backups));

        info("Assigned.");

        verifyAssignment(assignment, backups, aff.partitions(), nodes.size());

        prev = assignment;
    }
}
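The verifyAssignment(...) helper is not part of the snippet above. A hypothetical sketch of the kind of invariants such a check might assert, reusing the same types (the real test helper may check more):

import java.util.HashSet;
import java.util.List;

import org.apache.ignite.cluster.ClusterNode;

/** Hypothetical reconstruction; not the actual test helper. */
private static void verifyAssignment(List<List<ClusterNode>> assignment, int backups, int parts, int topSize) {
    // One owner list per partition.
    assert assignment.size() == parts : "Unexpected partition count: " + assignment.size();

    for (List<ClusterNode> owners : assignment) {
        // Every partition needs a primary; owner count is bounded by backups and topology size.
        assert !owners.isEmpty() : "Partition without primary.";
        assert owners.size() <= Math.min(backups + 1, topSize) : "Too many owners: " + owners.size();

        // A node must not own the same partition twice.
        assert new HashSet<>(owners).size() == owners.size() : "Duplicate owner in partition.";
    }
}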
Use of org.apache.ignite.events.DiscoveryEvent in project ignite by apache.
Class RendezvousAffinityFunctionSimpleBenchmark, method assignPartitions.
/**
 * @param aff Affinity function.
 * @param nodes Topology.
 * @param iter Iteration number.
 * @param prevAssignment Previous affinity assignment.
 * @param backups Backups count.
 * @return Tuple with the time spent on the affinity calculation and the resulting assignment.
 */
private IgniteBiTuple<Long, List<List<ClusterNode>>> assignPartitions(AffinityFunction aff, List<ClusterNode> nodes,
    List<List<ClusterNode>> prevAssignment, int backups, int iter) {
    GridAffinityFunctionContextImpl ctx = null;

    switch (mode) {
        case CHANGE_LAST_NODE:
            ctx = nodesModificationChangeLast(nodes, prevAssignment, iter, backups);

            break;

        case CHANGE_FIRST_NODE:
            ctx = nodesModificationChangeFirst(nodes, prevAssignment, iter, backups);

            break;

        case ADD:
            ctx = new GridAffinityFunctionContextImpl(nodes, prevAssignment, addNode(nodes, iter),
                new AffinityTopologyVersion(nodes.size()), backups);

            break;

        case REMOVE_RANDOM:
            ctx = new GridAffinityFunctionContextImpl(nodes, prevAssignment, removeNode(nodes, nodes.size() - 1),
                new AffinityTopologyVersion(nodes.size()), backups);

            break;

        case NONE:
            ctx = new GridAffinityFunctionContextImpl(nodes, prevAssignment,
                new DiscoveryEvent(nodes.get(0), "", EventType.EVT_NODE_JOINED, nodes.get(nodes.size() - 1)),
                new AffinityTopologyVersion(nodes.size()), backups);

            break;
    }

    long start = System.currentTimeMillis();

    List<List<ClusterNode>> assignments = aff.assignPartitions(ctx);

    return F.t(System.currentTimeMillis() - start, assignments);
}
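The returned tuple pairs the elapsed milliseconds with the computed assignment. A minimal consumption sketch, assuming a driver loop in the same benchmark class; MAX_EXPERIMENTS and the local variables are illustrative, not names from the benchmark:

// Illustrative driver loop; MAX_EXPERIMENTS is an assumed constant.
long totalTime = 0;
List<List<ClusterNode>> prev = null;

for (int iter = 0; iter < MAX_EXPERIMENTS; iter++) {
    IgniteBiTuple<Long, List<List<ClusterNode>>> res = assignPartitions(aff, nodes, prev, backups, iter);

    totalTime += res.get1(); // Milliseconds spent inside assignPartitions().

    prev = res.get2(); // Feed the assignment back in as the previous one.
}

System.out.println("Avg affinity calculation time: " + totalTime / MAX_EXPERIMENTS + " ms");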
Use of org.apache.ignite.events.DiscoveryEvent in project ignite by apache.
Class RendezvousAffinityFunctionSimpleBenchmark, method nodesModificationChangeFirst.
/**
 * Modifies the topology by removing the first node or adding a new node, alternating per iteration.
 *
 * @param nodes Topology.
 * @param prevAssignment Previous affinity.
 * @param iter Iteration number.
 * @param backups Backups count.
 * @return Affinity context.
 */
private GridAffinityFunctionContextImpl nodesModificationChangeFirst(List<ClusterNode> nodes,
    List<List<ClusterNode>> prevAssignment, int iter, int backups) {
    DiscoveryEvent discoEvt = iter % 2 == 0 ? addNode(nodes, iter) : removeNode(nodes, 0);

    return new GridAffinityFunctionContextImpl(nodes, prevAssignment, discoEvt,
        new AffinityTopologyVersion(nodes.size()), backups);
}
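The addNode(...) and removeNode(...) helpers referenced here and in assignPartitions(...) are not shown. Hypothetical reconstructions, mirroring how the test in AbstractAffinityFunctionSelfTest builds its DiscoveryEvent instances:

/** Hypothetical reconstruction; adds a test node and returns the join event. */
private DiscoveryEvent addNode(List<ClusterNode> nodes, int iter) {
    ClusterNode node = new GridTestNode(UUID.randomUUID());

    nodes.add(node);

    return new DiscoveryEvent(node, "", EventType.EVT_NODE_JOINED, node);
}

/** Hypothetical reconstruction; removes the node at idx and returns the leave event. */
private DiscoveryEvent removeNode(List<ClusterNode> nodes, int idx) {
    ClusterNode rmv = nodes.remove(idx);

    return new DiscoveryEvent(rmv, "", EventType.EVT_NODE_LEFT, rmv);
}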
Use of org.apache.ignite.events.DiscoveryEvent in project ignite by apache.
Class AdaptiveLoadBalancingSpi, method onContextInitialized0.
/** {@inheritDoc} */
@Override protected void onContextInitialized0(IgniteSpiContext spiCtx) throws IgniteSpiException {
    getSpiContext().addLocalEventListener(evtLsnr = new GridLocalEventListener() {
        @Override public void onEvent(Event evt) {
            switch (evt.type()) {
                case EVT_TASK_FINISHED:
                case EVT_TASK_FAILED: {
                    TaskEvent taskEvt = (TaskEvent)evt;

                    taskTops.remove(taskEvt.taskSessionId());

                    if (log.isDebugEnabled())
                        log.debug("Removed task topology from topology cache for session: " + taskEvt.taskSessionId());

                    break;
                }

                case EVT_JOB_MAPPED: {
                    // We should keep the topology and use the cache in the ComputeTask#map() method
                    // to avoid O(n*n/2) complexity; after that we can drop the caches.
                    // Here we set the mapped property so that the cache is ignored from now on.
                    JobEvent jobEvt = (JobEvent)evt;

                    IgniteBiTuple<Boolean, WeightedTopology> weightedTop = taskTops.get(jobEvt.taskSessionId());

                    if (weightedTop != null)
                        weightedTop.set1(true);

                    if (log.isDebugEnabled())
                        log.debug("Job has been mapped. Ignore cache for session: " + jobEvt.taskSessionId());

                    break;
                }

                case EVT_NODE_METRICS_UPDATED:
                case EVT_NODE_FAILED:
                case EVT_NODE_JOINED:
                case EVT_NODE_LEFT: {
                    DiscoveryEvent discoEvt = (DiscoveryEvent)evt;

                    rwLock.writeLock().lock();

                    try {
                        switch (evt.type()) {
                            case EVT_NODE_JOINED: {
                                nodeJobs.put(discoEvt.eventNode().id(), new AtomicInteger(0));

                                break;
                            }

                            case EVT_NODE_LEFT:
                            case EVT_NODE_FAILED: {
                                nodeJobs.remove(discoEvt.eventNode().id());

                                break;
                            }

                            case EVT_NODE_METRICS_UPDATED: {
                                // Reset the job counter.
                                nodeJobs.put(discoEvt.eventNode().id(), new AtomicInteger(0));

                                break;
                            }
                        }
                    }
                    finally {
                        rwLock.writeLock().unlock();
                    }
                }
            }
        }
    }, EVT_NODE_METRICS_UPDATED, EVT_NODE_FAILED, EVT_NODE_JOINED, EVT_NODE_LEFT, EVT_TASK_FINISHED, EVT_TASK_FAILED,
        EVT_JOB_MAPPED);

    // Put all known nodes.
    rwLock.writeLock().lock();

    try {
        for (ClusterNode node : getSpiContext().nodes())
            nodeJobs.put(node.id(), new AtomicInteger(0));
    }
    finally {
        rwLock.writeLock().unlock();
    }
}
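For context, enabling this SPI on a node is a short configuration step. A minimal sketch, assuming the stock AdaptiveCpuLoadProbe; the class name is illustrative:

import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.spi.loadbalancing.adaptive.AdaptiveCpuLoadProbe;
import org.apache.ignite.spi.loadbalancing.adaptive.AdaptiveLoadBalancingSpi;

public class AdaptiveLbConfigExample {
    public static void main(String[] args) {
        AdaptiveLoadBalancingSpi spi = new AdaptiveLoadBalancingSpi();

        // Weigh nodes by current CPU load; the probe relies on node metrics,
        // which the EVT_NODE_METRICS_UPDATED handling above keeps fresh.
        spi.setLoadProbe(new AdaptiveCpuLoadProbe());

        IgniteConfiguration cfg = new IgniteConfiguration();

        cfg.setLoadBalancingSpi(spi);

        Ignition.start(cfg);
    }
}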