Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class GridCachePartitionExchangeManager, method onKernalStart0.
/** {@inheritDoc} */
@Override
protected void onKernalStart0(boolean reconnect) throws IgniteCheckedException {
    super.onKernalStart0(reconnect);

    ClusterNode loc = cctx.localNode();

    long startTime = loc.metrics().getStartTime();

    assert startTime > 0;

    // Generate dummy discovery event for local node joining.
    T2<DiscoveryEvent, DiscoCache> locJoin = cctx.discovery().localJoin();

    DiscoveryEvent discoEvt = locJoin.get1();
    DiscoCache discoCache = locJoin.get2();

    GridDhtPartitionExchangeId exchId = initialExchangeId();

    GridDhtPartitionsExchangeFuture fut = exchangeFuture(exchId, discoEvt, discoCache, null, null);

    if (reconnect)
        reconnectExchangeFut = new GridFutureAdapter<>();

    exchWorker.addFirstExchangeFuture(fut);

    if (!cctx.kernalContext().clientNode()) {
        for (int cnt = 0; cnt < cctx.gridConfig().getRebalanceThreadPoolSize(); cnt++) {
            final int idx = cnt;

            cctx.io().addOrderedHandler(rebalanceTopic(cnt), new CI2<UUID, GridCacheMessage>() {
                @Override public void apply(final UUID id, final GridCacheMessage m) {
                    if (!enterBusy())
                        return;

                    try {
                        GridCacheContext cacheCtx = cctx.cacheContext(m.cacheId);

                        if (cacheCtx != null) {
                            if (m instanceof GridDhtPartitionSupplyMessage)
                                cacheCtx.preloader().handleSupplyMessage(idx, id, (GridDhtPartitionSupplyMessage) m);
                            else if (m instanceof GridDhtPartitionDemandMessage)
                                cacheCtx.preloader().handleDemandMessage(idx, id, (GridDhtPartitionDemandMessage) m);
                            else
                                U.error(log, "Unsupported message type: " + m.getClass().getName());
                        }
                    }
                    finally {
                        leaveBusy();
                    }
                }
            });
        }
    }

    new IgniteThread(cctx.igniteInstanceName(), "exchange-worker", exchWorker).start();

    if (reconnect) {
        fut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
            @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
                try {
                    fut.get();

                    for (GridCacheContext cacheCtx : cctx.cacheContexts())
                        cacheCtx.preloader().onInitialExchangeComplete(null);

                    reconnectExchangeFut.onDone();
                }
                catch (IgniteCheckedException e) {
                    for (GridCacheContext cacheCtx : cctx.cacheContexts())
                        cacheCtx.preloader().onInitialExchangeComplete(e);

                    reconnectExchangeFut.onDone(e);
                }
            }
        });
    }
    else {
        if (log.isDebugEnabled())
            log.debug("Beginning to wait on local exchange future: " + fut);

        boolean first = true;

        while (true) {
            try {
                fut.get(cctx.preloadExchangeTimeout());

                break;
            }
            catch (IgniteFutureTimeoutCheckedException ignored) {
                if (first) {
                    U.warn(log, "Failed to wait for initial partition map exchange. " +
                        "Possible reasons are: " + U.nl() +
                        " ^-- Transactions in deadlock." + U.nl() +
                        " ^-- Long running transactions (ignore if this is the case)." + U.nl() +
                        " ^-- Unreleased explicit locks.");

                    first = false;
                }
                else
                    U.warn(log, "Still waiting for initial partition map exchange [fut=" + fut + ']');
            }
            catch (IgniteNeedReconnectException e) {
                throw e;
            }
            catch (Exception e) {
                if (fut.reconnectOnError(e))
                    throw new IgniteNeedReconnectException(cctx.localNode(), e);

                throw e;
            }
        }

        AffinityTopologyVersion nodeStartVer = new AffinityTopologyVersion(discoEvt.topologyVersion(), 0);

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (nodeStartVer.equals(cacheCtx.startTopologyVersion()))
                cacheCtx.preloader().onInitialExchangeComplete(null);
        }

        if (log.isDebugEnabled())
            log.debug("Finished waiting for initial exchange: " + fut.exchangeId());
    }
}
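In the method above, GridFutureAdapter serves as a plain completion signal: reconnectExchangeFut is created before the exchange starts and is completed, normally or with an error, from a listener on the exchange future, so that any code blocked on it wakes up. The following is a minimal, self-contained sketch of that pattern; the class and method names (ReconnectSignalSketch, completeReconnect, awaitReconnect) are illustrative and are not part of the Ignite API.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

/** Illustrative sketch of the completion-signal pattern used for reconnectExchangeFut. */
class ReconnectSignalSketch {
    /** Future handed out to callers that need to wait for reconnect to finish. */
    private final GridFutureAdapter<Void> reconnectFut = new GridFutureAdapter<>();

    /** Invoked by whichever thread finishes the reconnect work. */
    void completeReconnect(Throwable err) {
        if (err == null)
            reconnectFut.onDone();      // unblocks every reconnectFut.get() call
        else
            reconnectFut.onDone(err);   // reconnectFut.get() will rethrow this error
    }

    /** Invoked by code that must wait until reconnect completes. */
    void awaitReconnect() throws IgniteCheckedException {
        reconnectFut.get();             // blocks until onDone(...) is called
    }
}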
Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class GridDistributedCacheAdapter, method removeAllAsync.
/** {@inheritDoc} */
@Override
public IgniteInternalFuture<?> removeAllAsync() {
    GridFutureAdapter<Void> opFut = new GridFutureAdapter<>();

    AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion();

    CacheOperationContext opCtx = ctx.operationContextPerCall();

    removeAllAsync(opFut, topVer, opCtx != null && opCtx.skipStore(), opCtx != null && opCtx.isKeepBinary());

    return opFut;
}
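The method creates a GridFutureAdapter, hands it to an internal overload that performs the removal asynchronously, and returns it to the caller right away. A hedged sketch of that shape is shown below; RemoveAllSketch and startRemoval are hypothetical stand-ins for the internal removeAllAsync(opFut, ...) overload, not actual Ignite code.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

/** Illustrative sketch: return a GridFutureAdapter immediately, complete it when the async work ends. */
class RemoveAllSketch {
    IgniteInternalFuture<?> removeAllAsync() {
        GridFutureAdapter<Void> opFut = new GridFutureAdapter<>();

        startRemoval(opFut);    // hypothetical stand-in for the internal removeAllAsync(opFut, ...) overload

        return opFut;           // caller can block on get() or attach listeners via listen(...)
    }

    private void startRemoval(final GridFutureAdapter<Void> opFut) {
        new Thread(new Runnable() {
            @Override public void run() {
                try {
                    // ... remove entries here ...
                    opFut.onDone();                                 // success: complete the future
                }
                catch (Exception e) {
                    opFut.onDone(new IgniteCheckedException(e));    // failure: propagate to waiters
                }
            }
        }).start();
    }
}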
Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class GridCacheQueryManager, method fieldsQueryResult.
/**
 * @param qryInfo Info.
 * @param taskName Task name.
 * @return Iterator.
 * @throws IgniteCheckedException In case of error.
 */
private FieldsResult fieldsQueryResult(GridCacheQueryInfo qryInfo, String taskName) throws IgniteCheckedException {
    final UUID sndId = qryInfo.senderId();

    assert sndId != null;

    Map<Long, GridFutureAdapter<FieldsResult>> iters = fieldsQryRes.get(sndId);

    if (iters == null) {
        iters = new LinkedHashMap<Long, GridFutureAdapter<FieldsResult>>(16, 0.75f, true) {
            @Override protected boolean removeEldestEntry(Map.Entry<Long, GridFutureAdapter<FieldsResult>> e) {
                boolean rmv = size() > maxIterCnt;

                if (rmv) {
                    try {
                        e.getValue().get().closeIfNotShared(recipient(sndId, e.getKey()));
                    }
                    catch (IgniteCheckedException ex) {
                        U.error(log, "Failed to close fields query iterator.", ex);
                    }
                }

                return rmv;
            }

            @Override public boolean equals(Object o) {
                return o == this;
            }
        };

        Map<Long, GridFutureAdapter<FieldsResult>> old = fieldsQryRes.putIfAbsent(sndId, iters);

        if (old != null)
            iters = old;
    }

    return fieldsQueryResult(iters, qryInfo, taskName);
}
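Here each query result future is kept in an access-ordered LinkedHashMap whose removeEldestEntry hook closes the evicted iterator, giving a bounded per-sender LRU; the putIfAbsent call then resolves the race when two threads create the map for the same sender at once. A simplified, hypothetical version of the eviction idea (LruOfCloseables is a generic illustration, not an Ignite class) might look like this:

import java.util.LinkedHashMap;
import java.util.Map;

/** Illustrative LRU that closes evicted values, mirroring the removeEldestEntry override above. */
class LruOfCloseables<K, V extends AutoCloseable> extends LinkedHashMap<K, V> {
    private final int maxSize;

    LruOfCloseables(int maxSize) {
        super(16, 0.75f, true);     // accessOrder = true, same constructor arguments as in the snippet
        this.maxSize = maxSize;
    }

    @Override protected boolean removeEldestEntry(Map.Entry<K, V> e) {
        boolean rmv = size() > maxSize;

        if (rmv) {
            try {
                e.getValue().close();   // release the evicted resource, as closeIfNotShared(...) does above
            }
            catch (Exception ex) {
                // The snippet above logs this via U.error(log, ...); a sketch can only swallow or rethrow.
            }
        }

        return rmv;
    }
}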
Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class HadoopV2Job, method getTaskContext.
/** {@inheritDoc} */
@SuppressWarnings({ "unchecked", "MismatchedQueryAndUpdateOfCollection" })
@Override
public HadoopTaskContext getTaskContext(HadoopTaskInfo info) throws IgniteCheckedException {
    T2<HadoopTaskType, Integer> locTaskId = new T2<>(info.type(), info.taskNumber());

    GridFutureAdapter<HadoopTaskContext> fut = ctxs.get(locTaskId);

    if (fut != null)
        return fut.get();

    GridFutureAdapter<HadoopTaskContext> old = ctxs.putIfAbsent(locTaskId, fut = new GridFutureAdapter<>());

    if (old != null)
        return old.get();

    Class<? extends HadoopTaskContext> cls = taskCtxClsPool.poll();

    try {
        if (cls == null) {
            // If there is no pooled class, then load a new one.
            // Note that the class loader is identified by the task it was initially created for,
            // but later it may be reused for other tasks.
            HadoopClassLoader ldr = sharedClsLdr != null ? sharedClsLdr :
                createClassLoader(HadoopClassLoader.nameForTask(info, false));

            cls = (Class<? extends HadoopTaskContext>) ldr.loadClass(HadoopV2TaskContext.class.getName());

            fullCtxClsQueue.add(cls);
        }

        Constructor<?> ctr = cls.getConstructor(HadoopTaskInfo.class, HadoopJobEx.class, HadoopJobId.class,
            UUID.class, DataInput.class);

        if (jobConfData == null)
            synchronized (jobConf) {
                if (jobConfData == null) {
                    ByteArrayOutputStream buf = new ByteArrayOutputStream();

                    jobConf.write(new DataOutputStream(buf));

                    jobConfData = buf.toByteArray();
                }
            }

        HadoopTaskContext res = (HadoopTaskContext) ctr.newInstance(info, this, jobId, locNodeId,
            new DataInputStream(new ByteArrayInputStream(jobConfData)));

        fut.onDone(res);

        return res;
    }
    catch (Throwable e) {
        IgniteCheckedException te = transformException(e);

        fut.onDone(te);

        if (e instanceof Error)
            throw (Error) e;

        throw te;
    }
}
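getTaskContext uses GridFutureAdapter to memoize expensive construction: the first thread to putIfAbsent a fresh future builds the context and publishes it with onDone, while every other thread simply blocks on get() of the already-registered future and receives either the result or the recorded failure. A generic sketch of that pattern, with hypothetical names (FutureMemoizer, Factory), could look like this:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

/** Illustrative "memoize via future" helper: one thread computes the value, others wait on the future. */
class FutureMemoizer<K, V> {
    private final ConcurrentMap<K, GridFutureAdapter<V>> cache = new ConcurrentHashMap<>();

    /** Hypothetical factory abstraction standing in for the context construction above. */
    interface Factory<A, B> {
        B create(A key) throws IgniteCheckedException;
    }

    V get(K key, Factory<K, V> factory) throws IgniteCheckedException {
        GridFutureAdapter<V> fut = cache.get(key);

        if (fut != null)
            return fut.get();       // someone already computed (or is computing) the value

        GridFutureAdapter<V> old = cache.putIfAbsent(key, fut = new GridFutureAdapter<>());

        if (old != null)
            return old.get();       // lost the race: wait for the winner's result

        try {
            V val = factory.create(key);

            fut.onDone(val);        // publish the result to all waiters

            return val;
        }
        catch (Exception e) {
            IgniteCheckedException err = e instanceof IgniteCheckedException
                ? (IgniteCheckedException) e : new IgniteCheckedException(e);

            fut.onDone(err);        // publish the failure so waiters do not hang

            throw err;
        }
    }
}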
Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class GridCacheOrderedPreloadingSelfTest, method checkPreloadOrder.
/**
 * @param first First cache mode.
 * @param second Second cache mode.
 * @throws Exception If failed.
 */
private void checkPreloadOrder(CacheMode first, CacheMode second) throws Exception {
    firstCacheMode = first;
    secondCacheMode = second;

    Ignite g = startGrid(0);

    try {
        IgniteCache<Object, Object> cache = g.cache("first");

        // Put some data into cache.
        for (int i = 0; i < 1000; i++)
            cache.put(i, i);

        for (int i = 1; i < GRID_CNT; i++)
            startGrid(i);

        // For first node in topology replicated preloader gets completed right away.
        for (int i = 1; i < GRID_CNT; i++) {
            IgniteKernal kernal = (IgniteKernal) grid(i);

            GridFutureAdapter<?> fut1 = (GridFutureAdapter<?>) kernal.internalCache(FIRST_CACHE_NAME).preloader().syncFuture();
            GridFutureAdapter<?> fut2 = (GridFutureAdapter<?>) kernal.internalCache(SECOND_CACHE_NAME).preloader().syncFuture();

            fut1.get();
            fut2.get();

            long firstSyncTime = times.get(i).get(FIRST_CACHE_NAME);
            long secondSyncTime = times.get(i).get(SECOND_CACHE_NAME);

            assertTrue(FIRST_CACHE_NAME + " [syncTime=" + firstSyncTime + "], " +
                SECOND_CACHE_NAME + " [syncTime=" + secondSyncTime + "]",
                firstSyncTime <= secondSyncTime);
        }
    }
    finally {
        stopAllGrids();
    }
}
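The test blocks on each cache's rebalance sync future (exposed here as a GridFutureAdapter) before comparing the recorded sync times. One plausible way such completion times could be captured, shown purely as an illustration (SyncTimeRecorder is hypothetical and not part of the test), is to attach a listener to each sync future:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.typedef.CI1;

/** Illustrative recorder of sync-future completion times, in the spirit of the test's 'times' map. */
class SyncTimeRecorder {
    private final Map<String, Long> syncTimes = new ConcurrentHashMap<>();

    /** Records the wall-clock time at which the given cache's sync future completes. */
    <T> void track(final String cacheName, IgniteInternalFuture<T> syncFut) {
        syncFut.listen(new CI1<IgniteInternalFuture<T>>() {
            @Override public void apply(IgniteInternalFuture<T> fut) {
                syncTimes.put(cacheName, System.currentTimeMillis());
            }
        });
    }

    /** @return Recorded completion time, or null if the future has not completed yet. */
    Long syncTime(String cacheName) {
        return syncTimes.get(cacheName);
    }
}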