Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
The class HadoopV2Job, method getTaskContext.
/** {@inheritDoc} */
@SuppressWarnings({"unchecked", "MismatchedQueryAndUpdateOfCollection"})
@Override public HadoopTaskContext getTaskContext(HadoopTaskInfo info) throws IgniteCheckedException {
    T2<HadoopTaskType, Integer> locTaskId = new T2<>(info.type(), info.taskNumber());

    GridFutureAdapter<HadoopTaskContext> fut = ctxs.get(locTaskId);

    if (fut != null)
        return fut.get();

    GridFutureAdapter<HadoopTaskContext> old = ctxs.putIfAbsent(locTaskId, fut = new GridFutureAdapter<>());

    if (old != null)
        return old.get();

    Class<? extends HadoopTaskContext> cls = taskCtxClsPool.poll();

    try {
        if (cls == null) {
            // If there is no pooled class, then load a new one.
            // Note that the class loader is identified by the task it was initially created for,
            // but later it may be reused for other tasks.
            HadoopClassLoader ldr = sharedClsLdr != null ? sharedClsLdr :
                createClassLoader(HadoopClassLoader.nameForTask(info, false));

            cls = (Class<? extends HadoopTaskContext>)ldr.loadClass(HadoopV2TaskContext.class.getName());

            fullCtxClsQueue.add(cls);
        }

        Constructor<?> ctr = cls.getConstructor(HadoopTaskInfo.class, HadoopJobEx.class,
            HadoopJobId.class, UUID.class, DataInput.class);

        if (jobConfData == null)
            synchronized (jobConf) {
                if (jobConfData == null) {
                    ByteArrayOutputStream buf = new ByteArrayOutputStream();

                    jobConf.write(new DataOutputStream(buf));

                    jobConfData = buf.toByteArray();
                }
            }

        HadoopTaskContext res = (HadoopTaskContext)ctr.newInstance(info, this, jobId, locNodeId,
            new DataInputStream(new ByteArrayInputStream(jobConfData)));

        fut.onDone(res);

        return res;
    }
    catch (Throwable e) {
        IgniteCheckedException te = transformException(e);

        fut.onDone(te);

        if (e instanceof Error)
            throw (Error)e;

        throw te;
    }
}
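The T2 tuple here acts as a composite map key: its equality and hash code are component-wise, so any two tuples built from the same (type, taskNumber) pair address the same entry in ctxs. Combined with putIfAbsent and a future completed via onDone, this guarantees the context is constructed at most once per task while concurrent callers block on the winner's future. A minimal sketch of the same idiom, with hypothetical names (OncePerKey, computeOnce, and the String payload stand in for the real context types):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.util.future.GridFutureAdapter;
import org.apache.ignite.internal.util.typedef.T2;

public class OncePerKey {
    /** Futures keyed by a (type, number) tuple; T2 equality is component-wise. */
    private final ConcurrentMap<T2<String, Integer>, GridFutureAdapter<String>> cache =
        new ConcurrentHashMap<>();

    /** Builds the value at most once per key; losers of the race wait on the winner's future. */
    public String computeOnce(String type, int num) throws IgniteCheckedException {
        T2<String, Integer> key = new T2<>(type, num);

        GridFutureAdapter<String> fut = cache.get(key);

        if (fut != null)
            return fut.get(); // Work already started (or finished) elsewhere.

        GridFutureAdapter<String> old = cache.putIfAbsent(key, fut = new GridFutureAdapter<>());

        if (old != null)
            return old.get(); // Lost the race; block until the winner completes.

        String res = type + "-" + num; // Expensive construction would go here.

        fut.onDone(res); // Wake up everyone blocked on fut.get().

        return res;
    }
}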
Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
The class HadoopV2Job, method cleanupTaskEnvironment.
/** {@inheritDoc} */
@Override public void cleanupTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException {
    HadoopTaskContext ctx = ctxs.remove(new T2<>(info.type(), info.taskNumber())).get();

    taskCtxClsPool.add(ctx.getClass());

    File locDir = taskLocalDir(igniteWorkDirectory(), locNodeId, info);

    if (locDir.exists())
        U.delete(locDir);
}
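Because tuple equality is value-based, the fresh T2 built here matches the key that getTaskContext inserted earlier, so remove() returns the context's future and the context class can be returned to the pool. A tiny standalone illustration of that lookup behavior:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.ignite.internal.util.typedef.T2;

public class TupleKeyDemo {
    public static void main(String[] args) {
        ConcurrentMap<T2<String, Integer>, String> ctxs = new ConcurrentHashMap<>();

        ctxs.put(new T2<>("MAP", 1), "ctx");

        // A freshly constructed tuple with equal components addresses the same entry:
        System.out.println(ctxs.remove(new T2<>("MAP", 1))); // Prints "ctx".
    }
}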
Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
The class GridDsiPerfJob, method execute.
/** @return Result. */
@SuppressWarnings("ConstantConditions")
@Override public Object execute() {
    ConcurrentMap<String, T2<AtomicLong, AtomicLong>> nodeLoc = ignite.cluster().nodeLocalMap();

    T2<AtomicLong, AtomicLong> cntrs = nodeLoc.get("cntrs");

    if (cntrs == null) {
        T2<AtomicLong, AtomicLong> other = nodeLoc.putIfAbsent("cntrs",
            cntrs = new T2<>(new AtomicLong(), new AtomicLong(System.currentTimeMillis())));

        if (other != null)
            cntrs = other;
    }

    long cnt = cntrs.get1().incrementAndGet();

    GridNearCacheAdapter near = (GridNearCacheAdapter)((IgniteKernal)ignite).internalCache(cacheName);
    GridDhtCacheAdapter dht = near.dht();

    doWork();

    long start = cntrs.get2().get();
    long now = System.currentTimeMillis();

    long dur = now - start;

    if (dur > 20000 && cntrs.get2().compareAndSet(start, System.currentTimeMillis())) {
        cntrs.get1().set(0);

        long txPerSec = cnt / (dur / 1000);

        X.println("Stats [tx/sec=" + txPerSec + ", nearSize=" + near.size() + ", dhtSize=" + dht.size() + ']');

        return new T3<>(txPerSec, near.size(), dht.size());
    }

    return null;
}
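Here T2 bundles two node-local counters into a single map value: get1() counts transactions and get2() holds the start of the current measurement window, and the compareAndSet on the timestamp elects exactly one thread to report and reset the window. A minimal sketch of this windowed-throughput idiom, assuming illustrative names (WindowedThroughput, onOperation):

import java.util.concurrent.atomic.AtomicLong;
import org.apache.ignite.internal.util.typedef.T2;

public class WindowedThroughput {
    /** get1() counts operations, get2() holds the current window's start timestamp. */
    private final T2<AtomicLong, AtomicLong> stats =
        new T2<>(new AtomicLong(), new AtomicLong(System.currentTimeMillis()));

    public void onOperation() {
        long cnt = stats.get1().incrementAndGet();

        long start = stats.get2().get();
        long dur = System.currentTimeMillis() - start;

        // CAS on the timestamp elects exactly one thread to report and reset the window.
        if (dur > 20_000 && stats.get2().compareAndSet(start, System.currentTimeMillis())) {
            stats.get1().set(0);

            System.out.println("ops/sec=" + cnt / (dur / 1000));
        }
    }
}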
Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
The class WebSessionFilter, method doFilterV1.
/**
 * @param httpReq Request.
 * @param res Response.
 * @param chain Filter chain.
 * @return Session ID.
 * @throws IOException In case of I/O error.
 * @throws ServletException In case of servlet error.
 * @throws CacheException In case of other error.
 */
private String doFilterV1(HttpServletRequest httpReq, ServletResponse res, FilterChain chain)
    throws IOException, ServletException, CacheException {
    WebSession cached = null;

    String sesId = httpReq.getRequestedSessionId();

    if (sesId != null) {
        sesId = transformSessionId(sesId);

        for (int i = 0; i < retries; i++) {
            try {
                cached = cache.get(sesId);

                break;
            }
            catch (CacheException | IgniteException | IllegalStateException e) {
                handleLoadSessionException(sesId, i, e);
            }
        }

        if (cached != null) {
            if (log.isDebugEnabled())
                log.debug("Using cached session for ID: " + sesId);

            if (cached.isNew())
                cached = new WebSession(cached.getId(), cached, false);
        }
        else {
            if (log.isDebugEnabled())
                log.debug("Cached session was invalidated and doesn't exist: " + sesId);

            HttpSession ses = httpReq.getSession(false);

            if (ses != null) {
                try {
                    ses.invalidate();
                }
                catch (IllegalStateException ignore) {
                    // Session was already invalidated.
                }
            }

            cached = createSession(httpReq);
        }
    }
    else
        cached = createSession(httpReq);

    assert cached != null;

    sesId = cached.getId();

    cached.servletContext(ctx);
    cached.filter(this);
    cached.resetUpdates();
    cached.genSes(httpReq.getSession(false));

    httpReq = new RequestWrapper(httpReq, cached);

    chain.doFilter(httpReq, res);

    HttpSession ses = httpReq.getSession(false);

    if (ses instanceof WebSession) {
        Collection<T2<String, Object>> updates = ((WebSession)ses).updates();

        if (updates != null)
            updateAttributes(transformSessionId(sesId), updates, ses.getMaxInactiveInterval());
    }

    return sesId;
}
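In this filter T2<String, Object> serves as a plain name/value pair: attribute writes performed during the request are recorded as tuples and flushed to the cache in one batch after the filter chain returns. A hypothetical sketch of that record-and-replay pattern (AttributeRecorder, setAttribute, and applyTo are illustrative names, not the actual WebSession API):

import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import org.apache.ignite.internal.util.typedef.T2;

public class AttributeRecorder {
    /** Each write is recorded as a (name, value) tuple instead of being applied immediately. */
    private final Collection<T2<String, Object>> updates = new ArrayList<>();

    public void setAttribute(String name, Object val) {
        updates.add(new T2<>(name, val));
    }

    /** Replays the recorded updates, e.g. against a cached copy of the session attributes. */
    public void applyTo(Map<String, Object> attrs) {
        for (T2<String, Object> upd : updates)
            attrs.put(upd.get1(), upd.get2());
    }
}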
Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
The class GridCommonAbstractTest, method printPartitionState.
/**
 * Prints partition state for the given cache.
 *
 * @param cacheName Cache name.
 * @param firstParts Number of partitions to print (the first {@code firstParts} partitions).
 */
protected void printPartitionState(String cacheName, int firstParts) {
    StringBuilder sb = new StringBuilder();

    sb.append("----preload sync futures----\n");

    for (Ignite ig : G.allGrids()) {
        IgniteKernal k = (IgniteKernal)ig;

        IgniteInternalFuture<?> syncFut = k.internalCache(cacheName).preloader().syncFuture();

        sb.append("nodeId=").append(k.context().localNodeId())
            .append(" isDone=").append(syncFut.isDone()).append("\n");
    }

    sb.append("----rebalance futures----\n");

    for (Ignite ig : G.allGrids()) {
        IgniteKernal k = (IgniteKernal)ig;

        IgniteInternalFuture<?> f = k.internalCache(cacheName).preloader().rebalanceFuture();

        try {
            sb.append("nodeId=").append(k.context().localNodeId())
                .append(" isDone=").append(f.isDone())
                .append(" res=").append(f.isDone() ? f.get() : "N/A")
                .append(" topVer=").append(U.hasField(f, "topVer") ?
                    String.valueOf(U.field(f, "topVer")) : "[unknown], possibly a finished future")
                .append("\n");

            Map<UUID, T2<Long, Collection<Integer>>> remaining = U.field(f, "remaining");

            sb.append("remaining:");

            if (remaining.isEmpty())
                sb.append("empty\n");
            else
                for (Map.Entry<UUID, T2<Long, Collection<Integer>>> e : remaining.entrySet())
                    sb.append("\nuuid=").append(e.getKey())
                        .append(" startTime=").append(e.getValue().getKey())
                        .append(" parts=").append(Arrays.toString(e.getValue().getValue().toArray()))
                        .append("\n");
        }
        catch (Throwable e) {
            log.error(e.getMessage());
        }
    }

    sb.append("----partition state----\n");

    for (Ignite g : G.allGrids()) {
        IgniteKernal g0 = (IgniteKernal)g;

        sb.append("localNodeId=").append(g0.localNode().id())
            .append(" grid=").append(g0.name()).append("\n");

        IgniteCacheProxy<?, ?> cache = g0.context().cache().jcache(cacheName);

        GridDhtCacheAdapter<?, ?> dht = dht(cache);

        GridDhtPartitionTopology top = dht.topology();

        int parts = firstParts == 0 ? cache.context().config().getAffinity().partitions() : firstParts;

        for (int p = 0; p < parts; p++) {
            AffinityTopologyVersion readyVer = dht.context().shared().exchange().readyAffinityVersion();

            Collection<UUID> affNodes =
                F.nodeIds(dht.context().affinity().assignment(readyVer).idealAssignment().get(p));

            GridDhtLocalPartition part = top.localPartition(p, AffinityTopologyVersion.NONE, false);

            sb.append("local part=");

            if (part != null)
                sb.append(p).append(" state=").append(part.state());
            else
                sb.append(p).append(" is null");

            sb.append(" isAffNode=").append(affNodes.contains(g0.localNode().id())).append("\n");

            for (UUID nodeId : F.nodeIds(g0.context().discovery().allNodes())) {
                if (!nodeId.equals(g0.localNode().id()))
                    sb.append(" nodeId=").append(nodeId)
                        .append(" part=").append(p)
                        .append(" state=").append(top.partitionState(nodeId, p))
                        .append(" isAffNode=").append(affNodes.contains(nodeId)).append("\n");
            }
        }

        sb.append("\n");
    }

    log.info("Partition state dump for <" + cacheName + ">:\n" + sb);
}
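Note the mixed accessors above: the remaining map values are read with getKey()/getValue() even though they are T2 instances. This works because T2 extends IgniteBiTuple, which implements Map.Entry, so getKey() is equivalent to get1() and getValue() to get2(). A quick illustration:

import java.util.Arrays;
import java.util.Collection;
import org.apache.ignite.internal.util.typedef.T2;

public class TupleAccessors {
    public static void main(String[] args) {
        T2<Long, Collection<Integer>> t = new T2<>(42L, Arrays.asList(1, 2, 3));

        // Tuple-style and Map.Entry-style accessors return the same components:
        System.out.println(t.get1() + " == " + t.getKey());   // 42 == 42
        System.out.println(t.get2() + " == " + t.getValue()); // [1, 2, 3] == [1, 2, 3]
    }
}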