Use of org.apache.ignite.internal.processors.igfs.IgfsProcessorAdapter in project ignite by apache: class IgfsTask, method map().
/** {@inheritDoc} */
@Nullable @Override public final Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid,
    @Nullable IgfsTaskArgs<T> args) {
    assert ignite != null;
    assert args != null;

    IgniteFileSystem fs = ignite.fileSystem(args.igfsName());

    IgfsProcessorAdapter igfsProc = ((IgniteKernal)ignite).context().igfs();

    Map<ComputeJob, ClusterNode> splitMap = new HashMap<>();

    // Index the subgrid nodes by ID for affinity lookups.
    Map<UUID, ClusterNode> nodes = mapSubgrid(subgrid);

    for (IgfsPath path : args.paths()) {
        IgfsFile file = fs.info(path);

        if (file == null) {
            if (args.skipNonExistentFiles())
                continue;
            else
                throw new IgniteException("Failed to process IGFS file because it doesn't exist: " + path);
        }

        // Split the file into block locations no longer than the configured maximum range length.
        Collection<IgfsBlockLocation> aff = fs.affinity(path, 0, file.length(), args.maxRangeLength());

        long totalLen = 0;

        for (IgfsBlockLocation loc : aff) {
            // Pick the first affinity node of this block range that is present in the subgrid.
            ClusterNode node = null;

            for (UUID nodeId : loc.nodeIds()) {
                node = nodes.get(nodeId);

                if (node != null)
                    break;
            }

            if (node == null)
                throw new IgniteException("Failed to find any of block affinity nodes in subgrid [loc=" + loc +
                    ", subgrid=" + subgrid + ']');

            IgfsJob job = createJob(path, new IgfsFileRange(file.path(), loc.start(), loc.length()), args);

            if (job != null) {
                // Wrap the user job into a compute job and map it to the chosen affinity node.
                ComputeJob jobImpl = igfsProc.createJob(job, fs.name(), file.path(), loc.start(), loc.length(),
                    args.recordResolver());

                splitMap.put(jobImpl, node);
            }

            totalLen += loc.length();
        }

        assert totalLen == file.length();
    }

    return splitMap;
}
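For context on how this map() is exercised: a concrete task only supplies createJob() and reduce(), while the inherited map() shown above splits each input path into block-affinity ranges and routes every range to a node that owns its blocks. Below is a minimal sketch of such a task. The class name SizeTask, the trivial job payload, and the commented-out execute(...) call are illustrative assumptions, not code from the Ignite project; it assumes the IgniteFileSystem.execute(task, resolver, paths, arg) overload and accepts a null record resolver, which leaves the block-aligned ranges untouched.

import java.util.List;

import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.compute.ComputeJobResult;
import org.apache.ignite.igfs.IgfsInputStream;
import org.apache.ignite.igfs.IgfsPath;
import org.apache.ignite.igfs.mapreduce.IgfsFileRange;
import org.apache.ignite.igfs.mapreduce.IgfsJob;
import org.apache.ignite.igfs.mapreduce.IgfsTask;
import org.apache.ignite.igfs.mapreduce.IgfsTaskArgs;

/**
 * Hypothetical task that sums the lengths of all file ranges it is mapped over.
 * Sketch only: it exercises the map() logic shown above but is not part of Ignite.
 */
public class SizeTask extends IgfsTask<Object, Long> {
    /** {@inheritDoc} */
    @Override public IgfsJob createJob(IgfsPath path, IgfsFileRange range, IgfsTaskArgs<Object> args) {
        // One job per affinity-colocated range; map() routes it to a node owning the blocks.
        return new IgfsJob() {
            @Override public Object execute(IgniteFileSystem igfs, IgfsFileRange jobRange, IgfsInputStream in) {
                // Trivial payload: report how many bytes of the file this node was assigned.
                return jobRange.length();
            }

            @Override public void cancel() {
                // No-op: nothing to interrupt.
            }
        };
    }

    /** {@inheritDoc} */
    @Override public Long reduce(List<ComputeJobResult> results) {
        long total = 0;

        for (ComputeJobResult res : results)
            total += res.<Long>getData();

        return total;
    }
}

// Hypothetical usage (file system name and path are examples):
//   IgniteFileSystem fs = ignite.fileSystem("igfs");
//   Long totalBytes = fs.execute(new SizeTask(), null,
//       Collections.singleton(new IgfsPath("/data/input.txt")), null);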
Use of org.apache.ignite.internal.processors.igfs.IgfsProcessorAdapter in project ignite by apache: class IgniteHadoopFileSystemClientSelfTest, method switchHandlerErrorFlag().
/**
 * Set IGFS REST handler error flag to the given state.
 *
 * @param flag Flag state.
 * @throws Exception If failed.
 */
@SuppressWarnings("ConstantConditions")
private void switchHandlerErrorFlag(boolean flag) throws Exception {
    IgfsProcessorAdapter igfsProc = ((IgniteKernal)grid(0)).context().igfs();

    Map<String, IgfsContext> igfsMap = getField(igfsProc, "igfsCache");

    IgfsServerManager srvMgr = F.first(igfsMap.values()).server();

    Collection<IgfsServer> srvrs = getField(srvMgr, "srvrs");

    IgfsServerHandler igfsHnd = getField(F.first(srvrs), "hnd");

    // 'errWrite' is a static flag on the handler class, hence the null target below.
    Field field = igfsHnd.getClass().getDeclaredField("errWrite");

    field.setAccessible(true);

    field.set(null, flag);
}
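The getField(...) calls above rely on a reflection helper that is not part of this snippet. A plausible reconstruction is sketched below; the name and signature are inferred from the call sites, and the real helper in the test class (or a test utility such as GridTestUtils) may well differ.

import java.lang.reflect.Field;

/**
 * Reads a (possibly private) field from an object via reflection.
 * Sketch only: inferred from the call sites above, not the actual test helper.
 *
 * @param target Object to read from.
 * @param fieldName Name of a field declared on the target's class or one of its superclasses.
 * @return Field value cast to the expected type.
 * @throws Exception If the field cannot be found or accessed.
 */
@SuppressWarnings("unchecked")
private static <T> T getField(Object target, String fieldName) throws Exception {
    for (Class<?> cls = target.getClass(); cls != null; cls = cls.getSuperclass()) {
        try {
            Field field = cls.getDeclaredField(fieldName);

            field.setAccessible(true);

            return (T)field.get(target);
        }
        catch (NoSuchFieldException ignored) {
            // Field not declared on this class; try the superclass.
        }
    }

    throw new NoSuchFieldException(fieldName);
}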
Use of org.apache.ignite.internal.processors.igfs.IgfsProcessorAdapter in project ignite by apache: class VisorNodeDataCollectorJob, method igfs().
/**
 * Collect IGFSs.
 *
 * @param res Job result.
 */
protected void igfs(VisorNodeDataCollectorJobResult res) {
    try {
        IgfsProcessorAdapter igfsProc = ignite.context().igfs();

        for (IgniteFileSystem igfs : igfsProc.igfss()) {
            long start0 = U.currentTimeMillis();

            FileSystemConfiguration igfsCfg = igfs.configuration();

            // Skip IGFS instances whose data or meta cache is a proxy on this node.
            if (proxyCache(igfsCfg.getDataCacheConfiguration().getName()) ||
                proxyCache(igfsCfg.getMetaCacheConfiguration().getName()))
                continue;

            try {
                Collection<IpcServerEndpoint> endPoints = igfsProc.endpoints(igfs.name());

                if (endPoints != null) {
                    for (IpcServerEndpoint ep : endPoints) {
                        if (ep.isManagement())
                            res.getIgfsEndpoints().add(new VisorIgfsEndpoint(igfs.name(), ignite.name(),
                                ep.getHost(), ep.getPort()));
                    }
                }

                res.getIgfss().add(new VisorIgfs(igfs));
            }
            finally {
                if (debug)
                    log(ignite.log(), "Collected IGFS: " + igfs.name(), getClass(), start0);
            }
        }
    }
    catch (Exception igfssEx) {
        res.setIgfssEx(igfssEx);
    }
}
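The proxy-cache check above keys off the data and meta cache names taken from FileSystemConfiguration. For reference, a minimal sketch of how an IGFS instance and its backing caches are wired up in an Ignite 2.x configuration; the instance and cache names are arbitrary examples, and the cache settings are typical defaults rather than a verified, tuned configuration.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.FileSystemConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.igfs.IgfsMode;

public class IgfsConfigExample {
    public static void main(String[] args) {
        // Caches backing IGFS file data and metadata; names are illustrative.
        CacheConfiguration<Object, Object> dataCacheCfg = new CacheConfiguration<>("igfs-data");
        dataCacheCfg.setCacheMode(CacheMode.PARTITIONED);

        CacheConfiguration<Object, Object> metaCacheCfg = new CacheConfiguration<>("igfs-meta");
        metaCacheCfg.setCacheMode(CacheMode.REPLICATED);
        // IGFS metadata updates run in transactions.
        metaCacheCfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

        FileSystemConfiguration fsCfg = new FileSystemConfiguration();

        fsCfg.setName("igfs");
        // PRIMARY mode: no secondary Hadoop file system behind this instance.
        fsCfg.setDefaultMode(IgfsMode.PRIMARY);
        fsCfg.setDataCacheConfiguration(dataCacheCfg);
        fsCfg.setMetaCacheConfiguration(metaCacheCfg);

        IgniteConfiguration cfg = new IgniteConfiguration();

        cfg.setFileSystemConfiguration(fsCfg);

        try (Ignite ignite = Ignition.start(cfg)) {
            // igfs.configuration() in the collector job above returns this FileSystemConfiguration.
            System.out.println("IGFS data cache: " +
                ignite.fileSystem("igfs").configuration().getDataCacheConfiguration().getName());
        }
    }
}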