Use of org.apache.ignite.internal.processors.igfs.IgfsEx in project ignite by apache.
Source: class VisorTaskUtils, method resolveIgfsProfilerLogsDir.
/**
 * Resolve IGFS profiler logs directory.
 *
 * @param igfs IGFS instance to resolve logs dir for.
 * @return {@link Path} to log dir or {@code null} if not found.
 * @throws IgniteCheckedException if failed to resolve.
 */
public static Path resolveIgfsProfilerLogsDir(IgniteFileSystem igfs) throws IgniteCheckedException {
    // Guard clauses: reject missing or unexpected IGFS instances up front.
    if (igfs == null)
        throw new IgniteCheckedException("Failed to get profiler log folder (IGFS instance not found)");

    if (!(igfs instanceof IgfsEx))
        throw new IgniteCheckedException("Failed to get profiler log folder (unexpected IGFS instance type)");

    String logsDir = ((IgfsEx)igfs).clientLogDirectory();

    // Fall back to the default log dir when the instance has none configured.
    URL logsDirUrl = U.resolveIgniteUrl(logsDir != null ? logsDir : DFLT_IGFS_LOG_DIR);

    return logsDirUrl != null ? new File(logsDirUrl.getPath()).toPath() : null;
}
Use of org.apache.ignite.internal.processors.igfs.IgfsEx in project ignite by apache.
Source: class HadoopFIleSystemFactorySelfTest, method start.
/**
 * Start Ignite node with IGFS instance.
 *
 * @param name Node and IGFS name.
 * @param endpointPort Endpoint port.
 * @param dfltMode Default path mode.
 * @param secondaryFs Secondary file system.
 * @return Igfs instance.
 */
private static IgfsEx start(String name, int endpointPort, IgfsMode dfltMode, @Nullable IgfsSecondaryFileSystem secondaryFs) {
    // IPC endpoint: TCP on loopback at the requested port.
    IgfsIpcEndpointConfiguration ipcCfg = new IgfsIpcEndpointConfiguration();

    ipcCfg.setType(IgfsIpcEndpointType.TCP);
    ipcCfg.setHost("127.0.0.1");
    ipcCfg.setPort(endpointPort);

    // Data cache: partitioned, transactional, no backups, group-block affinity.
    CacheConfiguration dataCache = defaultCacheConfiguration();

    dataCache.setCacheMode(PARTITIONED);
    dataCache.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
    dataCache.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(2));
    dataCache.setBackups(0);
    dataCache.setAtomicityMode(TRANSACTIONAL);

    // Meta cache: replicated, transactional.
    CacheConfiguration metaCache = defaultCacheConfiguration();

    metaCache.setCacheMode(REPLICATED);
    metaCache.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
    metaCache.setAtomicityMode(TRANSACTIONAL);

    FileSystemConfiguration fsCfg = new FileSystemConfiguration();

    fsCfg.setName(name);
    fsCfg.setDefaultMode(dfltMode);
    fsCfg.setIpcEndpointConfiguration(ipcCfg);
    fsCfg.setSecondaryFileSystem(secondaryFs);
    fsCfg.setDataCacheConfiguration(dataCache);
    fsCfg.setMetaCacheConfiguration(metaCache);

    // Path modes only make sense when a secondary file system is present.
    if (secondaryFs != null) {
        Map<String, IgfsMode> pathModes = new HashMap<>();

        pathModes.put("/ignite/sync/", IgfsMode.DUAL_SYNC);
        pathModes.put("/ignite/async/", IgfsMode.DUAL_ASYNC);
        pathModes.put("/ignite/proxy/", IgfsMode.PROXY);

        fsCfg.setPathModes(pathModes);
    }

    TcpDiscoverySpi disco = new TcpDiscoverySpi();

    disco.setIpFinder(new TcpDiscoveryVmIpFinder(true));

    IgniteConfiguration cfg = new IgniteConfiguration();

    cfg.setIgniteInstanceName(name);
    cfg.setDiscoverySpi(disco);
    cfg.setFileSystemConfiguration(fsCfg);
    cfg.setLocalHost("127.0.0.1");
    cfg.setConnectorConfiguration(null);

    return (IgfsEx)G.start(cfg).fileSystem(name);
}
Use of org.apache.ignite.internal.processors.igfs.IgfsEx in project ignite by apache.
Source: class IgniteHadoopWeightedMapReducePlanner, method igfsAffinityNodesForSplit.
/**
 * Get IGFS affinity nodes for split if possible.
 * <p>
 * Order in the returned collection *is* significant, meaning that nodes containing more data
 * go first. This way, the 1st nodes in the collection considered to be preferable for scheduling.
 *
 * @param split Input split.
 * @return IGFS affinity or {@code null} if IGFS is not available.
 * @throws IgniteCheckedException If failed.
 */
@Nullable
private Collection<UUID> igfsAffinityNodesForSplit(HadoopInputSplit split) throws IgniteCheckedException {
    // Guard clauses replace the original deep nesting; each bail-out returns null
    // ("IGFS affinity not available"), exactly as the nested form did.
    if (!(split instanceof HadoopFileBlock))
        return null;

    HadoopFileBlock split0 = (HadoopFileBlock)split;

    if (!IgniteFileSystem.IGFS_SCHEME.equalsIgnoreCase(split0.file().getScheme()))
        return null;

    HadoopIgfsEndpoint endpoint = new HadoopIgfsEndpoint(split0.file().getAuthority());

    IgfsEx igfs = (IgfsEx)((IgniteEx)ignite).igfsx(endpoint.igfs());

    // Proxy paths are served by the secondary file system, so no local affinity.
    if (igfs == null || igfs.isProxy(split0.file()))
        return null;

    IgfsPath path = new IgfsPath(split0.file());

    if (!igfs.exists(path))
        return null;

    Collection<IgfsBlockLocation> blocks;

    try {
        blocks = igfs.affinity(path, split0.start(), split0.length());
    }
    catch (IgniteException e) {
        throw new IgniteCheckedException("Failed to get IGFS file block affinity [path=" + path +
            ", start=" + split0.start() + ", len=" + split0.length() + ']', e);
    }

    assert blocks != null;

    // Single block: its node list is already the answer.
    if (blocks.size() == 1)
        return blocks.iterator().next().nodeIds();

    // Aggregate total data length per node. Map.merge replaces the manual
    // get-null-check-put accumulation of the original.
    Map<UUID, Long> idToLen = new HashMap<>();

    for (IgfsBlockLocation block : blocks) {
        for (UUID id : block.nodeIds())
            idToLen.merge(id, block.length(), Long::sum);
    }

    // Sort the nodes in non-ascending order by contained data lengths.
    Map<NodeIdAndLength, UUID> res = new TreeMap<>();

    for (Map.Entry<UUID, Long> idToLenEntry : idToLen.entrySet()) {
        UUID id = idToLenEntry.getKey();

        res.put(new NodeIdAndLength(id, idToLenEntry.getValue()), id);
    }

    return new LinkedHashSet<>(res.values());
}
Use of org.apache.ignite.internal.processors.igfs.IgfsEx in project ignite by apache.
Source: class IgfsFragmentizerAbstractSelfTest, method awaitFileFragmenting.
/**
 * Wait until the fragmentizer has processed the given file, i.e. its file map
 * has no remaining ranges.
 *
 * @param gridIdx Grid index.
 * @param path Path to await.
 * @throws Exception If failed.
 */
protected void awaitFileFragmenting(int gridIdx, IgfsPath path) throws Exception {
    IgfsEx igfs = (IgfsEx)grid(gridIdx).fileSystem("igfs");

    IgfsMetaManager meta = igfs.context().meta();

    IgniteUuid fileId = meta.fileId(path);

    if (fileId == null)
        throw new IgfsPathNotFoundException("File not found: " + path);

    // Poll the file info every 100 ms until all fragment ranges are gone.
    while (true) {
        IgfsEntryInfo fileInfo = meta.info(fileId);

        if (fileInfo == null)
            throw new IgfsPathNotFoundException("File not found: " + path);

        if (fileInfo.fileMap().ranges().isEmpty())
            return;

        U.sleep(100);
    }
}
Aggregations