Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
The class IgfsUtils, method readPath.
/**
 * Read non-null path from the input.
 *
 * @param in Input.
 * @return IGFS path.
 * @throws IOException If failed.
 */
public static IgfsPath readPath(ObjectInput in) throws IOException {
    IgfsPath res = new IgfsPath();

    res.readExternal(in);

    return res;
}
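A minimal round-trip sketch of how readPath might be called; it assumes the path was originally serialized with IgfsPath.writeExternal (the Externalizable counterpart of readExternal), and the sample path, stream setup, and the internal IgfsUtils import are illustrative assumptions, not part of the example above.

import java.io.*;
import org.apache.ignite.igfs.IgfsPath;
import org.apache.ignite.internal.processors.igfs.IgfsUtils;

static IgfsPath roundTrip() throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();

    // Serialize a path the way the reader expects (assumption: writeExternal is the counterpart).
    try (ObjectOutput out = new ObjectOutputStream(bos)) {
        new IgfsPath("/tmp/sample/file.txt").writeExternal(out);
    }

    // Read it back via the utility method shown above.
    try (ObjectInput in = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
        return IgfsUtils.readPath(in); // Restores the non-null path.
    }
}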
Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
The class IgniteHadoopWeightedMapReducePlanner, method igfsAffinityNodesForSplit.
/**
 * Get IGFS affinity nodes for split if possible.
 * <p>
 * Order in the returned collection *is* significant: nodes containing more data go first,
 * so the first nodes in the collection are considered preferable for scheduling.
 *
 * @param split Input split.
 * @return IGFS affinity nodes or {@code null} if IGFS is not available.
 * @throws IgniteCheckedException If failed.
 */
@Nullable
private Collection<UUID> igfsAffinityNodesForSplit(HadoopInputSplit split) throws IgniteCheckedException {
    if (split instanceof HadoopFileBlock) {
        HadoopFileBlock split0 = (HadoopFileBlock)split;

        if (IgniteFileSystem.IGFS_SCHEME.equalsIgnoreCase(split0.file().getScheme())) {
            HadoopIgfsEndpoint endpoint = new HadoopIgfsEndpoint(split0.file().getAuthority());

            IgfsEx igfs = (IgfsEx)((IgniteEx)ignite).igfsx(endpoint.igfs());

            if (igfs != null && !igfs.isProxy(split0.file())) {
                IgfsPath path = new IgfsPath(split0.file());

                if (igfs.exists(path)) {
                    Collection<IgfsBlockLocation> blocks;

                    try {
                        blocks = igfs.affinity(path, split0.start(), split0.length());
                    }
                    catch (IgniteException e) {
                        throw new IgniteCheckedException("Failed to get IGFS file block affinity [path=" + path +
                            ", start=" + split0.start() + ", len=" + split0.length() + ']', e);
                    }

                    assert blocks != null;

                    if (blocks.size() == 1)
                        return blocks.iterator().next().nodeIds();
                    else {
                        // The most "local" nodes go first.
                        Map<UUID, Long> idToLen = new HashMap<>();

                        for (IgfsBlockLocation block : blocks) {
                            for (UUID id : block.nodeIds()) {
                                Long len = idToLen.get(id);

                                idToLen.put(id, len == null ? block.length() : block.length() + len);
                            }
                        }

                        // Sort the nodes in non-ascending order by contained data lengths.
                        Map<NodeIdAndLength, UUID> res = new TreeMap<>();

                        for (Map.Entry<UUID, Long> idToLenEntry : idToLen.entrySet()) {
                            UUID id = idToLenEntry.getKey();

                            res.put(new NodeIdAndLength(id, idToLenEntry.getValue()), id);
                        }

                        return new LinkedHashSet<>(res.values());
                    }
                }
            }
        }
    }

    return null;
}
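The aggregation-and-ordering step above can be illustrated in isolation. The sketch below uses made-up node IDs and byte counts and a plain stream sort instead of the planner's private NodeIdAndLength comparator; it only mirrors the idea that nodes hosting more of the split's data come first.

import java.util.*;
import java.util.stream.Collectors;

// Hypothetical per-node data lengths (bytes of the split hosted by each node).
Map<UUID, Long> idToLen = new HashMap<>();

idToLen.put(UUID.randomUUID(), 96L * 1024 * 1024);
idToLen.put(UUID.randomUUID(), 128L * 1024 * 1024);
idToLen.put(UUID.randomUUID(), 32L * 1024 * 1024);

// Order node IDs so that the nodes with the most local data come first.
Collection<UUID> ordered = idToLen.entrySet().stream()
    .sorted(Map.Entry.<UUID, Long>comparingByValue(Comparator.reverseOrder()))
    .map(Map.Entry::getKey)
    .collect(Collectors.toCollection(LinkedHashSet::new));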
Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
The class IgniteHadoopFileSystem, method setTimes.
/** {@inheritDoc} */
@Override public void setTimes(Path p, long mtime, long atime) throws IOException {
    enterBusy();

    try {
        A.notNull(p, "p");

        IgfsPath path = convert(p);

        rmtClient.setTimes(path, atime, mtime);
    }
    finally {
        leaveBusy();
    }
}
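A caller-side sketch of setTimes through the standard Hadoop FileSystem API; the igfs:// URI, file path, and timestamps are placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

Configuration conf = new Configuration();

try (FileSystem fs = FileSystem.get(URI.create("igfs://igfs@localhost:10500/"), conf)) {
    long now = System.currentTimeMillis();

    // FileSystem.setTimes arguments are (path, modification time, access time), in milliseconds.
    fs.setTimes(new Path("/tmp/sample/file.txt"), now, now);
}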
Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
The class IgniteHadoopFileSystem, method open.
/** {@inheritDoc} */
@Override public FSDataInputStream open(Path f, int bufSize) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsPath path = convert(f);

        HadoopIgfsStreamDelegate stream = seqReadsBeforePrefetchOverride ?
            rmtClient.open(path, seqReadsBeforePrefetch) : rmtClient.open(path);

        long logId = -1;

        if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();

            clientLog.logOpen(logId, path, bufSize, stream.length());
        }

        if (LOG.isDebugEnabled())
            LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path +
                ", bufSize=" + bufSize + ']');

        HadoopIgfsInputStream igfsIn = new HadoopIgfsInputStream(stream, stream.length(), bufSize, LOG, clientLog,
            logId);

        if (LOG.isDebugEnabled())
            LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']');

        return new FSDataInputStream(igfsIn);
    }
    finally {
        leaveBusy();
    }
}
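A caller-side sketch of open via the Hadoop FileSystem API; the URI, path, and buffer size are placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

try (FileSystem fs = FileSystem.get(URI.create("igfs://igfs@localhost:10500/"), new Configuration());
    FSDataInputStream in = fs.open(new Path("/tmp/sample/file.txt"), 64 * 1024)) {
    byte[] buf = new byte[4096];

    // Read the first chunk of the file through the IGFS-backed input stream.
    int read = in.read(buf);

    System.out.println("Read " + read + " bytes.");
}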
Use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.
The class IgniteHadoopFileSystem, method create.
/** {@inheritDoc} */
@SuppressWarnings("deprecation")
@Override public FSDataOutputStream create(Path f, final FsPermission perm, boolean overwrite, int bufSize,
    short replication, long blockSize, Progressable progress) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    OutputStream out = null;

    try {
        IgfsPath path = convert(f);

        if (LOG.isDebugEnabled())
            LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() +
                ", path=" + path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');

        Map<String, String> propMap = permission(perm);

        propMap.put(IgfsUtils.PROP_PREFER_LOCAL_WRITES, Boolean.toString(preferLocFileWrites));

        // Create stream and close it in the 'finally' section if any sequential operation failed.
        HadoopIgfsStreamDelegate stream = rmtClient.create(path, overwrite, colocateFileWrites, replication,
            blockSize, propMap);

        assert stream != null;

        long logId = -1;

        if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();

            clientLog.logCreate(logId, path, overwrite, bufSize, replication, blockSize);
        }

        if (LOG.isDebugEnabled())
            LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');

        HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId);

        bufSize = Math.max(64 * 1024, bufSize);

        out = new BufferedOutputStream(igfsOut, bufSize);

        FSDataOutputStream res = new FSDataOutputStream(out, null, 0);

        // Mark stream created successfully.
        out = null;

        return res;
    }
    finally {
        // Close if failed during stream creation.
        if (out != null)
            U.closeQuiet(out);

        leaveBusy();
    }
}
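A caller-side sketch of create using the same overload shown above; the URI, path, permissions, buffer size, replication, and block size are all placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

try (FileSystem fs = FileSystem.get(URI.create("igfs://igfs@localhost:10500/"), new Configuration());
    FSDataOutputStream out = fs.create(new Path("/tmp/sample/out.txt"), FsPermission.getFileDefault(),
        true /* overwrite */, 64 * 1024, (short)1 /* replication */, 128L * 1024 * 1024 /* block size */,
        null /* progress */)) {
    // Write a small payload through the buffered IGFS output stream.
    out.writeUTF("hello");
}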