Search in sources :

Example 11 with IgfsPath

use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.

The class IgfsUtils, method readPath.

/**
 * Read non-null path from the input.
 *
 * @param in Input.
 * @return IGFS path.
 * @throws IOException If failed.
 */
public static IgfsPath readPath(ObjectInput in) throws IOException {
    IgfsPath path = new IgfsPath();

    path.readExternal(in);

    return path;
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath)

Example 12 with IgfsPath

use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.

The class IgniteHadoopWeightedMapReducePlanner, method igfsAffinityNodesForSplit.

/**
 * Get IGFS affinity nodes for split if possible.
 * <p>
 * Order in the returned collection *is* significant, meaning that nodes containing more data
 * go first. This way, the 1st nodes in the collection considered to be preferable for scheduling.
 *
 * @param split Input split.
 * @return IGFS affinity or {@code null} if IGFS is not available.
 * @throws IgniteCheckedException If failed.
 */
@Nullable
private Collection<UUID> igfsAffinityNodesForSplit(HadoopInputSplit split) throws IgniteCheckedException {
    if (split instanceof HadoopFileBlock) {
        HadoopFileBlock split0 = (HadoopFileBlock) split;

        // Only IGFS-backed, non-proxy files carry usable affinity information.
        if (IgniteFileSystem.IGFS_SCHEME.equalsIgnoreCase(split0.file().getScheme())) {
            HadoopIgfsEndpoint endpoint = new HadoopIgfsEndpoint(split0.file().getAuthority());

            IgfsEx igfs = (IgfsEx) ((IgniteEx) ignite).igfsx(endpoint.igfs());

            if (igfs != null && !igfs.isProxy(split0.file())) {
                IgfsPath path = new IgfsPath(split0.file());

                if (igfs.exists(path)) {
                    Collection<IgfsBlockLocation> blocks;

                    try {
                        blocks = igfs.affinity(path, split0.start(), split0.length());
                    }
                    catch (IgniteException e) {
                        throw new IgniteCheckedException("Failed to get IGFS file block affinity [path=" + path +
                            ", start=" + split0.start() + ", len=" + split0.length() + ']', e);
                    }

                    assert blocks != null;

                    if (blocks.size() == 1)
                        // Single block: its node order is already the preferred one.
                        return blocks.iterator().next().nodeIds();
                    else {
                        // Accumulate total data length held by each node across all blocks.
                        // The most "local" nodes go first.
                        Map<UUID, Long> idToLen = new HashMap<>();

                        for (IgfsBlockLocation block : blocks) {
                            for (UUID id : block.nodeIds())
                                idToLen.merge(id, block.length(), Long::sum);
                        }

                        // Sort the nodes in non-ascending order by contained data lengths.
                        Map<NodeIdAndLength, UUID> res = new TreeMap<>();

                        for (Map.Entry<UUID, Long> idToLenEntry : idToLen.entrySet()) {
                            UUID id = idToLenEntry.getKey();

                            res.put(new NodeIdAndLength(id, idToLenEntry.getValue()), id);
                        }

                        // LinkedHashSet preserves the sorted iteration order while deduplicating.
                        return new LinkedHashSet<>(res.values());
                    }
                }
            }
        }
    }

    // IGFS affinity not available for this split.
    return null;
}
Also used : LinkedHashSet(java.util.LinkedHashSet) HashMap(java.util.HashMap) IdentityHashMap(java.util.IdentityHashMap) HadoopIgfsEndpoint(org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsEndpoint) IgfsBlockLocation(org.apache.ignite.igfs.IgfsBlockLocation) HadoopFileBlock(org.apache.ignite.internal.processors.hadoop.HadoopFileBlock) TreeMap(java.util.TreeMap) IgfsPath(org.apache.ignite.igfs.IgfsPath) IgfsEx(org.apache.ignite.internal.processors.igfs.IgfsEx) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) UUID(java.util.UUID) HashMap(java.util.HashMap) Map(java.util.Map) IdentityHashMap(java.util.IdentityHashMap) TreeMap(java.util.TreeMap) Nullable(org.jetbrains.annotations.Nullable)

Example 13 with IgfsPath

use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.

The class IgniteHadoopFileSystem, method setTimes.

/** {@inheritDoc} */
@Override
public void setTimes(Path p, long mtime, long atime) throws IOException {
    enterBusy();

    try {
        A.notNull(p, "p");

        // NOTE(review): arguments are forwarded as (atime, mtime), reversed relative to this
        // method's (mtime, atime) signature — presumably matching rmtClient's parameter order; verify.
        rmtClient.setTimes(convert(p), atime, mtime);
    }
    finally {
        leaveBusy();
    }
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath)

Example 14 with IgfsPath

use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.

The class IgniteHadoopFileSystem, method open.

/** {@inheritDoc} */
@Override
public FSDataInputStream open(Path f, int bufSize) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsPath igfsPath = convert(f);

        // Honor the sequential-reads-before-prefetch override when configured.
        HadoopIgfsStreamDelegate delegate = seqReadsBeforePrefetchOverride ?
            rmtClient.open(igfsPath, seqReadsBeforePrefetch) : rmtClient.open(igfsPath);

        long logId = -1;

        if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();

            clientLog.logOpen(logId, igfsPath, bufSize, delegate.length());
        }

        if (LOG.isDebugEnabled())
            LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + igfsPath +
                ", bufSize=" + bufSize + ']');

        HadoopIgfsInputStream in =
            new HadoopIgfsInputStream(delegate, delegate.length(), bufSize, LOG, clientLog, logId);

        if (LOG.isDebugEnabled())
            LOG.debug("Opened input stream [path=" + igfsPath + ", delegate=" + delegate + ']');

        return new FSDataInputStream(in);
    }
    finally {
        leaveBusy();
    }
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) HadoopIgfsInputStream(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsInputStream) HadoopIgfsStreamDelegate(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsStreamDelegate) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)

Example 15 with IgfsPath

use of org.apache.ignite.igfs.IgfsPath in project ignite by apache.

The class IgniteHadoopFileSystem, method create.

/** {@inheritDoc} */
@SuppressWarnings("deprecation")
@Override
public FSDataOutputStream create(Path f, final FsPermission perm, boolean overwrite, int bufSize, short replication, long blockSize, Progressable progress) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    OutputStream out = null;

    try {
        IgfsPath path = convert(f);

        if (LOG.isDebugEnabled())
            // Fixed: separator between the 'thread' and 'path' attributes was missing in the message.
            LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() +
                ", path=" + path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');

        Map<String, String> propMap = permission(perm);

        propMap.put(IgfsUtils.PROP_PREFER_LOCAL_WRITES, Boolean.toString(preferLocFileWrites));

        // Create stream and close it in the 'finally' section if any sequential operation failed.
        HadoopIgfsStreamDelegate stream = rmtClient.create(path, overwrite, colocateFileWrites, replication, blockSize, propMap);

        assert stream != null;

        long logId = -1;

        if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();

            clientLog.logCreate(logId, path, overwrite, bufSize, replication, blockSize);
        }

        if (LOG.isDebugEnabled())
            LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');

        HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId);

        // Enforce a minimum 64 KB buffer to avoid degenerate small writes.
        bufSize = Math.max(64 * 1024, bufSize);

        out = new BufferedOutputStream(igfsOut, bufSize);

        FSDataOutputStream res = new FSDataOutputStream(out, null, 0);

        // Mark stream created successfully.
        out = null;

        return res;
    }
    finally {
        // Close if failed during stream creation.
        if (out != null)
            U.closeQuiet(out);

        leaveBusy();
    }
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) HadoopIgfsStreamDelegate(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsStreamDelegate) HadoopIgfsOutputStream(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsOutputStream) BufferedOutputStream(java.io.BufferedOutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) OutputStream(java.io.OutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) HadoopIgfsOutputStream(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsOutputStream) BufferedOutputStream(java.io.BufferedOutputStream)

Aggregations

IgfsPath (org.apache.ignite.igfs.IgfsPath)161 IgfsOutputStream (org.apache.ignite.igfs.IgfsOutputStream)23 IOException (java.io.IOException)22 ArrayList (java.util.ArrayList)15 IgniteCheckedException (org.apache.ignite.IgniteCheckedException)14 HashMap (java.util.HashMap)13 IgniteException (org.apache.ignite.IgniteException)13 IgniteFileSystem (org.apache.ignite.IgniteFileSystem)13 IgfsFile (org.apache.ignite.igfs.IgfsFile)13 IgfsException (org.apache.ignite.igfs.IgfsException)12 IgniteUuid (org.apache.ignite.lang.IgniteUuid)11 IgfsBlockLocation (org.apache.ignite.igfs.IgfsBlockLocation)10 IgfsInputStream (org.apache.ignite.igfs.IgfsInputStream)10 Map (java.util.Map)9 Path (org.apache.hadoop.fs.Path)9 IgfsPathNotFoundException (org.apache.ignite.igfs.IgfsPathNotFoundException)9 FileNotFoundException (java.io.FileNotFoundException)6 OutputStream (java.io.OutputStream)6 IgfsDirectoryNotEmptyException (org.apache.ignite.igfs.IgfsDirectoryNotEmptyException)6 IgfsParentNotDirectoryException (org.apache.ignite.igfs.IgfsParentNotDirectoryException)6