Search in sources:

Example 1 with HadoopIgfsStreamDelegate

Use of org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsStreamDelegate in the Apache Ignite project.

From the class IgniteHadoopFileSystem, method create.

/**
 * {@inheritDoc}
 * <p>
 * Creates a file in IGFS and returns a buffered Hadoop output stream over it.
 * The remote stream is closed in the {@code finally} block if any step after
 * its creation fails, so a partially-constructed stream never leaks.
 */
@SuppressWarnings("deprecation")
@Override
public FSDataOutputStream create(Path f, final FsPermission perm, boolean overwrite, int bufSize, short replication, long blockSize, Progressable progress) throws IOException {
    A.notNull(f, "f");
    enterBusy();
    // Non-null only while the stream is created but not yet safely handed to the caller.
    OutputStream out = null;
    try {
        IgfsPath path = convert(f);
        if (LOG.isDebugEnabled())
            LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() + ", path=" + path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');
        Map<String, String> propMap = permission(perm);
        propMap.put(IgfsUtils.PROP_PREFER_LOCAL_WRITES, Boolean.toString(preferLocFileWrites));
        // Create stream and close it in the 'finally' section if any sequential operation failed.
        HadoopIgfsStreamDelegate stream = rmtClient.create(path, overwrite, colocateFileWrites, replication, blockSize, propMap);
        assert stream != null;
        long logId = -1;
        if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();
            clientLog.logCreate(logId, path, overwrite, bufSize, replication, blockSize);
        }
        if (LOG.isDebugEnabled())
            LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
        HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId);
        // Enforce a minimum 64 KB buffer regardless of the caller-supplied size.
        bufSize = Math.max(64 * 1024, bufSize);
        out = new BufferedOutputStream(igfsOut, bufSize);
        FSDataOutputStream res = new FSDataOutputStream(out, null, 0);
        // Mark stream created successfully.
        out = null;
        return res;
    } finally {
        // Close if failed during stream creation.
        if (out != null)
            U.closeQuiet(out);
        leaveBusy();
    }
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) HadoopIgfsStreamDelegate(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsStreamDelegate) HadoopIgfsOutputStream(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsOutputStream) BufferedOutputStream(java.io.BufferedOutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) OutputStream(java.io.OutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) HadoopIgfsOutputStream(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsOutputStream) BufferedOutputStream(java.io.BufferedOutputStream)

Example 2 with HadoopIgfsStreamDelegate

Use of org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsStreamDelegate in the Apache Ignite project.

From the class IgniteHadoopFileSystem, method append.

/**
 * {@inheritDoc}
 * <p>
 * Opens an existing IGFS file for append and returns a buffered Hadoop
 * output stream over it. Mirrors the close-on-failure pattern used by
 * {@code create}: if wrapping the remote stream fails, it is closed in the
 * {@code finally} block instead of leaking.
 */
@SuppressWarnings("deprecation")
@Override
public FSDataOutputStream append(Path f, int bufSize, Progressable progress) throws IOException {
    A.notNull(f, "f");
    enterBusy();
    // Non-null only while the stream is created but not yet safely handed to the caller.
    OutputStream out = null;
    try {
        IgfsPath path = convert(f);
        if (LOG.isDebugEnabled())
            LOG.debug("Opening output stream in append [thread=" + Thread.currentThread().getName() + ", path=" + path + ", bufSize=" + bufSize + ']');
        // Create stream and close it in the 'finally' section if any sequential operation failed.
        HadoopIgfsStreamDelegate stream = rmtClient.append(path, false, null);
        assert stream != null;
        long logId = -1;
        if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();
            clientLog.logAppend(logId, path, bufSize);
        }
        if (LOG.isDebugEnabled())
            LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
        HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId);
        // Enforce a minimum 64 KB buffer regardless of the caller-supplied size.
        bufSize = Math.max(64 * 1024, bufSize);
        out = new BufferedOutputStream(igfsOut, bufSize);
        FSDataOutputStream res = new FSDataOutputStream(out, null, 0);
        // Mark stream created successfully.
        out = null;
        return res;
    } finally {
        // Close if failed during stream creation.
        if (out != null)
            U.closeQuiet(out);
        leaveBusy();
    }
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) HadoopIgfsStreamDelegate(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsStreamDelegate) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) HadoopIgfsOutputStream(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsOutputStream) BufferedOutputStream(java.io.BufferedOutputStream)

Example 3 with HadoopIgfsStreamDelegate

Use of org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsStreamDelegate in the Apache Ignite project.

From the class IgniteHadoopFileSystem, method open.

/**
 * {@inheritDoc}
 * <p>
 * Opens an IGFS file for reading and wraps the remote stream delegate in a
 * Hadoop-compatible input stream.
 */
@Override
public FSDataInputStream open(Path f, int bufSize) throws IOException {
    A.notNull(f, "f");
    enterBusy();
    try {
        IgfsPath igfsPath = convert(f);
        // Honor the sequential-reads-before-prefetch override when it is configured.
        HadoopIgfsStreamDelegate delegate;
        if (seqReadsBeforePrefetchOverride)
            delegate = rmtClient.open(igfsPath, seqReadsBeforePrefetch);
        else
            delegate = rmtClient.open(igfsPath);
        // -1 means client-side logging is disabled for this operation.
        long logId = -1;
        if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();
            clientLog.logOpen(logId, igfsPath, bufSize, delegate.length());
        }
        if (LOG.isDebugEnabled())
            LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + igfsPath + ", bufSize=" + bufSize + ']');
        HadoopIgfsInputStream in = new HadoopIgfsInputStream(delegate, delegate.length(), bufSize, LOG, clientLog, logId);
        if (LOG.isDebugEnabled())
            LOG.debug("Opened input stream [path=" + igfsPath + ", delegate=" + delegate + ']');
        return new FSDataInputStream(in);
    } finally {
        leaveBusy();
    }
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) HadoopIgfsInputStream(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsInputStream) HadoopIgfsStreamDelegate(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsStreamDelegate) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)

Example 4 with HadoopIgfsStreamDelegate

Use of org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsStreamDelegate in the Apache Ignite project.

From the class IgniteHadoopFileSystem, method createInternal.

/**
 * {@inheritDoc}
 * <p>
 * Creates or appends to an IGFS file depending on the supplied
 * {@link CreateFlag}s and returns a buffered Hadoop output stream. The remote
 * stream is closed in the {@code finally} block if any step after its
 * creation fails, so a partially-constructed stream never leaks.
 */
@SuppressWarnings("deprecation")
@Override
public FSDataOutputStream createInternal(Path f, EnumSet<CreateFlag> flag, FsPermission perm, int bufSize, short replication, long blockSize, Progressable progress, Options.ChecksumOpt checksumOpt, boolean createParent) throws IOException {
    A.notNull(f, "f");
    enterBusy();
    boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
    boolean append = flag.contains(CreateFlag.APPEND);
    boolean create = flag.contains(CreateFlag.CREATE);
    // Non-null only while the stream is created but not yet safely handed to the caller.
    OutputStream out = null;
    try {
        IgfsPath path = convert(f);
        if (LOG.isDebugEnabled())
            LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() + ", path=" + path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');
        Map<String, String> permMap = F.asMap(IgfsUtils.PROP_PERMISSION, toString(perm), IgfsUtils.PROP_PREFER_LOCAL_WRITES, Boolean.toString(preferLocFileWrites));
        // Create stream and close it in the 'finally' section if any sequential operation failed.
        HadoopIgfsStreamDelegate stream;
        // -1 means client-side logging is disabled for this operation.
        long logId = -1;
        if (append) {
            stream = rmtClient.append(path, create, permMap);
            if (clientLog.isLogEnabled()) {
                logId = IgfsLogger.nextId();
                clientLog.logAppend(logId, path, bufSize);
            }
            if (LOG.isDebugEnabled())
                LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
        } else {
            stream = rmtClient.create(path, overwrite, colocateFileWrites, replication, blockSize, permMap);
            if (clientLog.isLogEnabled()) {
                logId = IgfsLogger.nextId();
                clientLog.logCreate(logId, path, overwrite, bufSize, replication, blockSize);
            }
            if (LOG.isDebugEnabled())
                LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
        }
        assert stream != null;
        HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId);
        // Enforce a minimum 64 KB buffer regardless of the caller-supplied size.
        bufSize = Math.max(64 * 1024, bufSize);
        out = new BufferedOutputStream(igfsOut, bufSize);
        FSDataOutputStream res = new FSDataOutputStream(out, null, 0);
        // Mark stream created successfully.
        out = null;
        return res;
    } finally {
        // Close if failed during stream creation.
        if (out != null)
            U.closeQuiet(out);
        leaveBusy();
    }
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) HadoopIgfsStreamDelegate(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsStreamDelegate) HadoopIgfsOutputStream(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsOutputStream) BufferedOutputStream(java.io.BufferedOutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) OutputStream(java.io.OutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) HadoopIgfsOutputStream(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsOutputStream) BufferedOutputStream(java.io.BufferedOutputStream)

Example 5 with HadoopIgfsStreamDelegate

Use of org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsStreamDelegate in the Apache Ignite project.

From the class IgniteHadoopFileSystem, method open.

/**
 * {@inheritDoc}
 * <p>
 * Opens an IGFS file for reading and wraps the remote stream delegate in a
 * Hadoop-compatible input stream.
 */
@Override
public FSDataInputStream open(Path f, int bufSize) throws IOException {
    A.notNull(f, "f");
    enterBusy();
    try {
        IgfsPath igfsPath = convert(f);
        // Honor the sequential-reads-before-prefetch override when it is configured.
        HadoopIgfsStreamDelegate delegate;
        if (seqReadsBeforePrefetchOverride)
            delegate = rmtClient.open(igfsPath, seqReadsBeforePrefetch);
        else
            delegate = rmtClient.open(igfsPath);
        // -1 means client-side logging is disabled for this operation.
        long logId = -1;
        if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();
            clientLog.logOpen(logId, igfsPath, bufSize, delegate.length());
        }
        if (LOG.isDebugEnabled())
            LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + igfsPath + ", bufSize=" + bufSize + ']');
        HadoopIgfsInputStream in = new HadoopIgfsInputStream(delegate, delegate.length(), bufSize, LOG, clientLog, logId);
        if (LOG.isDebugEnabled())
            LOG.debug("Opened input stream [path=" + igfsPath + ", delegate=" + delegate + ']');
        return new FSDataInputStream(in);
    } finally {
        leaveBusy();
    }
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) HadoopIgfsInputStream(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsInputStream) HadoopIgfsStreamDelegate(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsStreamDelegate) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)

Aggregations

IgfsPath (org.apache.ignite.igfs.IgfsPath)5 HadoopIgfsStreamDelegate (org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsStreamDelegate)5 BufferedOutputStream (java.io.BufferedOutputStream)3 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)3 HadoopIgfsOutputStream (org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsOutputStream)3 OutputStream (java.io.OutputStream)2 FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)2 HadoopIgfsInputStream (org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsInputStream)2