Use of org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsOutputStream in project ignite by apache.
The class IgniteHadoopFileSystem, method create().
/** {@inheritDoc} */
@SuppressWarnings("deprecation")
@Override public FSDataOutputStream create(Path f, final FsPermission perm, boolean overwrite, int bufSize,
    short replication, long blockSize, Progressable progress) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    OutputStream out = null;

    try {
        IgfsPath path = convert(f);

        if (LOG.isDebugEnabled())
            LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() +
                ", path=" + path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');

        // Translate Hadoop permissions into IGFS properties and add the local-writes hint.
        Map<String, String> propMap = permission(perm);

        propMap.put(IgfsUtils.PROP_PREFER_LOCAL_WRITES, Boolean.toString(preferLocFileWrites));

        // Create stream and close it in the 'finally' section if any sequential operation failed.
        HadoopIgfsStreamDelegate stream = rmtClient.create(path, overwrite, colocateFileWrites,
            replication, blockSize, propMap);

        assert stream != null;

        long logId = -1;

        if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();

            clientLog.logCreate(logId, path, overwrite, bufSize, replication, blockSize);
        }

        if (LOG.isDebugEnabled())
            LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');

        HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId);

        // Enforce a minimum 64 KB buffer for the wrapping BufferedOutputStream.
        bufSize = Math.max(64 * 1024, bufSize);

        out = new BufferedOutputStream(igfsOut, bufSize);

        FSDataOutputStream res = new FSDataOutputStream(out, null, 0);

        // Mark stream created successfully: 'finally' must no longer close it.
        out = null;

        return res;
    }
    finally {
        // Close if failed during stream creation.
        if (out != null)
            U.closeQuiet(out);

        leaveBusy();
    }
}
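For context, here is a minimal sketch of how this create path is typically reached through the standard Hadoop FileSystem API. The igfs:// URI, host, port and parameter values are illustrative assumptions, not taken from the code above; a real deployment supplies its own IGFS authority and configuration.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IgfsCreateExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical endpoint in the igfs://<igfsName>@<host>:<port>/ form.
        FileSystem fs = FileSystem.get(URI.create("igfs://igfs@localhost:10500/"), new Configuration());

        // Dispatches to the create(...) override above: overwrite=true,
        // 64 KB buffer, replication 1, 64 MB block size.
        try (FSDataOutputStream out = fs.create(new Path("/tmp/example.txt"),
            true, 64 * 1024, (short)1, 64L * 1024 * 1024)) {
            out.writeUTF("hello igfs");
        }
    }
}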
Use of org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsOutputStream in project ignite by apache.
The class IgniteHadoopFileSystem, method append().
/** {@inheritDoc} */
@SuppressWarnings("deprecation")
@Override public FSDataOutputStream append(Path f, int bufSize, Progressable progress) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsPath path = convert(f);

        if (LOG.isDebugEnabled())
            LOG.debug("Opening output stream in append [thread=" + Thread.currentThread().getName() +
                ", path=" + path + ", bufSize=" + bufSize + ']');

        // The 'create' flag is false here, so the target file must already exist.
        HadoopIgfsStreamDelegate stream = rmtClient.append(path, false, null);

        assert stream != null;

        long logId = -1;

        if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();

            clientLog.logAppend(logId, path, bufSize);
        }

        if (LOG.isDebugEnabled())
            LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']');

        HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId);

        // Enforce a minimum 64 KB buffer.
        bufSize = Math.max(64 * 1024, bufSize);

        BufferedOutputStream out = new BufferedOutputStream(igfsOut, bufSize);

        return new FSDataOutputStream(out, null, 0);
    }
    finally {
        leaveBusy();
    }
}
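A matching usage sketch for the append path, under the same assumed endpoint as the previous example. Because the override above passes false as the create flag to rmtClient.append, appending to a missing file fails rather than creating it.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IgfsAppendExample {
    public static void main(String[] args) throws Exception {
        // Same hypothetical endpoint as in the create example.
        FileSystem fs = FileSystem.get(URI.create("igfs://igfs@localhost:10500/"), new Configuration());

        Path file = new Path("/tmp/example.txt");

        // The file must already exist; this append() does not create it.
        try (FSDataOutputStream out = fs.append(file, 64 * 1024)) {
            out.writeBytes("appended line\n");
        }
    }
}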
Use of org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsOutputStream in project ignite by apache.
The class IgniteHadoopFileSystem, method createInternal().
/** {@inheritDoc} */
@SuppressWarnings("deprecation")
@Override public FSDataOutputStream createInternal(
    Path f,
    EnumSet<CreateFlag> flag,
    FsPermission perm,
    int bufSize,
    short replication,
    long blockSize,
    Progressable progress,
    Options.ChecksumOpt checksumOpt,
    boolean createParent
) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
    boolean append = flag.contains(CreateFlag.APPEND);
    boolean create = flag.contains(CreateFlag.CREATE);

    OutputStream out = null;

    try {
        IgfsPath path = convert(f);

        if (LOG.isDebugEnabled())
            LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() +
                ", path=" + path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');

        Map<String, String> permMap = F.asMap(IgfsUtils.PROP_PERMISSION, toString(perm),
            IgfsUtils.PROP_PREFER_LOCAL_WRITES, Boolean.toString(preferLocFileWrites));

        // Create stream and close it in the 'finally' section if any sequential operation failed.
        HadoopIgfsStreamDelegate stream;

        long logId = -1;

        if (append) {
            // Unlike append() above, the CREATE flag decides whether a missing file is created.
            stream = rmtClient.append(path, create, permMap);

            if (clientLog.isLogEnabled()) {
                logId = IgfsLogger.nextId();

                clientLog.logAppend(logId, path, bufSize);
            }

            if (LOG.isDebugEnabled())
                LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
        }
        else {
            stream = rmtClient.create(path, overwrite, colocateFileWrites, replication, blockSize, permMap);

            if (clientLog.isLogEnabled()) {
                logId = IgfsLogger.nextId();

                clientLog.logCreate(logId, path, overwrite, bufSize, replication, blockSize);
            }

            if (LOG.isDebugEnabled())
                LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
        }

        assert stream != null;

        HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId);

        // Enforce a minimum 64 KB buffer.
        bufSize = Math.max(64 * 1024, bufSize);

        out = new BufferedOutputStream(igfsOut, bufSize);

        FSDataOutputStream res = new FSDataOutputStream(out, null, 0);

        // Mark stream created successfully: 'finally' must no longer close it.
        out = null;

        return res;
    }
    finally {
        // Close if failed during stream creation.
        if (out != null)
            U.closeQuiet(out);

        leaveBusy();
    }
}
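Unlike the two methods above, createInternal belongs to Hadoop's AbstractFileSystem contract, so it is normally reached through FileContext rather than FileSystem. A minimal sketch follows, again assuming a hypothetical igfs endpoint; the flag combination selects the branch inside createInternal (CREATE|OVERWRITE takes the create branch, CREATE|APPEND the append branch).

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class IgfsCreateInternalExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // Hypothetical default FS; a real cluster's igfs URI goes here.
        conf.set("fs.defaultFS", "igfs://igfs@localhost:10500/");

        FileContext fc = FileContext.getFileContext(conf);

        // CREATE|OVERWRITE routes to the create branch of createInternal;
        // EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND) would take the append branch.
        try (FSDataOutputStream out = fc.create(new Path("/tmp/example.txt"),
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
            Options.CreateOpts.perms(FsPermission.getFileDefault()))) {
            out.writeUTF("hello igfs v2");
        }
    }
}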