Use of org.apache.hadoop.hbase.util.CommonFSUtils.StreamLacksCapabilityException in project hbase by apache.
From the createOutput method of class AsyncFSOutputHelper.
/**
 * Create {@link FanOutOneBlockAsyncDFSOutput} for {@link DistributedFileSystem}, and a simple
 * implementation for other {@link FileSystem} which wraps around a {@link FSDataOutputStream}.
 */
public static AsyncFSOutput createOutput(FileSystem fs, Path f, boolean overwrite,
    boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup,
    Class<? extends Channel> channelClass, StreamSlowMonitor monitor)
    throws IOException, CommonFSUtils.StreamLacksCapabilityException {
  if (fs instanceof DistributedFileSystem) {
    return FanOutOneBlockAsyncDFSOutputHelper.createOutput((DistributedFileSystem) fs, f,
      overwrite, createParent, replication, blockSize, eventLoopGroup, channelClass, monitor);
  }
  final FSDataOutputStream out;
  int bufferSize = fs.getConf().getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
    CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
  // This is not a Distributed File System, so it won't be erasure coded; no builder API needed
  if (createParent) {
    out = fs.create(f, overwrite, bufferSize, replication, blockSize, null);
  } else {
    out = fs.createNonRecursive(f, overwrite, bufferSize, replication, blockSize, null);
  }
  // After creating the stream but before using it, ensure it can provide the level of data
  // safety we are configured to provide.
  if (fs.getConf().getBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, true)) {
    if (!out.hasCapability(StreamCapabilities.HFLUSH)) {
      Closeables.close(out, true);
      throw new StreamLacksCapabilityException(StreamCapabilities.HFLUSH);
    }
    if (!out.hasCapability(StreamCapabilities.HSYNC)) {
      Closeables.close(out, true);
      throw new StreamLacksCapabilityException(StreamCapabilities.HSYNC);
    }
  }
  return new WrapperAsyncFSOutput(f, out);
}
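
For context, StreamLacksCapabilityException is only thrown when hbase.unsafe.stream.capability.enforce is left at its default of true and the non-HDFS output stream cannot hflush or hsync. Below is a minimal caller sketch showing how the exception surfaces to a caller of createOutput; the class name DemoCaller, the target path, the NioEventLoopGroup/NioSocketChannel choices, and the StreamSlowMonitor.create(conf, "demo") call are illustrative assumptions, not code from the HBase source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput;
import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutputHelper;
import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;

public class DemoCaller {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/demo-wal"); // illustrative path
    NioEventLoopGroup group = new NioEventLoopGroup();
    try {
      AsyncFSOutput out = AsyncFSOutputHelper.createOutput(fs, path,
        true /* overwrite */, false /* createParent */,
        fs.getDefaultReplication(path), fs.getDefaultBlockSize(path),
        group, NioSocketChannel.class,
        StreamSlowMonitor.create(conf, "demo"));
      out.close();
    } catch (CommonFSUtils.StreamLacksCapabilityException e) {
      // The filesystem cannot hflush/hsync, so WAL durability guarantees cannot be met.
      // Either use a filesystem that supports these capabilities or, accepting possible
      // data loss, set hbase.unsafe.stream.capability.enforce to false.
      System.err.println("Stream lacks capability: " + e.getMessage());
    } finally {
      group.shutdownGracefully();
    }
  }
}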