Use of org.apache.hadoop.fs.Options.CreateOpts in project hadoop by apache.
In class FileSystemTestWrapper, method createFile:
/*
* Create files with numBlocks blocks each with block size blockSize.
*/
public long createFile(Path path, int numBlocks, CreateOpts... options)
    throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue() : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out = create(path, EnumSet.of(CreateFlag.CREATE), options);
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
  return data.length;
}
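A minimal usage sketch, assuming a test holds a FileSystemTestWrapper instance named wrapper; the helper name, path, and 1 MB block size are illustrative, not taken from the Hadoop test sources:

// Hypothetical test helper: write a two-block file with an explicit 1 MB block size.
// Without the BlockSize option, createFile falls back to DEFAULT_BLOCK_SIZE.
static long writeTwoBlockFile(FileSystemTestWrapper wrapper) throws IOException {
  Path file = new Path("/test/createFile/file1");   // illustrative path
  long bytesWritten = wrapper.createFile(file, 2, CreateOpts.blockSize(1024 * 1024));
  // createFile returns the total bytes written: numBlocks * blockSize = 2 MB here.
  return bytesWritten;
}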
Use of org.apache.hadoop.fs.Options.CreateOpts in project hadoop by apache.
In class FileSystemTestWrapper, method appendToFile:
public void appendToFile(Path path, int numBlocks, CreateOpts... options)
    throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue() : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out = fs.append(path);
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
}
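A hedged follow-up to the createFile sketch above. Because the method opens the stream with fs.append(path), the target file must already exist, and the BlockSize option only controls how much data getFileData generates; it does not change the block size of the existing file. The helper name is again illustrative:

// Hypothetical continuation: append one more block of data to an existing file.
static void appendOneBlock(FileSystemTestWrapper wrapper, Path file) throws IOException {
  // BlockSize here only sizes the generated payload, not the file's on-disk block size.
  wrapper.appendToFile(file, 1, CreateOpts.blockSize(1024 * 1024));
}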
Use of org.apache.hadoop.fs.Options.CreateOpts in project hadoop by apache.
In class AbstractFileSystem, method create:
/**
* The specification of this method matches that of
* {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except
* that the Path f must be fully qualified and the permission is absolute
* (i.e. umask has been applied).
*/
public final FSDataOutputStream create(final Path f,
    final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts)
    throws AccessControlException, FileAlreadyExistsException,
    FileNotFoundException, ParentNotDirectoryException,
    UnsupportedFileSystemException, UnresolvedLinkException, IOException {
  checkPath(f);
  int bufferSize = -1;
  short replication = -1;
  long blockSize = -1;
  int bytesPerChecksum = -1;
  ChecksumOpt checksumOpt = null;
  FsPermission permission = null;
  Progressable progress = null;
  Boolean createParent = null;
  for (CreateOpts iOpt : opts) {
    if (CreateOpts.BlockSize.class.isInstance(iOpt)) {
      if (blockSize != -1) {
        throw new HadoopIllegalArgumentException("BlockSize option is set multiple times");
      }
      blockSize = ((CreateOpts.BlockSize) iOpt).getValue();
    } else if (CreateOpts.BufferSize.class.isInstance(iOpt)) {
      if (bufferSize != -1) {
        throw new HadoopIllegalArgumentException("BufferSize option is set multiple times");
      }
      bufferSize = ((CreateOpts.BufferSize) iOpt).getValue();
    } else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) {
      if (replication != -1) {
        throw new HadoopIllegalArgumentException("ReplicationFactor option is set multiple times");
      }
      replication = ((CreateOpts.ReplicationFactor) iOpt).getValue();
    } else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) {
      if (bytesPerChecksum != -1) {
        throw new HadoopIllegalArgumentException("BytesPerChecksum option is set multiple times");
      }
      bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
    } else if (CreateOpts.ChecksumParam.class.isInstance(iOpt)) {
      if (checksumOpt != null) {
        throw new HadoopIllegalArgumentException("CreateChecksumType option is set multiple times");
      }
      checksumOpt = ((CreateOpts.ChecksumParam) iOpt).getValue();
    } else if (CreateOpts.Perms.class.isInstance(iOpt)) {
      if (permission != null) {
        throw new HadoopIllegalArgumentException("Perms option is set multiple times");
      }
      permission = ((CreateOpts.Perms) iOpt).getValue();
    } else if (CreateOpts.Progress.class.isInstance(iOpt)) {
      if (progress != null) {
        throw new HadoopIllegalArgumentException("Progress option is set multiple times");
      }
      progress = ((CreateOpts.Progress) iOpt).getValue();
    } else if (CreateOpts.CreateParent.class.isInstance(iOpt)) {
      if (createParent != null) {
        throw new HadoopIllegalArgumentException("CreateParent option is set multiple times");
      }
      createParent = ((CreateOpts.CreateParent) iOpt).getValue();
    } else {
      throw new HadoopIllegalArgumentException("Unknown CreateOpts of type " + iOpt.getClass().getName());
    }
  }
  if (permission == null) {
    throw new HadoopIllegalArgumentException("no permission supplied");
  }
  FsServerDefaults ssDef = getServerDefaults();
  if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) {
    throw new IOException("Internal error: default blockSize is not a multiple of default bytesPerChecksum ");
  }
  if (blockSize == -1) {
    blockSize = ssDef.getBlockSize();
  }
  // Create a checksum option honoring user input as much as possible.
  // If bytesPerChecksum is specified, it will override the one set in
  // checksumOpt. Any missing value will be filled in using the default.
  ChecksumOpt defaultOpt = new ChecksumOpt(ssDef.getChecksumType(), ssDef.getBytesPerChecksum());
  checksumOpt = ChecksumOpt.processChecksumOpt(defaultOpt, checksumOpt, bytesPerChecksum);
  if (bufferSize == -1) {
    bufferSize = ssDef.getFileBufferSize();
  }
  if (replication == -1) {
    replication = ssDef.getReplication();
  }
  if (createParent == null) {
    createParent = false;
  }
  if (blockSize % bytesPerChecksum != 0) {
    throw new HadoopIllegalArgumentException("blockSize should be a multiple of checksumsize");
  }
  return this.createInternal(f, createFlag, permission, bufferSize, replication,
      blockSize, progress, checksumOpt, createParent);
}
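To make the option-handling rules above concrete, here is a hedged sketch of a direct call; the afs variable, namenode URI, and 128 MB block size are illustrative. In practice callers usually go through FileContext, which supplies the mandatory Perms option and applies the umask:

// Hypothetical caller of AbstractFileSystem#create (names and values are illustrative).
// Perms must be supplied (a null permission throws HadoopIllegalArgumentException);
// BlockSize is explicit, while buffer size, replication and checksum parameters
// fall back to getServerDefaults(). Repeating any option is rejected.
static FSDataOutputStream createWithExplicitOpts(AbstractFileSystem afs) throws IOException {
  return afs.create(
      new Path("hdfs://nn1/user/alice/data"),           // must be fully qualified
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      CreateOpts.perms(FsPermission.getFileDefault()),  // absolute permission, umask already applied
      CreateOpts.blockSize(128L * 1024 * 1024),
      CreateOpts.createParent());
}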
Use of org.apache.hadoop.fs.Options.CreateOpts in project hadoop by apache.
In class FileContext, method create:
/**
* Create or overwrite file on indicated path and returns an output stream for
* writing into the file.
*
* @param f the file name to open
* @param createFlag gives the semantics of create; see {@link CreateFlag}
* @param opts file creation options; see {@link Options.CreateOpts}.
* <ul>
* <li>Progress - to report progress on the operation - default null
* <li>Permission - umask is applied against permission: default is
* FsPermissions:getDefault()
*
* <li>CreateParent - create missing parent path; default is to not
* to create parents
* <li>The defaults for the following are SS defaults of the file
* server implementing the target path. Not all parameters make sense
* for all kinds of file system - eg. localFS ignores Blocksize,
* replication, checksum
* <ul>
* <li>BufferSize - buffersize used in FSDataOutputStream
* <li>Blocksize - block size for file blocks
* <li>ReplicationFactor - replication for blocks
* <li>ChecksumParam - Checksum parameters. server default is used
* if not specified.
* </ul>
* </ul>
*
* @return {@link FSDataOutputStream} for created file
*
* @throws AccessControlException If access is denied
* @throws FileAlreadyExistsException If file <code>f</code> already exists
* @throws FileNotFoundException If parent of <code>f</code> does not exist
* and <code>createParent</code> is false
* @throws ParentNotDirectoryException If parent of <code>f</code> is not a
* directory.
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*
* RuntimeExceptions:
* @throws InvalidPathException If path <code>f</code> is not valid
*/
public FSDataOutputStream create(final Path f, final EnumSet<CreateFlag> createFlag,
    Options.CreateOpts... opts)
    throws AccessControlException, FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException, IOException {
  Path absF = fixRelativePart(f);
  // If one of the options is a permission, extract it & apply umask
  // If not, add a default Perms and apply umask;
  // AbstractFileSystem#create
  CreateOpts.Perms permOpt = CreateOpts.getOpt(CreateOpts.Perms.class, opts);
  FsPermission permission = (permOpt != null) ? permOpt.getValue() : FILE_DEFAULT_PERM;
  permission = FsCreateModes.applyUMask(permission, getUMask());
  final CreateOpts[] updatedOpts = CreateOpts.setOpt(CreateOpts.perms(permission), opts);
  return new FSLinkResolver<FSDataOutputStream>() {
    @Override
    public FSDataOutputStream next(final AbstractFileSystem fs, final Path p)
        throws IOException {
      return fs.create(p, createFlag, updatedOpts);
    }
  }.resolve(this, absF);
}
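A hedged usage sketch of this public FileContext entry point; the helper name, relative path, replication factor, and payload are illustrative:

// Hypothetical caller: FileContext qualifies the relative path via fixRelativePart(),
// fills in a default Perms option (FILE_DEFAULT_PERM with the umask applied) because
// none is given, and delegates to AbstractFileSystem#create through FSLinkResolver.
static void writeWithOptions(FileContext fc) throws IOException {
  FSDataOutputStream out = fc.create(
      new Path("output/part-0000"),
      EnumSet.of(CreateFlag.CREATE),
      CreateOpts.createParent(),          // create missing parent directories
      CreateOpts.repFac((short) 3));      // replication factor; server default otherwise
  try {
    out.write(new byte[] { 1, 2, 3 });
  } finally {
    out.close();
  }
}

In practice the fc instance could come from FileContext.getFileContext() for the default file system.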