Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.
From the class TestNamenodeRetryCache, method testCreate:
/**
 * Test for create file
 */
@Test
public void testCreate() throws Exception {
  String src = "/testNamenodeRetryCache/testCreate/file";
  // Two retried calls succeed
  newCall();
  HdfsFileStatus status = nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
      true, (short) 1, BlockSize, null);
  Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
      true, (short) 1, BlockSize, null));
  Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
      true, (short) 1, BlockSize, null));
  // A non-retried call fails
  newCall();
  try {
    nnRpc.create(src, perm, "holder",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
        true, (short) 1, BlockSize, null);
    Assert.fail("testCreate - expected exception is not thrown");
  } catch (IOException e) {
    // expected
  }
}
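For context, the flag argument above is an EnumSetWritable wrapping an EnumSet of CreateFlag values: the two retried calls reuse the same call ID and get the cached HdfsFileStatus back, while newCall() issues a fresh call ID, so the final create hits the NameNode again and fails because the file already exists. A minimal sketch of the two most common flag combinations (class name here is illustrative):

import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.io.EnumSetWritable;

class CreateFlagSketch {
  // CREATE alone: fail if the file already exists.
  static final EnumSetWritable<CreateFlag> CREATE_ONLY =
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE));

  // CREATE | OVERWRITE: create the file, truncating any existing one.
  static final EnumSetWritable<CreateFlag> CREATE_OR_OVERWRITE =
      new EnumSetWritable<CreateFlag>(
          EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));
}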
Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.
From the class AdlFileSystem, method createNonRecursive:
/**
 * Opens an FSDataOutputStream at the indicated Path with write-progress
 * reporting. Same as create(), except it fails if the parent directory
 * does not already exist.
 *
 * @param f the file name to open
 * @param permission access permission for the newly created file
 * @param flags {@link CreateFlag}s to use for this stream
 * @param bufferSize the size of the buffer to be used; not honoured by
 *          the ADL backend
 * @param replication required block replication for the file; not
 *          honoured by the ADL backend
 * @param blockSize block size; not honoured by the ADL backend
 * @param progress progress indicator
 * @throws IOException on a system error, internal server error, or
 *           user error
 * @see #setPermission(Path, FsPermission)
 * @deprecated API only for 0.20-append
 */
@Override
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
    EnumSet<CreateFlag> flags, int bufferSize, short replication,
    long blockSize, Progressable progress) throws IOException {
  statistics.incrementWriteOps(1);
  IfExists overwriteRule = IfExists.FAIL;
  for (CreateFlag flag : flags) {
    if (flag == CreateFlag.OVERWRITE) {
      overwriteRule = IfExists.OVERWRITE;
      break;
    }
  }
  return new FSDataOutputStream(
      new AdlFsOutputStream(
          adlClient.createFile(toRelativeFilePath(f), overwriteRule,
              Integer.toOctalString(applyUMask(permission).toShort()), false),
          getConf()),
      this.statistics);
}
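A hedged usage sketch of the generic FileSystem API this overrides (the path, buffer size, and block size are illustrative): createNonRecursive throws an IOException if the parent directory is missing, so callers either create the parent first or catch the failure.

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class CreateNonRecursiveSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // The parent directory must already exist for createNonRecursive.
    Path file = new Path("/existing-dir/file.txt");
    try (FSDataOutputStream out = fs.createNonRecursive(file,
        FsPermission.getFileDefault(),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        4096, (short) 1, 128L * 1024 * 1024, null)) {
      out.writeBytes("hello");
    }
  }
}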
Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.
From the class RpcProgramNfs3, method create:
@VisibleForTesting
CREATE3Response create(XDR xdr, SecurityHandler securityHandler,
    SocketAddress remoteAddress) {
  CREATE3Response response = new CREATE3Response(Nfs3Status.NFS3_OK);
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  CREATE3Request request;
  try {
    request = CREATE3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid CREATE request");
    return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle dirHandle = request.getHandle();
  String fileName = request.getName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS CREATE dir fileId: " + dirHandle.getFileId()
        + " filename: " + fileName + " client: " + remoteAddress);
  }
  int createMode = request.getMode();
  if ((createMode != Nfs3Constant.CREATE_EXCLUSIVE)
      && request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)
      && request.getObjAttr().getSize() != 0) {
    LOG.error("Setting file size is not supported when creating file: "
        + fileName + " dir fileId: " + dirHandle.getFileId());
    return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  HdfsDataOutputStream fos = null;
  String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
  Nfs3FileAttributes preOpDirAttr = null;
  Nfs3FileAttributes postOpObjAttr = null;
  FileHandle fileHandle = null;
  WccData dirWcc = null;
  try {
    preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
    if (preOpDirAttr == null) {
      LOG.error("Can't get path for dirHandle: " + dirHandle);
      return new CREATE3Response(Nfs3Status.NFS3ERR_STALE);
    }
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
      return new CREATE3Response(Nfs3Status.NFS3ERR_ACCES, null, preOpDirAttr,
          new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr));
    }
    String fileIdPath = Nfs3Utils.getFileIdPath(dirHandle) + "/" + fileName;
    SetAttr3 setAttr3 = request.getObjAttr();
    assert (setAttr3 != null);
    FsPermission permission =
        setAttr3.getUpdateFields().contains(SetAttrField.MODE)
            ? new FsPermission((short) setAttr3.getMode())
            : FsPermission.getDefault().applyUMask(umask);
    EnumSet<CreateFlag> flag = (createMode != Nfs3Constant.CREATE_EXCLUSIVE)
        ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
        : EnumSet.of(CreateFlag.CREATE);
    fos = dfsClient.createWrappedOutputStream(
        dfsClient.create(fileIdPath, permission, flag, false, replication,
            blockSize, null, bufferSize, null), null);
    if ((createMode == Nfs3Constant.CREATE_UNCHECKED)
        || (createMode == Nfs3Constant.CREATE_GUARDED)) {
      // Set group if it's not specified in the request.
      if (!setAttr3.getUpdateFields().contains(SetAttrField.GID)) {
        setAttr3.getUpdateFields().add(SetAttrField.GID);
        setAttr3.setGid(securityHandler.getGid());
      }
      setattrInternal(dfsClient, fileIdPath, setAttr3, false);
    }
    postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
    dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
        dfsClient, dirFileIdPath, iug);
    // Add open stream
    OpenFileCtx openFileCtx = new OpenFileCtx(fos, postOpObjAttr,
        writeDumpDir + "/" + postOpObjAttr.getFileId(), dfsClient, iug,
        aixCompatMode, config);
    fileHandle = new FileHandle(postOpObjAttr.getFileId());
    if (!writeManager.addOpenFileStream(fileHandle, openFileCtx)) {
      LOG.warn("Can't add more stream, close it."
          + " Future write will become append");
      fos.close();
      fos = null;
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Opened stream for file: " + fileName + ", fileId: "
            + fileHandle.getFileId());
      }
    }
  } catch (IOException e) {
    LOG.error("Exception", e);
    if (fos != null) {
      try {
        fos.close();
      } catch (IOException e1) {
        LOG.error("Can't close stream for dirFileId: "
            + dirHandle.getFileId() + " filename: " + fileName, e1);
      }
    }
    if (dirWcc == null) {
      try {
        dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
            dfsClient, dirFileIdPath, iug);
      } catch (IOException e1) {
        LOG.error("Can't get postOpDirAttr for dirFileId: "
            + dirHandle.getFileId(), e1);
      }
    }
    int status = mapErrorStatus(e);
    return new CREATE3Response(status, fileHandle, postOpObjAttr, dirWcc);
  }
  return new CREATE3Response(Nfs3Status.NFS3_OK, fileHandle, postOpObjAttr,
      dirWcc);
}
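The flag selection in the method above encodes how this gateway maps NFSv3 create modes onto HDFS semantics: UNCHECKED and GUARDED both become CREATE|OVERWRITE, while EXCLUSIVE becomes CREATE alone, so an existing file makes the underlying HDFS create, and hence the NFS request, fail. A minimal restatement of that mapping (class and helper names are illustrative):

import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;

class CreateModeFlags {
  // Illustrative helper: pick CreateFlags for an NFSv3 CREATE mode.
  static EnumSet<CreateFlag> flagsForCreateMode(int createMode) {
    return (createMode != Nfs3Constant.CREATE_EXCLUSIVE)
        ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) // UNCHECKED / GUARDED
        : EnumSet.of(CreateFlag.CREATE); // EXCLUSIVE: fail if the file exists
  }
}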
Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.
From the class ClientNamenodeProtocolServerSideTranslatorPB, method append:
@Override
public AppendResponseProto append(RpcController controller,
    AppendRequestProto req) throws ServiceException {
  try {
    EnumSetWritable<CreateFlag> flags = req.hasFlag()
        ? PBHelperClient.convertCreateFlag(req.getFlag())
        : new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
    LastBlockWithStatus result =
        server.append(req.getSrc(), req.getClientName(), flags);
    AppendResponseProto.Builder builder = AppendResponseProto.newBuilder();
    if (result.getLastBlock() != null) {
      builder.setBlock(PBHelperClient.convertLocatedBlock(result.getLastBlock()));
    }
    if (result.getFileStatus() != null) {
      builder.setStat(PBHelperClient.convert(result.getFileStatus()));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
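When the protobuf request carries no flag field (for example, from an older client), the translator above falls back to plain APPEND. A sketch of the two flag sets a caller can end up with (class name is illustrative); as I understand it, APPEND|NEW_BLOCK asks the NameNode to start the appended data in a fresh block rather than extending the last partial one:

import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.io.EnumSetWritable;

class AppendFlagSketch {
  // Default used above when the request has no flag field.
  static final EnumSetWritable<CreateFlag> APPEND_ONLY =
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));

  // APPEND | NEW_BLOCK: append starting at a new block boundary.
  static final EnumSetWritable<CreateFlag> APPEND_NEW_BLOCK =
      new EnumSetWritable<>(
          EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK));
}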
Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.
From the class DFSTestUtil, method createFile:
public static void createFile(FileSystem fs, Path fileName,
    boolean isLazyPersist, int bufferLen, long fileLen, long blockSize,
    short replFactor, long seed, boolean flush,
    InetSocketAddress[] favoredNodes) throws IOException {
  assert bufferLen > 0;
  if (!fs.mkdirs(fileName.getParent())) {
    throw new IOException("Mkdirs failed to create "
        + fileName.getParent().toString());
  }
  EnumSet<CreateFlag> createFlags = EnumSet.of(CREATE);
  createFlags.add(OVERWRITE);
  if (isLazyPersist) {
    createFlags.add(LAZY_PERSIST);
  }
  try (FSDataOutputStream out = (favoredNodes == null)
      ? fs.create(fileName, FsPermission.getFileDefault(), createFlags,
          fs.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096), replFactor,
          blockSize, null)
      : ((DistributedFileSystem) fs).create(fileName,
          FsPermission.getDefault(), true, bufferLen, replFactor, blockSize,
          null, favoredNodes)) {
    if (fileLen > 0) {
      byte[] toWrite = new byte[bufferLen];
      Random rb = new Random(seed);
      long bytesToWrite = fileLen;
      while (bytesToWrite > 0) {
        rb.nextBytes(toWrite);
        int bytesToWriteNext = (bufferLen < bytesToWrite)
            ? bufferLen : (int) bytesToWrite;
        out.write(toWrite, 0, bytesToWriteNext);
        bytesToWrite -= bytesToWriteNext;
      }
      if (flush) {
        out.hsync();
      }
    }
  }
}
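The helper builds its flag set incrementally; LAZY_PERSIST is only added on request, and it tells HDFS it may write replicas to transient storage (such as a RAM disk) first and persist them lazily. A stand-alone sketch of an equivalent create call (path, buffer size, and block size are illustrative):

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class LazyPersistCreateSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/lazy-persist-file");
    EnumSet<CreateFlag> flags = EnumSet.of(
        CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.LAZY_PERSIST);
    try (FSDataOutputStream out = fs.create(file,
        FsPermission.getFileDefault(), flags,
        4096, (short) 1, 128L * 1024 * 1024, null)) {
      out.writeBytes("data");
    }
  }
}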