Use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.
The class TestINodeFileUnderConstructionWithSnapshot, method appendFileWithoutClosing.
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length) throws IOException {
  byte[] toAppend = new byte[length];
  Random random = new Random();
  random.nextBytes(toAppend);
  HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
  out.write(toAppend);
  return out;
}
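The point of this helper is that the returned stream is deliberately left open, so the file stays under construction across snapshot operations. A minimal sketch of how a caller might drive such a stream afterwards; the field names hdfs and file and the follow-up check are assumptions for illustration, not the actual test body:

// Illustrative only: keep the file under construction while still making
// its appended bytes visible. "hdfs" and "file" are assumed test fields.
HdfsDataOutputStream out = appendFileWithoutClosing(file, length);
// Flush the appended bytes and ask the NameNode to update the visible
// file length while the file remains open.
out.hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
// The new length is observable without closing the stream.
long visibleLength = hdfs.getFileStatus(file).getLen();
// Close only after the snapshot-related assertions are done.
out.close();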
Use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.
The class DistributedFileSystem, method create.
/**
 * Same as
 * {@link #create(Path, FsPermission, boolean, int, short, long,
 * Progressable)} with the addition of favoredNodes, a hint telling the
 * namenode where to place the file's blocks.
 * The favored-nodes hint is not persisted in HDFS, so it is only honored
 * at creation time. Blocks written to favored nodes are pinned on those
 * datanodes so the balancer will not move them; HDFS may still move such
 * blocks off the favored nodes during re-replication. A value of null
 * means no favored nodes for this create.
 */
public HdfsDataOutputStream create(final Path f, final FsPermission permission,
    final boolean overwrite, final int bufferSize, final short replication,
    final long blockSize, final Progressable progress,
    final InetSocketAddress[] favoredNodes) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.CREATE);
  Path absF = fixRelativePart(f);
  return new FileSystemLinkResolver<HdfsDataOutputStream>() {

    @Override
    public HdfsDataOutputStream doCall(final Path p) throws IOException {
      final DFSOutputStream out = dfs.create(getPathName(f), permission,
          overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
              : EnumSet.of(CreateFlag.CREATE),
          true, replication, blockSize, progress, bufferSize, null, favoredNodes);
      return dfs.createWrappedOutputStream(out, statistics);
    }

    @Override
    public HdfsDataOutputStream next(final FileSystem fs, final Path p) throws IOException {
      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem myDfs = (DistributedFileSystem) fs;
        return myDfs.create(p, permission, overwrite, bufferSize, replication,
            blockSize, progress, favoredNodes);
      }
      throw new UnsupportedOperationException("Cannot create with"
          + " favoredNodes through a symlink to a non-DistributedFileSystem: "
          + f + " -> " + p);
    }
  }.resolve(this, absF);
}
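For context, a caller-side sketch of this overload; the cluster URI, host names, port, path, and sizes below are placeholders, not values from the Hadoop sources:

// Hypothetical caller: ask the namenode to place the new file's blocks on
// two specific datanodes. All host names, the port, and sizes are placeholders.
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
    URI.create("hdfs://namenode.example.com:8020"), new Configuration());
InetSocketAddress[] favoredNodes = new InetSocketAddress[] {
    new InetSocketAddress("dn1.example.com", 9866),
    new InetSocketAddress("dn2.example.com", 9866) };
byte[] data = new byte[4096];
try (HdfsDataOutputStream out = dfs.create(new Path("/tmp/favored-file"),
    FsPermission.getFileDefault(), true /* overwrite */, 4096 /* bufferSize */,
    (short) 2 /* replication */, 128L * 1024 * 1024 /* blockSize */,
    null /* progress */, favoredNodes)) {
  out.write(data);
}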
Use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.
The class RpcProgramNfs3, method create.
@VisibleForTesting
CREATE3Response create(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
  CREATE3Response response = new CREATE3Response(Nfs3Status.NFS3_OK);
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  CREATE3Request request;
  try {
    request = CREATE3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid CREATE request");
    return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle dirHandle = request.getHandle();
  String fileName = request.getName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS CREATE dir fileId: " + dirHandle.getFileId()
        + " filename: " + fileName + " client: " + remoteAddress);
  }
  int createMode = request.getMode();
  if ((createMode != Nfs3Constant.CREATE_EXCLUSIVE)
      && request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)
      && request.getObjAttr().getSize() != 0) {
    LOG.error("Setting file size is not supported when creating file: "
        + fileName + " dir fileId: " + dirHandle.getFileId());
    return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  HdfsDataOutputStream fos = null;
  String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
  Nfs3FileAttributes preOpDirAttr = null;
  Nfs3FileAttributes postOpObjAttr = null;
  FileHandle fileHandle = null;
  WccData dirWcc = null;
  try {
    preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
    if (preOpDirAttr == null) {
      LOG.error("Can't get path for dirHandle: " + dirHandle);
      return new CREATE3Response(Nfs3Status.NFS3ERR_STALE);
    }
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
      return new CREATE3Response(Nfs3Status.NFS3ERR_ACCES, null, preOpDirAttr,
          new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr));
    }
    String fileIdPath = Nfs3Utils.getFileIdPath(dirHandle) + "/" + fileName;
    SetAttr3 setAttr3 = request.getObjAttr();
    assert (setAttr3 != null);
    FsPermission permission = setAttr3.getUpdateFields().contains(SetAttrField.MODE)
        ? new FsPermission((short) setAttr3.getMode())
        : FsPermission.getDefault().applyUMask(umask);
    EnumSet<CreateFlag> flag = (createMode != Nfs3Constant.CREATE_EXCLUSIVE)
        ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
        : EnumSet.of(CreateFlag.CREATE);
    fos = dfsClient.createWrappedOutputStream(
        dfsClient.create(fileIdPath, permission, flag, false, replication,
            blockSize, null, bufferSize, null),
        null);
    if ((createMode == Nfs3Constant.CREATE_UNCHECKED)
        || (createMode == Nfs3Constant.CREATE_GUARDED)) {
      // Set group if it's not specified in the request.
      if (!setAttr3.getUpdateFields().contains(SetAttrField.GID)) {
        setAttr3.getUpdateFields().add(SetAttrField.GID);
        setAttr3.setGid(securityHandler.getGid());
      }
      setattrInternal(dfsClient, fileIdPath, setAttr3, false);
    }
    postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
    dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
        dfsClient, dirFileIdPath, iug);
    // Add open stream
    OpenFileCtx openFileCtx = new OpenFileCtx(fos, postOpObjAttr,
        writeDumpDir + "/" + postOpObjAttr.getFileId(), dfsClient, iug,
        aixCompatMode, config);
    fileHandle = new FileHandle(postOpObjAttr.getFileId());
    if (!writeManager.addOpenFileStream(fileHandle, openFileCtx)) {
      LOG.warn("Can't add more stream, close it."
          + " Future write will become append");
      fos.close();
      fos = null;
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Opened stream for file: " + fileName + ", fileId: "
            + fileHandle.getFileId());
      }
    }
  } catch (IOException e) {
    LOG.error("Exception", e);
    if (fos != null) {
      try {
        fos.close();
      } catch (IOException e1) {
        LOG.error("Can't close stream for dirFileId: " + dirHandle.getFileId()
            + " filename: " + fileName, e1);
      }
    }
    if (dirWcc == null) {
      try {
        dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
            dfsClient, dirFileIdPath, iug);
      } catch (IOException e1) {
        LOG.error("Can't get postOpDirAttr for dirFileId: "
            + dirHandle.getFileId(), e1);
      }
    }
    int status = mapErrorStatus(e);
    return new CREATE3Response(status, fileHandle, postOpObjAttr, dirWcc);
  }
  return new CREATE3Response(Nfs3Status.NFS3_OK, fileHandle, postOpObjAttr, dirWcc);
}
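The HdfsDataOutputStream used throughout this handler comes from the two-step create-and-wrap call in the middle of the method: dfsClient.create(...) returns a DFSOutputStream, which createWrappedOutputStream turns into an HdfsDataOutputStream. Pulled out on its own, and with placeholder path and size values (assumptions for illustration), the pattern is:

// Sketch of the create-and-wrap pattern used above; the path and the
// replication, block-size, and buffer-size values are placeholders.
EnumSet<CreateFlag> flag = EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
HdfsDataOutputStream fos = dfsClient.createWrappedOutputStream(
    dfsClient.create("/example/newfile",  // the real handler uses a file-id based path
        FsPermission.getDefault(), flag,
        false,            // do not create missing parent directories
        (short) 3,        // replication
        134217728L,       // block size (128 MB)
        null,             // no Progressable
        4096,             // buffer size
        null),            // default ChecksumOpt
    null);                // no FileSystem.Statistics to update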
Use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.
The class TestWrites, method testCheckCommitLargeFileUpload.
@Test
// large file upload option.
public void testCheckCommitLargeFileUpload() throws IOException {
  DFSClient dfsClient = Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr = new Nfs3FileAttributes();
  HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
  Mockito.when(fos.getPos()).thenReturn((long) 0);
  NfsConfiguration conf = new NfsConfiguration();
  conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, true);
  OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
      new ShellBasedIdMapping(conf), false, conf);
  COMMIT_STATUS ret;
  // Test inactive open file context
  ctx.setActiveStatusForTest(false);
  Channel ch = Mockito.mock(Channel.class);
  ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
  Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX);
  ctx.getPendingWritesForTest().put(new OffsetRange(10, 15),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
  Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE);
  // Test request with non zero commit offset
  ctx.setActiveStatusForTest(true);
  Mockito.when(fos.getPos()).thenReturn((long) 8);
  ctx.setNextOffsetForTest(10);
  COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
  Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
  // Do_SYNC state will be updated to FINISHED after data sync
  ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, false);
  Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
  // Test commit sequential writes
  status = ctx.checkCommitInternal(10, ch, 1, attr, false);
  Assert.assertTrue(status == COMMIT_STATUS.COMMIT_SPECIAL_WAIT);
  ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, false);
  Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT);
  // Test commit non-sequential writes
  ConcurrentNavigableMap<Long, CommitCtx> commits = ctx.getPendingCommitsForTest();
  Assert.assertTrue(commits.size() == 1);
  ret = ctx.checkCommit(dfsClient, 16, ch, 1, attr, false);
  Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS);
  Assert.assertTrue(commits.size() == 1);
  // Test request with zero commit offset
  commits.remove(new Long(10));
  // There is one pending write [10,15]
  ret = ctx.checkCommitInternal(0, ch, 1, attr, false);
  Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT);
  ret = ctx.checkCommitInternal(9, ch, 1, attr, false);
  Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT);
  Assert.assertTrue(commits.size() == 2);
  // Empty pending writes. nextOffset=10, flushed pos=8
  ctx.getPendingWritesForTest().remove(new OffsetRange(10, 15));
  ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
  Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT);
  // Empty pending writes
  // flushed pos = 8
  ctx.setNextOffsetForTest((long) 8);
  ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
  Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
}
Use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.
The class TestWrites, method testCheckSequential.
@Test
public void testCheckSequential() throws IOException {
  DFSClient dfsClient = Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr = new Nfs3FileAttributes();
  HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
  Mockito.when(fos.getPos()).thenReturn((long) 0);
  NfsConfiguration config = new NfsConfiguration();
  config.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false);
  OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
      new ShellBasedIdMapping(config), false, config);
  ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  ctx.getPendingWritesForTest().put(new OffsetRange(10, 15),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  ctx.getPendingWritesForTest().put(new OffsetRange(20, 25),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  assertTrue(!ctx.checkSequential(5, 4));
  assertTrue(ctx.checkSequential(9, 5));
  assertTrue(ctx.checkSequential(10, 5));
  assertTrue(ctx.checkSequential(14, 5));
  assertTrue(!ctx.checkSequential(15, 5));
  assertTrue(!ctx.checkSequential(20, 5));
  assertTrue(!ctx.checkSequential(25, 5));
  assertTrue(!ctx.checkSequential(999, 5));
}