Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.
The class WebHdfsHandler, method onCreate.
private void onCreate(ChannelHandlerContext ctx) throws IOException, URISyntaxException {
  writeContinueHeader(ctx);
  final String nnId = params.namenodeId();
  final int bufferSize = params.bufferSize();
  final short replication = params.replication();
  final long blockSize = params.blockSize();
  final FsPermission unmaskedPermission = params.unmaskedPermission();
  final FsPermission permission = unmaskedPermission == null
      ? params.permission()
      : FsCreateModes.create(params.permission(), unmaskedPermission);
  final boolean createParent = params.createParent();
  EnumSet<CreateFlag> flags = params.createFlag();
  if (flags.equals(EMPTY_CREATE_FLAG)) {
    flags = params.overwrite()
        ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
        : EnumSet.of(CreateFlag.CREATE);
  } else {
    if (params.overwrite()) {
      flags.add(CreateFlag.OVERWRITE);
    }
  }
  final DFSClient dfsClient = newDfsClient(nnId, confForCreate);
  OutputStream out = dfsClient.createWrappedOutputStream(
      dfsClient.create(path, permission, flags, createParent, replication,
          blockSize, null, bufferSize, null), null);
  resp = new DefaultHttpResponse(HTTP_1_1, CREATED);
  final URI uri = new URI(HDFS_URI_SCHEME, nnId, path, null, null);
  resp.headers().set(LOCATION, uri.toString());
  resp.headers().set(CONTENT_LENGTH, 0);
  resp.headers().set(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
  ctx.pipeline().replace(this, HdfsWriter.class.getSimpleName(),
      new HdfsWriter(dfsClient, out, resp));
}
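The flag handling above starts from CreateFlag.CREATE and adds CreateFlag.OVERWRITE when the request allows replacing an existing file, then hands the set to DFSClient.create(). A minimal standalone sketch of driving the same flag combination through the public FileSystem API is shown below; the path, block size, and replication are illustrative values, not anything WebHdfsHandler uses.

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class CreateFlagExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // CREATE plus OVERWRITE mirrors what onCreate() builds when overwrite=true;
    // dropping OVERWRITE makes the call fail if the file already exists.
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
    try (FileSystem fs = FileSystem.get(conf);
         FSDataOutputStream out = fs.create(new Path("/tmp/createflag-example"),
             FsPermission.getFileDefault(), flags, 4096, (short) 3,
             128L * 1024 * 1024, null)) {
      out.writeBytes("hello\n");
    }
  }
}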
Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.
The class DFSClient, method callAppend.
/** Method to get stream returned by append call */
private DFSOutputStream callAppend(String src, EnumSet<CreateFlag> flag,
    Progressable progress, String[] favoredNodes) throws IOException {
  CreateFlag.validateForAppend(flag);
  try {
    final LastBlockWithStatus blkWithStatus = callAppend(src,
        new EnumSetWritable<>(flag, CreateFlag.class));
    HdfsFileStatus status = blkWithStatus.getFileStatus();
    if (status == null) {
      LOG.debug("NameNode is on an older version, request file info with"
          + " additional RPC call for file: {}", src);
      status = getFileInfo(src);
    }
    return DFSOutputStream.newStreamForAppend(this, src, flag, progress,
        blkWithStatus.getLastBlock(), status,
        dfsClientConf.createChecksum(null), favoredNodes);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        SafeModeException.class,
        DSQuotaExceededException.class,
        QuotaByStorageTypeExceededException.class,
        UnsupportedOperationException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  }
}
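CreateFlag.validateForAppend() rejects any flag set that does not contain CreateFlag.APPEND, so callers reach this method only with an append-style combination. A hedged client-side sketch that exercises the same flag through DistributedFileSystem is shown below; it assumes fs.defaultFS points at HDFS, that /tmp/append-example already exists, and that the EnumSet-taking append overload is available in this Hadoop line.

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class AppendFlagExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path("/tmp/append-example");  // assumed to exist already
    try (DistributedFileSystem dfs =
             (DistributedFileSystem) file.getFileSystem(conf)) {
      // APPEND is mandatory for validateForAppend(); adding NEW_BLOCK would
      // start the appended data in a fresh block instead of filling the last
      // partial block.
      EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.APPEND);
      try (FSDataOutputStream out = dfs.append(file, flags, 4096, null)) {
        out.writeBytes("appended line\n");
      }
    }
  }
}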
Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.
The class TestDistributedFileSystem, method testCreateWithCustomChecksum.
@Test
public void testCreateWithCustomChecksum() throws Exception {
  Configuration conf = getTestConfiguration();
  MiniDFSCluster cluster = null;
  Path testBasePath = new Path("/test/csum");
  // create args
  Path path1 = new Path(testBasePath, "file_with_crc1");
  Path path2 = new Path(testBasePath, "file_with_crc2");
  ChecksumOpt opt1 = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
  ChecksumOpt opt2 = new ChecksumOpt(DataChecksum.Type.CRC32, 512);
  // common args
  FsPermission perm = FsPermission.getDefault().applyUMask(
      FsPermission.getUMask(conf));
  EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.OVERWRITE, CreateFlag.CREATE);
  short repl = 1;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    FileSystem dfs = cluster.getFileSystem();
    dfs.mkdirs(testBasePath);
    // create two files with different checksum types
    FSDataOutputStream out1 = dfs.create(path1, perm, flags, 4096, repl,
        131072L, null, opt1);
    FSDataOutputStream out2 = dfs.create(path2, perm, flags, 4096, repl,
        131072L, null, opt2);
    for (int i = 0; i < 1024; i++) {
      out1.write(i);
      out2.write(i);
    }
    out1.close();
    out2.close();
    // the two checksums must be different.
    MD5MD5CRC32FileChecksum sum1 = (MD5MD5CRC32FileChecksum) dfs.getFileChecksum(path1);
    MD5MD5CRC32FileChecksum sum2 = (MD5MD5CRC32FileChecksum) dfs.getFileChecksum(path2);
    assertFalse(sum1.equals(sum2));
    // check the individual params
    assertEquals(DataChecksum.Type.CRC32C, sum1.getCrcType());
    assertEquals(DataChecksum.Type.CRC32, sum2.getCrcType());
  } finally {
    if (cluster != null) {
      cluster.getFileSystem().delete(testBasePath, true);
      cluster.shutdown();
    }
  }
}
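Each create() call above passes its own ChecksumOpt, so the two files carry different per-file checksum types (CRC32C versus CRC32) and their MD5-of-CRC file checksums cannot match. When no ChecksumOpt is supplied, the client's configured default checksum type applies to the new file instead.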
Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.
The class TestDFSOutputStream, method testNoLocalWriteFlag.
@Test
public void testNoLocalWriteFlag() throws IOException {
  DistributedFileSystem fs = cluster.getFileSystem();
  EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.NO_LOCAL_WRITE, CreateFlag.CREATE);
  BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
  DatanodeManager dm = bm.getDatanodeManager();
  try (FSDataOutputStream os = fs.create(new Path("/test-no-local"),
      FsPermission.getDefault(), flags, 512, (short) 2, 512, null)) {
    // Inject a DatanodeManager that returns one DataNode as local node for
    // the client.
    DatanodeManager spyDm = spy(dm);
    DatanodeDescriptor dn1 = dm.getDatanodeListForReport(
        HdfsConstants.DatanodeReportType.LIVE).get(0);
    doReturn(dn1).when(spyDm).getDatanodeByHost("127.0.0.1");
    Whitebox.setInternalState(bm, "datanodeManager", spyDm);
    byte[] buf = new byte[512 * 16];
    new Random().nextBytes(buf);
    os.write(buf);
  } finally {
    Whitebox.setInternalState(bm, "datanodeManager", dm);
  }
  cluster.triggerBlockReports();
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  // Total number of DataNodes is 3.
  assertEquals(3, cluster.getAllBlockReports(bpid).size());
  int numDataNodesWithData = 0;
  for (Map<DatanodeStorage, BlockListAsLongs> dnBlocks :
      cluster.getAllBlockReports(bpid)) {
    for (BlockListAsLongs blocks : dnBlocks.values()) {
      if (blocks.getNumberOfBlocks() > 0) {
        numDataNodesWithData++;
        break;
      }
    }
  }
  // Verify that only one DN has no data.
  assertEquals(1, 3 - numDataNodesWithData);
}
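CreateFlag.NO_LOCAL_WRITE advises the NameNode to avoid placing a replica on the DataNode co-located with the writing client. With replication 2 on a 3-node cluster and a spied DatanodeManager reporting one node as the client's local host, exactly one DataNode should end up holding no blocks, which is what the final assertion checks.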
Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.
The class TestAddBlockRetry, method testAddBlockRetryShouldReturnBlockWithLocations.
/*
 * Since the NameNode does not persist the block's locations, an addBlock()
 * retry after a NameNode restart should re-select the locations and return
 * them to the client. Refer to HDFS-5257.
 */
@Test
public void testAddBlockRetryShouldReturnBlockWithLocations() throws Exception {
  final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  // create file
  nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 3, 1024, null);
  // start first addBlock()
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
      HdfsConstants.GRANDFATHER_INODE_ID, null, null);
  assertTrue("Block locations should be present", lb1.getLocations().length > 0);
  cluster.restartNameNode();
  nameNodeRpc = cluster.getNameNodeRpc();
  LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
      HdfsConstants.GRANDFATHER_INODE_ID, null, null);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
  assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
}
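Because the NameNode does not persist block locations, the retried addBlock() after the restart has to recompute a placement. The assertions confirm that the same block is returned rather than a duplicate allocation, and that the returned block again carries a non-empty location list.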