Example 1 with CreateFlag

Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.

From the class WebHdfsHandler, method onCreate.

private void onCreate(ChannelHandlerContext ctx) throws IOException, URISyntaxException {
    writeContinueHeader(ctx);
    final String nnId = params.namenodeId();
    final int bufferSize = params.bufferSize();
    final short replication = params.replication();
    final long blockSize = params.blockSize();
    final FsPermission unmaskedPermission = params.unmaskedPermission();
    final FsPermission permission = unmaskedPermission == null ? params.permission() : FsCreateModes.create(params.permission(), unmaskedPermission);
    final boolean createParent = params.createParent();
    // If the request supplied no create flags, fall back to CREATE, adding OVERWRITE
    // when the overwrite parameter is set; otherwise keep the supplied flags and merge
    // in OVERWRITE on demand.
    EnumSet<CreateFlag> flags = params.createFlag();
    if (flags.equals(EMPTY_CREATE_FLAG)) {
        flags = params.overwrite() ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : EnumSet.of(CreateFlag.CREATE);
    } else {
        if (params.overwrite()) {
            flags.add(CreateFlag.OVERWRITE);
        }
    }
    final DFSClient dfsClient = newDfsClient(nnId, confForCreate);
    OutputStream out = dfsClient.createWrappedOutputStream(
        dfsClient.create(path, permission, flags, createParent, replication, blockSize, null, bufferSize, null),
        null);
    resp = new DefaultHttpResponse(HTTP_1_1, CREATED);
    final URI uri = new URI(HDFS_URI_SCHEME, nnId, path, null, null);
    resp.headers().set(LOCATION, uri.toString());
    resp.headers().set(CONTENT_LENGTH, 0);
    resp.headers().set(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
    ctx.pipeline().replace(this, HdfsWriter.class.getSimpleName(), new HdfsWriter(dfsClient, out, resp));
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) DFSClient(org.apache.hadoop.hdfs.DFSClient) OutputStream(java.io.OutputStream) URI(java.net.URI) DefaultHttpResponse(io.netty.handler.codec.http.DefaultHttpResponse) FsPermission(org.apache.hadoop.fs.permission.FsPermission)
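For contrast with the DFSClient-level call above, the same CREATE/OVERWRITE flag selection can be expressed against the public FileSystem API, using the same create overload that Example 4 below relies on. This is only a minimal sketch, not code from the Hadoop tree; the configuration, path, buffer size, replication and block size are illustrative assumptions.

// Minimal sketch (hypothetical path and sizing; not taken from the Hadoop sources).
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
boolean overwrite = true;
EnumSet<CreateFlag> flags = overwrite
        ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
        : EnumSet.of(CreateFlag.CREATE);
try (FSDataOutputStream out = fs.create(new Path("/tmp/createflag-demo"),
        FsPermission.getDefault(), flags, 4096, (short) 1, 128 * 1024 * 1024L, null)) {
    out.writeBytes("hello");
}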

Example 2 with CreateFlag

Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.

From the class DFSClient, method callAppend.

/** Method to get stream returned by append call */
private DFSOutputStream callAppend(String src, EnumSet<CreateFlag> flag, Progressable progress, String[] favoredNodes) throws IOException {
    CreateFlag.validateForAppend(flag);
    try {
        final LastBlockWithStatus blkWithStatus = callAppend(src, new EnumSetWritable<>(flag, CreateFlag.class));
        HdfsFileStatus status = blkWithStatus.getFileStatus();
        if (status == null) {
            LOG.debug("NameNode is on an older version, request file " + "info with additional RPC call for file: {}", src);
            status = getFileInfo(src);
        }
        return DFSOutputStream.newStreamForAppend(this, src, flag, progress, blkWithStatus.getLastBlock(), status, dfsClientConf.createChecksum(null), favoredNodes);
    } catch (RemoteException re) {
        throw re.unwrapRemoteException(AccessControlException.class,
            FileNotFoundException.class,
            SafeModeException.class,
            DSQuotaExceededException.class,
            QuotaByStorageTypeExceededException.class,
            UnsupportedOperationException.class,
            UnresolvedPathException.class,
            SnapshotAccessControlException.class);
    }
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) QuotaByStorageTypeExceededException(org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException) LastBlockWithStatus(org.apache.hadoop.hdfs.protocol.LastBlockWithStatus) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) DSQuotaExceededException(org.apache.hadoop.hdfs.protocol.DSQuotaExceededException) SafeModeException(org.apache.hadoop.hdfs.server.namenode.SafeModeException) RemoteException(org.apache.hadoop.ipc.RemoteException) UnresolvedPathException(org.apache.hadoop.hdfs.protocol.UnresolvedPathException)
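The append path above leans on CreateFlag.validateForAppend, which rejects any flag set that does not include APPEND. A minimal sketch of that check follows; pairing APPEND with NEW_BLOCK is shown only as a typical, assumed combination and is not taken from this class.

// Sketch only: APPEND must be present for the append path.
EnumSet<CreateFlag> appendFlags = EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK);
CreateFlag.validateForAppend(appendFlags); // passes: APPEND is present
// CreateFlag.validateForAppend(EnumSet.of(CreateFlag.CREATE)); // would throw: APPEND missing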

Example 3 with CreateFlag

Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.

From the class TestDistributedFileSystem, method testCreateWithCustomChecksum.

@Test
public void testCreateWithCustomChecksum() throws Exception {
    Configuration conf = getTestConfiguration();
    MiniDFSCluster cluster = null;
    Path testBasePath = new Path("/test/csum");
    // create args
    Path path1 = new Path(testBasePath, "file_with_crc1");
    Path path2 = new Path(testBasePath, "file_with_crc2");
    ChecksumOpt opt1 = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
    ChecksumOpt opt2 = new ChecksumOpt(DataChecksum.Type.CRC32, 512);
    // common args
    FsPermission perm = FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf));
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.OVERWRITE, CreateFlag.CREATE);
    short repl = 1;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        FileSystem dfs = cluster.getFileSystem();
        dfs.mkdirs(testBasePath);
        // create two files with different checksum types
        FSDataOutputStream out1 = dfs.create(path1, perm, flags, 4096, repl, 131072L, null, opt1);
        FSDataOutputStream out2 = dfs.create(path2, perm, flags, 4096, repl, 131072L, null, opt2);
        for (int i = 0; i < 1024; i++) {
            out1.write(i);
            out2.write(i);
        }
        out1.close();
        out2.close();
        // the two checksums must be different.
        MD5MD5CRC32FileChecksum sum1 = (MD5MD5CRC32FileChecksum) dfs.getFileChecksum(path1);
        MD5MD5CRC32FileChecksum sum2 = (MD5MD5CRC32FileChecksum) dfs.getFileChecksum(path2);
        assertFalse(sum1.equals(sum2));
        // check the individual params
        assertEquals(DataChecksum.Type.CRC32C, sum1.getCrcType());
        assertEquals(DataChecksum.Type.CRC32, sum2.getCrcType());
    } finally {
        if (cluster != null) {
            cluster.getFileSystem().delete(testBasePath, true);
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) CreateFlag(org.apache.hadoop.fs.CreateFlag) MD5MD5CRC32FileChecksum(org.apache.hadoop.fs.MD5MD5CRC32FileChecksum) Configuration(org.apache.hadoop.conf.Configuration) ChecksumOpt(org.apache.hadoop.fs.Options.ChecksumOpt) FileSystem(org.apache.hadoop.fs.FileSystem) FsPermission(org.apache.hadoop.fs.permission.FsPermission) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
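Note that ChecksumOpt simply pairs a DataChecksum.Type with a bytes-per-checksum value; because the two create calls use different checksum types (CRC32C vs CRC32), the resulting file checksums differ, which is what the assertions verify.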

Example 4 with CreateFlag

Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.

From the class TestDFSOutputStream, method testNoLocalWriteFlag.

@Test
public void testNoLocalWriteFlag() throws IOException {
    DistributedFileSystem fs = cluster.getFileSystem();
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.NO_LOCAL_WRITE, CreateFlag.CREATE);
    BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
    DatanodeManager dm = bm.getDatanodeManager();
    try (FSDataOutputStream os = fs.create(new Path("/test-no-local"), FsPermission.getDefault(), flags, 512, (short) 2, 512, null)) {
        // Inject a DatanodeManager that returns one DataNode as local node for
        // the client.
        DatanodeManager spyDm = spy(dm);
        DatanodeDescriptor dn1 = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.LIVE).get(0);
        doReturn(dn1).when(spyDm).getDatanodeByHost("127.0.0.1");
        Whitebox.setInternalState(bm, "datanodeManager", spyDm);
        byte[] buf = new byte[512 * 16];
        new Random().nextBytes(buf);
        os.write(buf);
    } finally {
        Whitebox.setInternalState(bm, "datanodeManager", dm);
    }
    cluster.triggerBlockReports();
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    // Total number of DataNodes is 3.
    assertEquals(3, cluster.getAllBlockReports(bpid).size());
    int numDataNodesWithData = 0;
    for (Map<DatanodeStorage, BlockListAsLongs> dnBlocks : cluster.getAllBlockReports(bpid)) {
        for (BlockListAsLongs blocks : dnBlocks.values()) {
            if (blocks.getNumberOfBlocks() > 0) {
                numDataNodesWithData++;
                break;
            }
        }
    }
    // Verify that only one DN has no data.
    assertEquals(1, 3 - numDataNodesWithData);
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) Path(org.apache.hadoop.fs.Path) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) Random(java.util.Random) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
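NO_LOCAL_WRITE is a block-placement hint: it asks the NameNode not to place a replica on the DataNode co-located with the writing client. With three DataNodes and a replication factor of 2, that is why the assertion above expects exactly one DataNode to report no blocks.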

Example 5 with CreateFlag

Use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.

From the class TestAddBlockRetry, method testAddBlockRetryShouldReturnBlockWithLocations.

/*
   * Since the NameNode does not persist block locations, an addBlock() retry
   * call after a NameNode restart should re-select the locations and return
   * them to the client. See HDFS-5257.
   */
@Test
public void testAddBlockRetryShouldReturnBlockWithLocations() throws Exception {
    final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
    NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
    // create file
    nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName", new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true, (short) 3, 1024, null);
    // start first addBlock()
    LOG.info("Starting first addBlock for " + src);
    LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null, HdfsConstants.GRANDFATHER_INODE_ID, null, null);
    assertTrue("Block locations should be present", lb1.getLocations().length > 0);
    cluster.restartNameNode();
    nameNodeRpc = cluster.getNameNodeRpc();
    LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null, HdfsConstants.GRANDFATHER_INODE_ID, null, null);
    assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
    assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Test(org.junit.Test)

Aggregations

CreateFlag (org.apache.hadoop.fs.CreateFlag): 18
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9
Test (org.junit.Test): 8
IOException (java.io.IOException): 6
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 6
Path (org.apache.hadoop.fs.Path): 5
Configuration (org.apache.hadoop.conf.Configuration): 4
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 4
NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 4
FileNotFoundException (java.io.FileNotFoundException): 3
OutputStream (java.io.OutputStream): 2
URI (java.net.URI): 2
Random (java.util.Random): 2
CryptoProtocolVersion (org.apache.hadoop.crypto.CryptoProtocolVersion): 2
FileSystem (org.apache.hadoop.fs.FileSystem): 2
DFSClient (org.apache.hadoop.hdfs.DFSClient): 2
LastBlockWithStatus (org.apache.hadoop.hdfs.protocol.LastBlockWithStatus): 2
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 2
RemoteException (org.apache.hadoop.ipc.RemoteException): 2
Matchers.anyString (org.mockito.Matchers.anyString): 2