Search in sources :

Example 71 with DFSClient

use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

From the class TestWrites, method testWriteStableHow:

@Test
public void testWriteStableHow() throws IOException, InterruptedException {
    NfsConfiguration config = new NfsConfiguration();
    DFSClient client = null;
    MiniDFSCluster cluster = null;
    RpcProgramNfs3 nfsd;
    SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
    Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
    String currentUser = System.getProperty("user.name");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser), "*");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser), "*");
    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
    try {
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        client = new DFSClient(DFSUtilClient.getNNAddress(config), config);
        // Use emphral port in case tests are running in parallel
        config.setInt("nfs3.mountd.port", 0);
        config.setInt("nfs3.server.port", 0);
        // Start nfs
        Nfs3 nfs3 = new Nfs3(config);
        nfs3.startServiceInternal(false);
        nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
        HdfsFileStatus status = client.getFileInfo("/");
        FileHandle rootHandle = new FileHandle(status.getFileId());
        // Create file1
        CREATE3Request createReq = new CREATE3Request(rootHandle, "file1", Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
        XDR createXdr = new XDR();
        createReq.serialize(createXdr);
        CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
        FileHandle handle = createRsp.getObjHandle();
        // Test DATA_SYNC
        byte[] buffer = new byte[10];
        for (int i = 0; i < 10; i++) {
            buffer[i] = (byte) i;
        }
        WRITE3Request writeReq = new WRITE3Request(handle, 0, 10, WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
        XDR writeXdr = new XDR();
        writeReq.serialize(writeXdr);
        nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
        waitWrite(nfsd, handle, 60000);
        // Readback
        READ3Request readReq = new READ3Request(handle, 0, 10);
        XDR readXdr = new XDR();
        readReq.serialize(readXdr);
        READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
        assertTrue(Arrays.equals(buffer, readRsp.getData().array()));
        // Test FILE_SYNC
        // Create file2
        CREATE3Request createReq2 = new CREATE3Request(rootHandle, "file2", Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
        XDR createXdr2 = new XDR();
        createReq2.serialize(createXdr2);
        CREATE3Response createRsp2 = nfsd.create(createXdr2.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
        FileHandle handle2 = createRsp2.getObjHandle();
        WRITE3Request writeReq2 = new WRITE3Request(handle2, 0, 10, WriteStableHow.FILE_SYNC, ByteBuffer.wrap(buffer));
        XDR writeXdr2 = new XDR();
        writeReq2.serialize(writeXdr2);
        nfsd.write(writeXdr2.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
        waitWrite(nfsd, handle2, 60000);
        // Readback
        READ3Request readReq2 = new READ3Request(handle2, 0, 10);
        XDR readXdr2 = new XDR();
        readReq2.serialize(readXdr2);
        READ3Response readRsp2 = nfsd.read(readXdr2.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
        assertTrue(Arrays.equals(buffer, readRsp2.getData().array()));
        // FILE_SYNC should sync the file size
        status = client.getFileInfo("/file2");
        assertTrue(status.getLen() == 10);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) SecurityHandler(org.apache.hadoop.oncrpc.security.SecurityHandler) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) InetSocketAddress(java.net.InetSocketAddress) XDR(org.apache.hadoop.oncrpc.XDR) WRITE3Request(org.apache.hadoop.nfs.nfs3.request.WRITE3Request) NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration) SetAttr3(org.apache.hadoop.nfs.nfs3.request.SetAttr3) READ3Request(org.apache.hadoop.nfs.nfs3.request.READ3Request) CREATE3Response(org.apache.hadoop.nfs.nfs3.response.CREATE3Response) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) CREATE3Request(org.apache.hadoop.nfs.nfs3.request.CREATE3Request) READ3Response(org.apache.hadoop.nfs.nfs3.response.READ3Response) Test(org.junit.Test)

Example 72 with DFSClient

use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

From the class WebHdfsHandler, method onOpen:

private void onOpen(ChannelHandlerContext ctx) throws IOException {
    final String nnId = params.namenodeId();
    final int bufferSize = params.bufferSize();
    final long offset = params.offset();
    final long length = params.length();
    resp = new DefaultHttpResponse(HTTP_1_1, OK);
    HttpHeaders headers = resp.headers();
    // Allow the UI to access the file
    headers.set(ACCESS_CONTROL_ALLOW_METHODS, GET);
    headers.set(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
    headers.set(CONTENT_TYPE, APPLICATION_OCTET_STREAM);
    headers.set(CONNECTION, CLOSE);
    final DFSClient dfsclient = newDfsClient(nnId, conf);
    HdfsDataInputStream in = dfsclient.createWrappedInputStream(dfsclient.open(path, bufferSize, true));
    in.seek(offset);
    long contentLength = in.getVisibleLength() - offset;
    if (length >= 0) {
        contentLength = Math.min(contentLength, length);
    }
    final InputStream data;
    if (contentLength >= 0) {
        headers.set(CONTENT_LENGTH, contentLength);
        data = new LimitInputStream(in, contentLength);
    } else {
        data = in;
    }
    ctx.write(resp);
    ctx.writeAndFlush(new ChunkedStream(data) {

        @Override
        public void close() throws Exception {
            super.close();
            dfsclient.close();
        }
    }).addListener(ChannelFutureListener.CLOSE);
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) HttpHeaders(io.netty.handler.codec.http.HttpHeaders) DefaultHttpResponse(io.netty.handler.codec.http.DefaultHttpResponse) LimitInputStream(org.apache.hadoop.util.LimitInputStream) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream) InputStream(java.io.InputStream) ChunkedStream(io.netty.handler.stream.ChunkedStream) LimitInputStream(org.apache.hadoop.util.LimitInputStream) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream)

Example 73 with DFSClient

use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

From the class WebHdfsHandler, method onAppend:

private void onAppend(ChannelHandlerContext ctx) throws IOException {
    writeContinueHeader(ctx);
    final String nnId = params.namenodeId();
    final int bufferSize = params.bufferSize();
    DFSClient dfsClient = newDfsClient(nnId, conf);
    OutputStream out = dfsClient.append(path, bufferSize, EnumSet.of(CreateFlag.APPEND), null, null);
    resp = new DefaultHttpResponse(HTTP_1_1, OK);
    resp.headers().set(CONTENT_LENGTH, 0);
    ctx.pipeline().replace(this, HdfsWriter.class.getSimpleName(), new HdfsWriter(dfsClient, out, resp));
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) DefaultHttpResponse(io.netty.handler.codec.http.DefaultHttpResponse) OutputStream(java.io.OutputStream)

Example 74 with DFSClient

use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

From the class NamenodeFsck, method copyBlocksToLostFound:

private void copyBlocksToLostFound(String parent, HdfsFileStatus file, LocatedBlocks blocks) throws IOException {
    final DFSClient dfs = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
    final String fullName = file.getFullName(parent);
    OutputStream fos = null;
    try {
        if (!lfInited) {
            lostFoundInit(dfs);
        }
        if (!lfInitedOk) {
            throw new IOException("failed to initialize lost+found");
        }
        String target = lostFound + fullName;
        if (hdfsPathExists(target)) {
            LOG.warn("Fsck: can't copy the remains of " + fullName + " to " + "lost+found, because " + target + " already exists.");
            return;
        }
        if (!namenode.getRpcServer().mkdirs(target, file.getPermission(), true)) {
            throw new IOException("failed to create directory " + target);
        }
        // create chains
        int chain = 0;
        boolean copyError = false;
        for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
            LocatedBlock lblock = lBlk;
            DatanodeInfo[] locs = lblock.getLocations();
            if (locs == null || locs.length == 0) {
                if (fos != null) {
                    fos.flush();
                    fos.close();
                    fos = null;
                }
                continue;
            }
            if (fos == null) {
                fos = dfs.create(target + "/" + chain, true);
                chain++;
            }
            // copy the block. It's a pity it's not abstracted from DFSInputStream ...
            try {
                copyBlock(dfs, lblock, fos);
            } catch (Exception e) {
                LOG.error("Fsck: could not copy block " + lblock.getBlock() + " to " + target, e);
                fos.flush();
                fos.close();
                fos = null;
                internalError = true;
                copyError = true;
            }
        }
        if (copyError) {
            LOG.warn("Fsck: there were errors copying the remains of the " + "corrupted file " + fullName + " to /lost+found");
        } else {
            LOG.info("Fsck: copied the remains of the corrupted file " + fullName + " to /lost+found");
        }
    } catch (Exception e) {
        LOG.error("copyBlocksToLostFound: error processing " + fullName, e);
        internalError = true;
    } finally {
        if (fos != null)
            fos.close();
        dfs.close();
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) OutputStream(java.io.OutputStream) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) AccessControlException(org.apache.hadoop.security.AccessControlException)

Example 75 with DFSClient

use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

From the class TestResolveHdfsSymlink, method testLinkTargetNonSymlink:

/**
   * Verifies that attempting to resolve a non-symlink results in client
   * exception
   */
@Test
public void testLinkTargetNonSymlink() throws UnsupportedFileSystemException, IOException {
    FileContext fc = null;
    Path notSymlink = new Path("/notasymlink");
    try {
        fc = FileContext.getFileContext(cluster.getFileSystem().getUri());
        fc.create(notSymlink, EnumSet.of(CreateFlag.CREATE));
        DFSClient client = new DFSClient(cluster.getFileSystem().getUri(), cluster.getConfiguration(0));
        try {
            client.getLinkTarget(notSymlink.toString());
            fail("Expected exception for resolving non-symlink");
        } catch (IOException e) {
            GenericTestUtils.assertExceptionContains("is not a symbolic link", e);
        }
    } finally {
        if (fc != null) {
            fc.delete(notSymlink, false);
        }
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) IOException(java.io.IOException) Test(org.junit.Test)

Aggregations

DFSClient (org.apache.hadoop.hdfs.DFSClient)97 Test (org.junit.Test)53 IOException (java.io.IOException)35 Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes)27 FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle)26 VisibleForTesting (com.google.common.annotations.VisibleForTesting)18 Path (org.apache.hadoop.fs.Path)18 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)17 InetSocketAddress (java.net.InetSocketAddress)13 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)13 Configuration (org.apache.hadoop.conf.Configuration)12 NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration)12 FileSystem (org.apache.hadoop.fs.FileSystem)11 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)11 HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream)9 WccData (org.apache.hadoop.nfs.nfs3.response.WccData)9 ShellBasedIdMapping (org.apache.hadoop.security.ShellBasedIdMapping)8 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)7 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)7 ArrayList (java.util.ArrayList)6