
Example 6 with FsStatus

Use of org.apache.hadoop.fs.FsStatus in project hadoop by apache.

From class RpcProgramNfs3, method fsstat:

@VisibleForTesting
FSSTAT3Response fsstat(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK);
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        response.setStatus(Nfs3Status.NFS3ERR_ACCES);
        return response;
    }
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    FSSTAT3Request request;
    try {
        request = FSSTAT3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid FSSTAT request");
        return new FSSTAT3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS FSSTAT fileId: " + handle.getFileId() + " client: " + remoteAddress);
    }
    try {
        FsStatus fsStatus = dfsClient.getDiskStatus();
        long totalBytes = fsStatus.getCapacity();
        long freeBytes = fsStatus.getRemaining();
        Nfs3FileAttributes attrs = writeManager.getFileAttr(dfsClient, handle, iug);
        if (attrs == null) {
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new FSSTAT3Response(Nfs3Status.NFS3ERR_STALE);
        }
        long maxFsObjects = config.getLong("dfs.max.objects", 0);
        if (maxFsObjects == 0) {
            // A value of zero in HDFS indicates no limit to the number
            // of objects that dfs supports. Using Integer.MAX_VALUE instead of
            // Long.MAX_VALUE so 32-bit clients won't complain.
            maxFsObjects = Integer.MAX_VALUE;
        }
        return new FSSTAT3Response(Nfs3Status.NFS3_OK, attrs, totalBytes, freeBytes, freeBytes, maxFsObjects, maxFsObjects, maxFsObjects, 0);
    } catch (RemoteException r) {
        LOG.warn("Exception ", r);
        IOException io = r.unwrapRemoteException();
        // AuthorizationException can be thrown if the user can't be proxied.
        if (io instanceof AuthorizationException) {
            return new FSSTAT3Response(Nfs3Status.NFS3ERR_ACCES);
        } else {
            return new FSSTAT3Response(Nfs3Status.NFS3ERR_IO);
        }
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        int status = mapErrorStatus(e);
        return new FSSTAT3Response(status);
    }
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient), AuthorizationException(org.apache.hadoop.security.authorize.AuthorizationException), FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle), Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), FSSTAT3Response(org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response), IOException(java.io.IOException), FSSTAT3Request(org.apache.hadoop.nfs.nfs3.request.FSSTAT3Request), RemoteException(org.apache.hadoop.ipc.RemoteException), FsStatus(org.apache.hadoop.fs.FsStatus), VisibleForTesting(com.google.common.annotations.VisibleForTesting)
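
The handler above maps FsStatus.getCapacity() and getRemaining() into the FSSTAT3 reply. For reference, a minimal standalone sketch of the same query through the public FileSystem API (the FsStatusProbe class name is illustrative; fs.defaultFS is assumed to come from the loaded Configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsStatus;

public class FsStatusProbe {
    public static void main(String[] args) throws Exception {
        // FileSystem.get() resolves fs.defaultFS from the configuration on the classpath.
        try (FileSystem fs = FileSystem.get(new Configuration())) {
            FsStatus status = fs.getStatus();
            System.out.println("capacity  = " + status.getCapacity());
            System.out.println("used      = " + status.getUsed());
            System.out.println("remaining = " + status.getRemaining());
        }
    }
}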

Example 7 with FsStatus

Use of org.apache.hadoop.fs.FsStatus in project hadoop by apache.

From class TestDefaultContainerExecutor, method testStartLocalizer:

@Test(timeout = 30000)
public void testStartLocalizer() throws IOException, InterruptedException, YarnException {
    final Path firstDir = new Path(BASE_TMP_PATH, "localDir1");
    List<String> localDirs = new ArrayList<String>();
    final Path secondDir = new Path(BASE_TMP_PATH, "localDir2");
    List<String> logDirs = new ArrayList<String>();
    final Path logDir = new Path(BASE_TMP_PATH, "logDir");
    final Path tokenDir = new Path(BASE_TMP_PATH, "tokenDir");
    FsPermission perms = new FsPermission((short) 0770);
    Configuration conf = new Configuration();
    final FileContext mockLfs = spy(FileContext.getLocalFSFileContext(conf));
    final FileContext.Util mockUtil = spy(mockLfs.util());
    doAnswer(new Answer() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            return mockUtil;
        }
    }).when(mockLfs).util();
    doAnswer(new Answer() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            Path dest = (Path) invocationOnMock.getArguments()[1];
            if (dest.toString().contains(firstDir.toString())) {
                // to simulate no space on the first drive
                throw new IOException("No space on this drive " + dest.toString());
            } else {
                // copy token to the second local dir
                DataOutputStream tokenOut = null;
                try {
                    Credentials credentials = new Credentials();
                    tokenOut = mockLfs.create(dest, EnumSet.of(CREATE, OVERWRITE));
                    credentials.writeTokenStorageToStream(tokenOut);
                } finally {
                    if (tokenOut != null) {
                        tokenOut.close();
                    }
                }
            }
            return null;
        }
    }).when(mockUtil).copy(any(Path.class), any(Path.class), anyBoolean(), anyBoolean());
    doAnswer(new Answer() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            Path p = (Path) invocationOnMock.getArguments()[0];
            // the first local dir reports a full disk; every other dir has free space
            if (p.toString().contains(firstDir.toString())) {
                return new FsStatus(2000, 2000, 0);
            } else {
                return new FsStatus(1000, 0, 1000);
            }
        }
    }).when(mockLfs).getFsStatus(any(Path.class));
    DefaultContainerExecutor mockExec = spy(new DefaultContainerExecutor(mockLfs) {

        @Override
        public ContainerLocalizer createContainerLocalizer(String user, String appId, String locId, List<String> localDirs, FileContext localizerFc) throws IOException {
            // Spy on the localizer and make it return valid heart-beat
            // responses even though there is no real NodeManager.
            ContainerLocalizer localizer = super.createContainerLocalizer(user, appId, locId, localDirs, localizerFc);
            ContainerLocalizer spyLocalizer = spy(localizer);
            LocalizationProtocol nmProxy = mock(LocalizationProtocol.class);
            try {
                when(nmProxy.heartbeat(isA(LocalizerStatus.class))).thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.DIE, new ArrayList<ResourceLocalizationSpec>()));
            } catch (YarnException e) {
                throw new IOException(e);
            }
            when(spyLocalizer.getProxy(any(InetSocketAddress.class))).thenReturn(nmProxy);
            return spyLocalizer;
        }
    });
    mockExec.setConf(conf);
    localDirs.add(mockLfs.makeQualified(firstDir).toString());
    localDirs.add(mockLfs.makeQualified(secondDir).toString());
    logDirs.add(mockLfs.makeQualified(logDir).toString());
    conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, localDirs.toArray(new String[localDirs.size()]));
    conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.toString());
    mockLfs.mkdir(tokenDir, perms, true);
    Path nmPrivateCTokensPath = new Path(tokenDir, "test.tokens");
    String appSubmitter = "nobody";
    String appId = "APP_ID";
    String locId = "LOC_ID";
    LocalDirsHandlerService dirsHandler = mock(LocalDirsHandlerService.class);
    when(dirsHandler.getLocalDirs()).thenReturn(localDirs);
    when(dirsHandler.getLogDirs()).thenReturn(logDirs);
    try {
        mockExec.startLocalizer(new LocalizerStartContext.Builder().setNmPrivateContainerTokens(nmPrivateCTokensPath).setNmAddr(null).setUser(appSubmitter).setAppId(appId).setLocId(locId).setDirsHandler(dirsHandler).build());
    } catch (IOException e) {
        Assert.fail("StartLocalizer failed to copy token file: " + StringUtils.stringifyException(e));
    } finally {
        mockExec.deleteAsUser(new DeletionAsUserContext.Builder().setUser(appSubmitter).setSubDir(firstDir).build());
        mockExec.deleteAsUser(new DeletionAsUserContext.Builder().setUser(appSubmitter).setSubDir(secondDir).build());
        mockExec.deleteAsUser(new DeletionAsUserContext.Builder().setUser(appSubmitter).setSubDir(logDir).build());
        deleteTmpFiles();
    }
    // Verify that the calls happen the expected number of times
    verify(mockUtil, times(1)).copy(any(Path.class), any(Path.class), anyBoolean(), anyBoolean());
    verify(mockLfs, times(2)).getFsStatus(any(Path.class));
}
Also used: Configuration(org.apache.hadoop.conf.Configuration), YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration), MockLocalizerHeartbeatResponse(org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.MockLocalizerHeartbeatResponse), DataOutputStream(java.io.DataOutputStream), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), ArrayList(java.util.ArrayList), FsStatus(org.apache.hadoop.fs.FsStatus), YarnException(org.apache.hadoop.yarn.exceptions.YarnException), ResourceLocalizationSpec(org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec), FsPermission(org.apache.hadoop.fs.permission.FsPermission), Path(org.apache.hadoop.fs.Path), IOException(java.io.IOException), Mockito.doAnswer(org.mockito.Mockito.doAnswer), Answer(org.mockito.stubbing.Answer), LocalizationProtocol(org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol), InvocationOnMock(org.mockito.invocation.InvocationOnMock), ContainerLocalizer(org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer), FileContext(org.apache.hadoop.fs.FileContext), Credentials(org.apache.hadoop.security.Credentials), Test(org.junit.Test)
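
The pivotal stub above is getFsStatus: FsStatus's three-argument constructor is (capacity, used, remaining), so new FsStatus(2000, 2000, 0) models a full disk and new FsStatus(1000, 0, 1000) an empty one. A minimal sketch of that stub in isolation (the FullDiskStub name and the literal sizes are illustrative):

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;

public class FullDiskStub {
    // Returns a spied local FileContext for which every path reports zero free bytes.
    static FileContext fullDiskFileContext() throws Exception {
        FileContext spyLfs = spy(FileContext.getLocalFSFileContext());
        // capacity=2000, used=2000, remaining=0: callers should skip this directory.
        doReturn(new FsStatus(2000L, 2000L, 0L)).when(spyLfs).getFsStatus(any(Path.class));
        return spyLfs;
    }
}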

Example 8 with FsStatus

Use of org.apache.hadoop.fs.FsStatus in project ignite by apache.

From class HadoopIgfs20FileSystemAbstractSelfTest, method testStatus:

/** @throws Exception If failed. */
public void testStatus() throws Exception {
    Path file1 = new Path("/file1");
    try (FSDataOutputStream file = fs.create(file1, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault()))) {
        file.write(new byte[1024 * 1024]);
    }
    FsStatus status = fs.getFsStatus();
    assertEquals(getClientFsUser(), fs.getFileStatus(file1).getOwner());
    assertEquals(4, grid(0).cluster().nodes().size());
    long used = 0, max = 0;
    for (int i = 0; i < 4; i++) {
        IgniteFileSystem igfs = grid(i).fileSystem("igfs");
        IgfsMetrics metrics = igfs.metrics();
        used += metrics.localSpaceSize();
        max += metrics.maxSpaceSize();
    }
    assertEquals(used, status.getUsed());
    assertEquals(max, status.getCapacity());
}
Also used: Path(org.apache.hadoop.fs.Path), IgfsPath(org.apache.ignite.igfs.IgfsPath), CreateFlag(org.apache.hadoop.fs.CreateFlag), IgfsMetrics(org.apache.ignite.igfs.IgfsMetrics), IgniteFileSystem(org.apache.ignite.IgniteFileSystem), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), FsStatus(org.apache.hadoop.fs.FsStatus)
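
For comparison, the same aggregate figures can be read through the FileContext API without standing up an IGFS cluster. A minimal sketch, assuming a default filesystem configured via fs.defaultFS (FileContextStatus is an illustrative name):

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;

public class FileContextStatus {
    public static void main(String[] args) throws Exception {
        // getFileContext() resolves the default filesystem from the configuration.
        FileContext fc = FileContext.getFileContext();
        FsStatus status = fc.getFsStatus(new Path("/"));
        System.out.printf("capacity=%d used=%d remaining=%d%n",
                status.getCapacity(), status.getUsed(), status.getRemaining());
    }
}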

Example 9 with FsStatus

Use of org.apache.hadoop.fs.FsStatus in project cdap by caskdata.

From class HDFSStorage, method collect:

@Override
public synchronized void collect() throws IOException {
    try (DistributedFileSystem dfs = createDFS()) {
        if (dfs == null) {
            return;
        }
        FsStatus status = dfs.getStatus();
        this.totalBytes = status.getCapacity();
        this.availableBytes = status.getRemaining();
        this.usedBytes = status.getUsed();
        this.missingBlocks = dfs.getMissingBlocksCount();
        this.underReplicatedBlocks = dfs.getUnderReplicatedBlocksCount();
        this.corruptBlocks = dfs.getCorruptBlocksCount();
    }
}
Also used: DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), FsStatus(org.apache.hadoop.fs.FsStatus)
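
A natural derived metric for a collector like this is utilization as a fraction of capacity. A minimal sketch (the HdfsUsage helper is hypothetical, not part of the CDAP class), guarding against a zero-capacity report:

import org.apache.hadoop.fs.FsStatus;

public final class HdfsUsage {
    private HdfsUsage() {
    }

    // Fraction of configured capacity currently used, in [0.0, 1.0].
    static double usedFraction(FsStatus status) {
        long capacity = status.getCapacity();
        // A fresh or unreachable cluster can report zero capacity; avoid dividing by it.
        return capacity == 0 ? 0.0 : (double) status.getUsed() / capacity;
    }
}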

Aggregations

FsStatus (org.apache.hadoop.fs.FsStatus): 9
Path (org.apache.hadoop.fs.Path): 4
Test (org.junit.Test): 3
IOException (java.io.IOException): 2
Configuration (org.apache.hadoop.conf.Configuration): 2
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2
MountPoint (org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint): 2
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1
DataOutputStream (java.io.DataOutputStream): 1
ArrayList (java.util.ArrayList): 1
HashMap (java.util.HashMap): 1
CreateFlag (org.apache.hadoop.fs.CreateFlag): 1
FileContext (org.apache.hadoop.fs.FileContext): 1
FileSystem (org.apache.hadoop.fs.FileSystem): 1
UnsupportedFileSystemException (org.apache.hadoop.fs.UnsupportedFileSystemException): 1
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 1
DFSClient (org.apache.hadoop.hdfs.DFSClient): 1
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1
RemoteException (org.apache.hadoop.ipc.RemoteException): 1