Use of org.apache.hadoop.fs.FsStatus in project hadoop by apache.
The class RpcProgramNfs3, method fsstat.
@VisibleForTesting
FSSTAT3Response fsstat(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
  FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK);
  if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
    response.setStatus(Nfs3Status.NFS3ERR_ACCES);
    return response;
  }
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  FSSTAT3Request request;
  try {
    request = FSSTAT3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid FSSTAT request");
    return new FSSTAT3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle handle = request.getHandle();
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS FSSTAT fileId: " + handle.getFileId() + " client: " + remoteAddress);
  }
  try {
    FsStatus fsStatus = dfsClient.getDiskStatus();
    long totalBytes = fsStatus.getCapacity();
    long freeBytes = fsStatus.getRemaining();
    Nfs3FileAttributes attrs = writeManager.getFileAttr(dfsClient, handle, iug);
    if (attrs == null) {
      LOG.info("Can't get path for fileId: " + handle.getFileId());
      return new FSSTAT3Response(Nfs3Status.NFS3ERR_STALE);
    }
    long maxFsObjects = config.getLong("dfs.max.objects", 0);
    if (maxFsObjects == 0) {
      // A value of zero in HDFS indicates no limit to the number of objects
      // that dfs supports. Using Integer.MAX_VALUE instead of Long.MAX_VALUE
      // so a 32-bit client won't complain.
      maxFsObjects = Integer.MAX_VALUE;
    }
    return new FSSTAT3Response(Nfs3Status.NFS3_OK, attrs, totalBytes, freeBytes, freeBytes, maxFsObjects, maxFsObjects, maxFsObjects, 0);
  } catch (RemoteException r) {
    LOG.warn("Exception ", r);
    IOException io = r.unwrapRemoteException();
    // AuthorizationException can be thrown if the user can't be proxied.
    if (io instanceof AuthorizationException) {
      return new FSSTAT3Response(Nfs3Status.NFS3ERR_ACCES);
    } else {
      return new FSSTAT3Response(Nfs3Status.NFS3ERR_IO);
    }
  } catch (IOException e) {
    LOG.warn("Exception ", e);
    int status = mapErrorStatus(e);
    return new FSSTAT3Response(status);
  }
}
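The FSSTAT response fields above are populated directly from the FsStatus accessors. As a minimal standalone sketch (not part of RpcProgramNfs3; the namenode URI and class name below are assumed placeholders), the same capacity/remaining/used numbers can be read straight from a DFSClient:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.hdfs.DFSClient;

public class FsStatusProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed namenode address; substitute the cluster's real fs.defaultFS.
    try (DFSClient client = new DFSClient(new URI("hdfs://localhost:8020"), conf)) {
      FsStatus status = client.getDiskStatus();
      System.out.println("capacity  = " + status.getCapacity());
      System.out.println("remaining = " + status.getRemaining());
      System.out.println("used      = " + status.getUsed());
    }
  }
}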
Use of org.apache.hadoop.fs.FsStatus in project hadoop by apache.
The class TestDefaultContainerExecutor, method testStartLocalizer.
@Test(timeout = 30000)
public void testStartLocalizer() throws IOException, InterruptedException, YarnException {
  final Path firstDir = new Path(BASE_TMP_PATH, "localDir1");
  List<String> localDirs = new ArrayList<String>();
  final Path secondDir = new Path(BASE_TMP_PATH, "localDir2");
  List<String> logDirs = new ArrayList<String>();
  final Path logDir = new Path(BASE_TMP_PATH, "logDir");
  final Path tokenDir = new Path(BASE_TMP_PATH, "tokenDir");
  FsPermission perms = new FsPermission((short) 0770);
  Configuration conf = new Configuration();
  final FileContext mockLfs = spy(FileContext.getLocalFSFileContext(conf));
  final FileContext.Util mockUtil = spy(mockLfs.util());
  doAnswer(new Answer() {

    @Override
    public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
      return mockUtil;
    }
  }).when(mockLfs).util();
  doAnswer(new Answer() {

    @Override
    public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
      Path dest = (Path) invocationOnMock.getArguments()[1];
      if (dest.toString().contains(firstDir.toString())) {
        // to simulate no space on the first drive
        throw new IOException("No space on this drive " + dest.toString());
      } else {
        // copy token to the second local dir
        DataOutputStream tokenOut = null;
        try {
          Credentials credentials = new Credentials();
          tokenOut = mockLfs.create(dest, EnumSet.of(CREATE, OVERWRITE));
          credentials.writeTokenStorageToStream(tokenOut);
        } finally {
          if (tokenOut != null) {
            tokenOut.close();
          }
        }
      }
      return null;
    }
  }).when(mockUtil).copy(any(Path.class), any(Path.class), anyBoolean(), anyBoolean());
  doAnswer(new Answer() {

    @Override
    public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
      Path p = (Path) invocationOnMock.getArguments()[0];
      // first local directory: report a full disk
      if (p.toString().contains(firstDir.toString())) {
        return new FsStatus(2000, 2000, 0);
      } else {
        return new FsStatus(1000, 0, 1000);
      }
    }
  }).when(mockLfs).getFsStatus(any(Path.class));
  DefaultContainerExecutor mockExec = spy(new DefaultContainerExecutor(mockLfs) {

    @Override
    public ContainerLocalizer createContainerLocalizer(String user, String appId, String locId, List<String> localDirs, FileContext localizerFc) throws IOException {
      // Spy on the localizer and make it return valid heartbeat
      // responses even though there is no real NodeManager.
      ContainerLocalizer localizer = super.createContainerLocalizer(user, appId, locId, localDirs, localizerFc);
      ContainerLocalizer spyLocalizer = spy(localizer);
      LocalizationProtocol nmProxy = mock(LocalizationProtocol.class);
      try {
        when(nmProxy.heartbeat(isA(LocalizerStatus.class))).thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.DIE, new ArrayList<ResourceLocalizationSpec>()));
      } catch (YarnException e) {
        throw new IOException(e);
      }
      when(spyLocalizer.getProxy(any(InetSocketAddress.class))).thenReturn(nmProxy);
      return spyLocalizer;
    }
  });
  mockExec.setConf(conf);
  localDirs.add(mockLfs.makeQualified(firstDir).toString());
  localDirs.add(mockLfs.makeQualified(secondDir).toString());
  logDirs.add(mockLfs.makeQualified(logDir).toString());
  conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, localDirs.toArray(new String[localDirs.size()]));
  conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.toString());
  mockLfs.mkdir(tokenDir, perms, true);
  Path nmPrivateCTokensPath = new Path(tokenDir, "test.tokens");
  String appSubmitter = "nobody";
  String appId = "APP_ID";
  String locId = "LOC_ID";
  LocalDirsHandlerService dirsHandler = mock(LocalDirsHandlerService.class);
  when(dirsHandler.getLocalDirs()).thenReturn(localDirs);
  when(dirsHandler.getLogDirs()).thenReturn(logDirs);
  try {
    mockExec.startLocalizer(new LocalizerStartContext.Builder().setNmPrivateContainerTokens(nmPrivateCTokensPath).setNmAddr(null).setUser(appSubmitter).setAppId(appId).setLocId(locId).setDirsHandler(dirsHandler).build());
  } catch (IOException e) {
    Assert.fail("StartLocalizer failed to copy token file: " + StringUtils.stringifyException(e));
  } finally {
    mockExec.deleteAsUser(new DeletionAsUserContext.Builder().setUser(appSubmitter).setSubDir(firstDir).build());
    mockExec.deleteAsUser(new DeletionAsUserContext.Builder().setUser(appSubmitter).setSubDir(secondDir).build());
    mockExec.deleteAsUser(new DeletionAsUserContext.Builder().setUser(appSubmitter).setSubDir(logDir).build());
    deleteTmpFiles();
  }
  // Verify that the calls happen the expected number of times
  verify(mockUtil, times(1)).copy(any(Path.class), any(Path.class), anyBoolean(), anyBoolean());
  verify(mockLfs, times(2)).getFsStatus(any(Path.class));
}
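The stubbed getFsStatus answers above lean on the public FsStatus(capacity, used, remaining) constructor to fake disk usage. A minimal sketch of that constructor in isolation (values arbitrary, mirroring the test's two stubs; the class name is a placeholder):

import org.apache.hadoop.fs.FsStatus;

public class FsStatusFake {
  public static void main(String[] args) {
    // Same shape as the test's stubs: capacity, used, remaining (in bytes).
    FsStatus fullDrive = new FsStatus(2000L, 2000L, 0L);
    FsStatus emptyDrive = new FsStatus(1000L, 0L, 1000L);
    System.out.println(fullDrive.getRemaining());  // 0 -> the executor would treat this dir as full
    System.out.println(emptyDrive.getRemaining()); // 1000 -> this dir still has room
  }
}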
Use of org.apache.hadoop.fs.FsStatus in project ignite by apache.
The class HadoopIgfs20FileSystemAbstractSelfTest, method testStatus.
/** @throws Exception If failed. */
public void testStatus() throws Exception {
  Path file1 = new Path("/file1");
  try (FSDataOutputStream file = fs.create(file1, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault()))) {
    file.write(new byte[1024 * 1024]);
  }
  FsStatus status = fs.getFsStatus();
  assertEquals(getClientFsUser(), fs.getFileStatus(file1).getOwner());
  assertEquals(4, grid(0).cluster().nodes().size());
  long used = 0, max = 0;
  for (int i = 0; i < 4; i++) {
    IgniteFileSystem igfs = grid(i).fileSystem("igfs");
    IgfsMetrics metrics = igfs.metrics();
    used += metrics.localSpaceSize();
    max += metrics.maxSpaceSize();
  }
  assertEquals(used, status.getUsed());
  assertEquals(max, status.getCapacity());
}
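Here fs.getFsStatus() is the AbstractFileSystem-level call. A minimal sketch (an illustration, not from the test; it reads whatever fs.defaultFS resolves to, and in an IGFS deployment that URI would point at the Ignite filesystem) of fetching the same aggregate numbers through the FileContext API:

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;

public class FsStatusViaFileContext {
  public static void main(String[] args) throws Exception {
    // Resolves the default filesystem from the loaded Hadoop configuration.
    FileContext fc = FileContext.getFileContext();
    FsStatus status = fc.getFsStatus(new Path("/"));
    System.out.println("capacity: " + status.getCapacity());
    System.out.println("used:     " + status.getUsed());
    System.out.println("free:     " + status.getRemaining());
  }
}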
Use of org.apache.hadoop.fs.FsStatus in project cdap by caskdata.
The class HDFSStorage, method collect.
@Override
public synchronized void collect() throws IOException {
  try (DistributedFileSystem dfs = createDFS()) {
    if (dfs == null) {
      return;
    }
    FsStatus status = dfs.getStatus();
    this.totalBytes = status.getCapacity();
    this.availableBytes = status.getRemaining();
    this.usedBytes = status.getUsed();
    this.missingBlocks = dfs.getMissingBlocksCount();
    this.underReplicatedBlocks = dfs.getUnderReplicatedBlocksCount();
    this.corruptBlocks = dfs.getCorruptBlocksCount();
  }
}
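FileSystem.getStatus() is the generic counterpart of the DistributedFileSystem call above; the block-count getters are HDFS-specific. A minimal standalone sketch (assumes fs.defaultFS points at HDFS; the instanceof guard covers other filesystems, and the class name is a placeholder) of collecting the same metrics outside CDAP:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class HdfsSpaceReport {
  public static void main(String[] args) throws Exception {
    try (FileSystem fs = FileSystem.get(new Configuration())) {
      FsStatus status = fs.getStatus();
      System.out.println("total:     " + status.getCapacity());
      System.out.println("available: " + status.getRemaining());
      System.out.println("used:      " + status.getUsed());
      // Block health counters exist only on the HDFS implementation.
      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        System.out.println("missing blocks:          " + dfs.getMissingBlocksCount());
        System.out.println("under-replicated blocks: " + dfs.getUnderReplicatedBlocksCount());
        System.out.println("corrupt blocks:          " + dfs.getCorruptBlocksCount());
      }
    }
  }
}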