Use of org.apache.hadoop.lib.service.FileSystemAccessException in project hadoop by apache.
The class HttpFSServer, method get.
/**
 * Binding to handle GET requests; the supported operations are dispatched on
 * the {@code op} query parameter (see the switch below).
 *
 * @param path the path for the operation.
 * @param op the HttpFS operation of the request.
 * @param params the HttpFS parameters of the request.
 * @param request the HTTP servlet request.
 *
 * @return the request response.
 *
 * @throws IOException thrown if an IO error occurred. Thrown exceptions are
 * handled by {@link HttpFSExceptionProvider}.
 * @throws FileSystemAccessException thrown if a FileSystemAccess related
 * error occurred. Thrown exceptions are handled by
 * {@link HttpFSExceptionProvider}.
 */
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
public Response get(@PathParam("path") String path,
    @QueryParam(OperationParam.NAME) OperationParam op,
    @Context Parameters params,
    @Context HttpServletRequest request)
    throws IOException, FileSystemAccessException {
  UserGroupInformation user = HttpUserGroupInformation.get();
  Response response;
  path = makeAbsolute(path);
  MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
  MDC.put("hostname", request.getRemoteAddr());
  switch (op.value()) {
    case OPEN: {
      // Invoking the command directly using an unmanaged FileSystem that is
      // released by the FileSystemReleaseFilter.
      final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
      final FileSystem fs = createFileSystem(user);
      InputStream is = null;
      UserGroupInformation ugi = UserGroupInformation.createProxyUser(
          user.getShortUserName(), UserGroupInformation.getLoginUser());
      try {
        is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
          @Override
          public InputStream run() throws Exception {
            return command.execute(fs);
          }
        });
      } catch (InterruptedException ie) {
        LOG.info("Open interrupted.", ie);
        Thread.currentThread().interrupt();
      }
      Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
      Long len = params.get(LenParam.NAME, LenParam.class);
      AUDIT_LOG.info("[{}] offset [{}] len [{}]",
          new Object[]{path, offset, len});
      InputStreamEntity entity = new InputStreamEntity(is, offset, len);
      response = Response.ok(entity)
          .type(MediaType.APPLICATION_OCTET_STREAM).build();
      break;
    }
    case GETFILESTATUS: {
      FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("[{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case LISTSTATUS: {
      String filter = params.get(FilterParam.NAME, FilterParam.class);
      FSOperations.FSListStatus command =
          new FSOperations.FSListStatus(path, filter);
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("[{}] filter [{}]", path,
          (filter != null) ? filter : "-");
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETHOMEDIRECTORY: {
      enforceRootPath(op.value(), path);
      FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
      JSONObject json = fsExecute(user, command);
      AUDIT_LOG.info("");
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case INSTRUMENTATION: {
      enforceRootPath(op.value(), path);
      Groups groups = HttpFSServerWebApp.get().get(Groups.class);
      List<String> userGroups = groups.getGroups(user.getShortUserName());
      if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
        throw new AccessControlException(
            "User not in HttpFSServer admin group");
      }
      Instrumentation instrumentation =
          HttpFSServerWebApp.get().get(Instrumentation.class);
      Map snapshot = instrumentation.getSnapshot();
      response = Response.ok(snapshot).build();
      break;
    }
    case GETCONTENTSUMMARY: {
      FSOperations.FSContentSummary command =
          new FSOperations.FSContentSummary(path);
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("[{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETFILECHECKSUM: {
      FSOperations.FSFileChecksum command =
          new FSOperations.FSFileChecksum(path);
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("[{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETFILEBLOCKLOCATIONS: {
      response = Response.status(Response.Status.BAD_REQUEST).build();
      break;
    }
    case GETACLSTATUS: {
      FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("ACL status for [{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETXATTRS: {
      List<String> xattrNames =
          params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
      XAttrCodec encoding =
          params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
      FSOperations.FSGetXAttrs command =
          new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
      @SuppressWarnings("rawtypes")
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("XAttrs for [{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case LISTXATTRS: {
      FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
      @SuppressWarnings("rawtypes")
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("XAttr names for [{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case LISTSTATUS_BATCH: {
      String startAfter = params.get(
          HttpFSParametersProvider.StartAfterParam.NAME,
          HttpFSParametersProvider.StartAfterParam.class);
      byte[] token = HttpFSUtils.EMPTY_BYTES;
      if (startAfter != null) {
        token = startAfter.getBytes(Charsets.UTF_8);
      }
      FSOperations.FSListStatusBatch command =
          new FSOperations.FSListStatusBatch(path, token);
      @SuppressWarnings("rawtypes")
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("[{}] token [{}]", path, token);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETTRASHROOT: {
      FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
      JSONObject json = fsExecute(user, command);
      AUDIT_LOG.info("[{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETALLSTORAGEPOLICY: {
      FSOperations.FSGetAllStoragePolicies command =
          new FSOperations.FSGetAllStoragePolicies();
      JSONObject json = fsExecute(user, command);
      AUDIT_LOG.info("[{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETSTORAGEPOLICY: {
      FSOperations.FSGetStoragePolicy command =
          new FSOperations.FSGetStoragePolicy(path);
      JSONObject json = fsExecute(user, command);
      AUDIT_LOG.info("[{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    default: {
      throw new IOException(MessageFormat.format(
          "Invalid HTTP GET operation [{0}]", op.value()));
    }
  }
  return response;
}
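For orientation, below is a minimal client-side sketch that would exercise the GETFILESTATUS branch of this handler over the WebHDFS-compatible REST API. The host, port (14000 is the usual HttpFS default), target path, and the use of pseudo authentication via the user.name query parameter are illustrative assumptions, not taken from the code above.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class HttpFSGetFileStatusClient {
  public static void main(String[] args) throws Exception {
    // Assumed HttpFS endpoint and path; adjust to the actual deployment.
    URL url = new URL("http://localhost:14000/webhdfs/v1/tmp/foo"
        + "?op=GETFILESTATUS&user.name=hdfs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      // For this operation the handler above returns a JSON FileStatus document.
      String line;
      while ((line = reader.readLine()) != null) {
        System.out.println(line);
      }
    } finally {
      conn.disconnect();
    }
  }
}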
Use of org.apache.hadoop.lib.service.FileSystemAccessException in project hadoop by apache.
The class FileSystemAccessService, method execute.
@Override
public <T> T execute(String user, final Configuration conf,
    final FileSystemExecutor<T> executor) throws FileSystemAccessException {
  Check.notEmpty(user, "user");
  Check.notNull(conf, "conf");
  Check.notNull(executor, "executor");
  if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) {
    throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04);
  }
  if (conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) == null ||
      conf.getTrimmed(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)
          .length() == 0) {
    throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06,
        CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
  }
  try {
    validateNamenode(new URI(conf.get(
        CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)).getAuthority());
    UserGroupInformation ugi = getUGI(user);
    return ugi.doAs(new PrivilegedExceptionAction<T>() {
      @Override
      public T run() throws Exception {
        FileSystem fs = createFileSystem(conf);
        Instrumentation instrumentation = getServer().get(Instrumentation.class);
        Instrumentation.Cron cron = instrumentation.createCron();
        // The executor is timed with a Cron; the FileSystem is always closed
        // in the finally block, regardless of the executor's outcome.
        try {
          checkNameNodeHealth(fs);
          cron.start();
          return executor.execute(fs);
        } finally {
          cron.stop();
          instrumentation.addCron(INSTRUMENTATION_GROUP,
              executor.getClass().getSimpleName(), cron);
          closeFileSystem(fs);
        }
      }
    });
  } catch (FileSystemAccessException ex) {
    throw ex;
  } catch (Exception ex) {
    // Any unexpected failure, including one thrown by the executor, is
    // wrapped as error H03.
    throw new FileSystemAccessException(FileSystemAccessException.ERROR.H03, ex);
  }
}
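A caller typically packages a filesystem action as a FileSystemExecutor and hands it to this method. The sketch below mirrors that pattern; the server handle ("server"), the user name ("someUser"), and the path are illustrative assumptions, not part of the snippet above.

// Hypothetical caller: variable names and the path are assumptions.
FileSystemAccess access = server.get(FileSystemAccess.class);
FileStatus status = access.execute("someUser",
    access.getFileSystemConfiguration(),
    new FileSystemAccess.FileSystemExecutor<FileStatus>() {
      @Override
      public FileStatus execute(FileSystem fs) throws IOException {
        // The FileSystem is created, health-checked, and closed by the service.
        return fs.getFileStatus(new Path("/tmp/foo"));
      }
    });

Because execute() rethrows FileSystemAccessException as-is and wraps every other failure (including one thrown by the executor) as ERROR.H03, the caller only needs to handle that single exception type in addition to whatever the executor itself declares.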
Use of org.apache.hadoop.lib.service.FileSystemAccessException in project hadoop by apache.
The class FileSystemAccessService, method createFileSystemInternal.
public FileSystem createFileSystemInternal(String user, final Configuration conf)
    throws IOException, FileSystemAccessException {
  Check.notEmpty(user, "user");
  Check.notNull(conf, "conf");
  if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) {
    throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04);
  }
  try {
    validateNamenode(new URI(conf.get(
        CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)).getAuthority());
    UserGroupInformation ugi = getUGI(user);
    return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return createFileSystem(conf);
      }
    });
  } catch (IOException ex) {
    throw ex;
  } catch (FileSystemAccessException ex) {
    throw ex;
  } catch (Exception ex) {
    throw new FileSystemAccessException(FileSystemAccessException.ERROR.H08,
        ex.getMessage(), ex);
  }
}
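Unlike execute(), this method hands the caller an unmanaged FileSystem, so the caller is responsible for returning it to the service. A hedged sketch of that contract follows; the "service" handle, the listed path, and the use of releaseFileSystem(FileSystem) as the counterpart (the call FileSystemReleaseFilter is built around) are assumptions for illustration.

// Hedged sketch: names are illustrative; releaseFileSystem is the assumed
// counterpart that returns the FileSystem to the service.
FileSystem fs = service.createFileSystemInternal("someUser",
    service.getFileSystemConfiguration());
try {
  fs.listStatus(new Path("/"));
} finally {
  service.releaseFileSystem(fs);
}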
Use of org.apache.hadoop.lib.service.FileSystemAccessException in project hadoop by apache.
The class TestFileSystemAccessService, method fileSystemExecutorException.
@Test
@TestDir
@TestHdfs
public void fileSystemExecutorException() throws Exception {
  String dir = TestDirHelper.getTestDir().getAbsolutePath();
  String services = StringUtils.join(",",
      Arrays.asList(InstrumentationService.class.getName(),
          SchedulerService.class.getName(),
          FileSystemAccessService.class.getName()));
  Configuration hadoopConf = new Configuration(false);
  hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      TestHdfsHelper.getHdfsConf().get(
          CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
  createHadoopConf(hadoopConf);
  Configuration conf = new Configuration(false);
  conf.set("server.services", services);
  conf.set("server.hadoop.filesystem.cache.purge.timeout", "0");
  Server server = new Server("server", dir, dir, dir, dir, conf);
  server.init();
  FileSystemAccess hadoop = server.get(FileSystemAccess.class);
  final FileSystem[] fsa = new FileSystem[1];
  try {
    hadoop.execute("u", hadoop.getFileSystemConfiguration(),
        new FileSystemAccess.FileSystemExecutor<Void>() {
          @Override
          public Void execute(FileSystem fs) throws IOException {
            fsa[0] = fs;
            throw new IOException();
          }
        });
    Assert.fail();
  } catch (FileSystemAccessException ex) {
    // The executor's IOException must be wrapped as error H03.
    Assert.assertEquals(ex.getError(), FileSystemAccessException.ERROR.H03);
  } catch (Exception ex) {
    Assert.fail();
  }
  // The FileSystem captured by the executor must have been closed by the
  // service, so any further use should fail.
  try {
    fsa[0].mkdirs(new Path("/tmp/foo"));
    Assert.fail();
  } catch (IOException ex) {
    // Expected: the FileSystem was released after execute() returned.
  } catch (Exception ex) {
    Assert.fail();
  }
  server.destroy();
}
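For contrast, the success path of the same API simply passes the executor's return value through and raises no FileSystemAccessException. A minimal hedged sketch of such a check, assuming the same server and FileSystemAccess setup as the test above and run before server.destroy():

// Success path: the executor's return value is passed through unchanged.
Boolean exists = hadoop.execute("u", hadoop.getFileSystemConfiguration(),
    new FileSystemAccess.FileSystemExecutor<Boolean>() {
      @Override
      public Boolean execute(FileSystem fs) throws IOException {
        return fs.exists(new Path("/"));
      }
    });
Assert.assertTrue(exists);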