Use of org.apache.hadoop.lib.service.Instrumentation in project hadoop by apache.
The class HttpFSServer, method get.
/**
 * Binding to handle GET requests; the supported operations are dispatched
 * on the {@code op} query parameter.
 *
 * @param path the path for operation.
 * @param op the HttpFS operation of the request.
 * @param params the HttpFS parameters of the request.
 * @param request the HTTP request.
 *
 * @return the request response.
 *
 * @throws IOException thrown if an IO error occurred. Thrown exceptions are
 * handled by {@link HttpFSExceptionProvider}.
 * @throws FileSystemAccessException thrown if a FileSystemAccess related
 * error occurred. Thrown exceptions are handled by
 * {@link HttpFSExceptionProvider}.
 */
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
public Response get(@PathParam("path") String path,
    @QueryParam(OperationParam.NAME) OperationParam op,
    @Context Parameters params,
    @Context HttpServletRequest request)
    throws IOException, FileSystemAccessException {
  UserGroupInformation user = HttpUserGroupInformation.get();
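  // The request's authenticated user, as resolved by the HttpFS
  // authentication filter.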
  Response response;
  path = makeAbsolute(path);
  MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
  MDC.put("hostname", request.getRemoteAddr());
  switch (op.value()) {
    case OPEN: {
      // Invoking the command directly using an unmanaged FileSystem that is
      // released by the FileSystemReleaseFilter.
      final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
      final FileSystem fs = createFileSystem(user);
      InputStream is = null;
      UserGroupInformation ugi = UserGroupInformation.createProxyUser(
          user.getShortUserName(), UserGroupInformation.getLoginUser());
      try {
        is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
          @Override
          public InputStream run() throws Exception {
            return command.execute(fs);
          }
        });
      } catch (InterruptedException ie) {
        LOG.info("Open interrupted.", ie);
        Thread.currentThread().interrupt();
      }
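      // The optional offset and length select the byte range that is
      // streamed to the client from the opened stream.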
      Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
      Long len = params.get(LenParam.NAME, LenParam.class);
      AUDIT_LOG.info("[{}] offset [{}] len [{}]",
          new Object[] { path, offset, len });
      InputStreamEntity entity = new InputStreamEntity(is, offset, len);
      response = Response.ok(entity)
          .type(MediaType.APPLICATION_OCTET_STREAM).build();
      break;
    }
    case GETFILESTATUS: {
      FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("[{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case LISTSTATUS: {
      String filter = params.get(FilterParam.NAME, FilterParam.class);
      FSOperations.FSListStatus command =
          new FSOperations.FSListStatus(path, filter);
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("[{}] filter [{}]", path,
          (filter != null) ? filter : "-");
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETHOMEDIRECTORY: {
      enforceRootPath(op.value(), path);
      FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
      JSONObject json = fsExecute(user, command);
      AUDIT_LOG.info("");
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case INSTRUMENTATION: {
      enforceRootPath(op.value(), path);
      Groups groups = HttpFSServerWebApp.get().get(Groups.class);
      List<String> userGroups = groups.getGroups(user.getShortUserName());
      if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
        throw new AccessControlException(
            "User not in HttpFSServer admin group");
      }
      Instrumentation instrumentation =
          HttpFSServerWebApp.get().get(Instrumentation.class);
      Map snapshot = instrumentation.getSnapshot();
      response = Response.ok(snapshot).build();
      break;
    }
    case GETCONTENTSUMMARY: {
      FSOperations.FSContentSummary command =
          new FSOperations.FSContentSummary(path);
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("[{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETFILECHECKSUM: {
      FSOperations.FSFileChecksum command =
          new FSOperations.FSFileChecksum(path);
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("[{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETFILEBLOCKLOCATIONS: {
      response = Response.status(Response.Status.BAD_REQUEST).build();
      break;
    }
    case GETACLSTATUS: {
      FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("ACL status for [{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETXATTRS: {
      List<String> xattrNames =
          params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
      XAttrCodec encoding =
          params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
      FSOperations.FSGetXAttrs command =
          new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
      @SuppressWarnings("rawtypes")
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("XAttrs for [{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case LISTXATTRS: {
      FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
      @SuppressWarnings("rawtypes")
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("XAttr names for [{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case LISTSTATUS_BATCH: {
      String startAfter = params.get(
          HttpFSParametersProvider.StartAfterParam.NAME,
          HttpFSParametersProvider.StartAfterParam.class);
      byte[] token = HttpFSUtils.EMPTY_BYTES;
      if (startAfter != null) {
        token = startAfter.getBytes(Charsets.UTF_8);
      }
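      // The token names the last entry of the previous batch; the listing
      // resumes after it.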
      FSOperations.FSListStatusBatch command =
          new FSOperations.FSListStatusBatch(path, token);
      @SuppressWarnings("rawtypes")
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("[{}] token [{}]", path, token);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETTRASHROOT: {
      FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
      JSONObject json = fsExecute(user, command);
      AUDIT_LOG.info("[{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETALLSTORAGEPOLICY: {
      FSOperations.FSGetAllStoragePolicies command =
          new FSOperations.FSGetAllStoragePolicies();
      JSONObject json = fsExecute(user, command);
      AUDIT_LOG.info("[{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETSTORAGEPOLICY: {
      FSOperations.FSGetStoragePolicy command =
          new FSOperations.FSGetStoragePolicy(path);
      JSONObject json = fsExecute(user, command);
      AUDIT_LOG.info("[{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    default: {
      throw new IOException(MessageFormat.format(
          "Invalid HTTP GET operation [{0}]", op.value()));
    }
  }
  return response;
}
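The INSTRUMENTATION branch above is what serves the metrics snapshot over HTTP. A minimal client sketch, assuming an HttpFS server at localhost:14000 with pseudo authentication and a caller in the admin group (host, port, and user name are illustrative, not taken from the snippet):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class InstrumentationSnapshotClient {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint; adjust host, port, and user.name for your setup.
    URL url = new URL(
        "http://localhost:14000/webhdfs/v1/?op=INSTRUMENTATION&user.name=admin");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // JSON snapshot of counters, timers, samplers
      }
    } finally {
      conn.disconnect();
    }
  }
}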
Use of org.apache.hadoop.lib.service.Instrumentation in project hadoop by apache.
The class FileSystemAccessService, method postInit.
@Override
public void postInit() throws ServiceException {
  super.postInit();
  Instrumentation instrumentation = getServer().get(Instrumentation.class);
  instrumentation.addVariable(INSTRUMENTATION_GROUP, "unmanaged.fs",
      new Instrumentation.Variable<Integer>() {
        @Override
        public Integer getValue() {
          return unmanagedFileSystems.get();
        }
      });
  instrumentation.addSampler(INSTRUMENTATION_GROUP, "unmanaged.fs", 60,
      new Instrumentation.Variable<Long>() {
        @Override
        public Long getValue() {
          return (long) unmanagedFileSystems.get();
        }
      });
  Scheduler scheduler = getServer().get(Scheduler.class);
  int purgeInterval = getServiceConfig().getInt(FS_CACHE_PURGE_FREQUENCY, 60);
  purgeTimeout = getServiceConfig().getLong(FS_CACHE_PURGE_TIMEOUT, 60);
  purgeTimeout = (purgeTimeout > 0) ? purgeTimeout : 0;
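  // The purge interval is in seconds; a non-positive purge timeout
  // disables the cache purger.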
  if (purgeTimeout > 0) {
    scheduler.schedule(new FileSystemCachePurger(),
        purgeInterval, purgeInterval, TimeUnit.SECONDS);
  }
}
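The addVariable/addSampler pattern above works for any gauge a service wants to expose. A minimal sketch, assuming code running inside a service (so getServer() is available); the group name "myservice" and the activeRequests counter are illustrative, not part of the snippet:

final AtomicInteger activeRequests = new AtomicInteger();
Instrumentation instr = getServer().get(Instrumentation.class);
instr.addVariable("myservice", "active.requests",
    new Instrumentation.Variable<Integer>() {
      @Override
      public Integer getValue() {
        // Read on demand whenever a snapshot or sample is taken.
        return activeRequests.get();
      }
    });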
Use of org.apache.hadoop.lib.service.Instrumentation in project hadoop by apache.
The class FileSystemAccessService, method execute.
@Override
public <T> T execute(String user, final Configuration conf,
    final FileSystemExecutor<T> executor) throws FileSystemAccessException {
  Check.notEmpty(user, "user");
  Check.notNull(conf, "conf");
  Check.notNull(executor, "executor");
  if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) {
    throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04);
  }
  if (conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) == null ||
      conf.getTrimmed(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)
          .length() == 0) {
    throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06,
        CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
  }
  try {
    validateNamenode(new URI(
        conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY))
        .getAuthority());
    UserGroupInformation ugi = getUGI(user);
    return ugi.doAs(new PrivilegedExceptionAction<T>() {
      @Override
      public T run() throws Exception {
        FileSystem fs = createFileSystem(conf);
        Instrumentation instrumentation =
            getServer().get(Instrumentation.class);
        Instrumentation.Cron cron = instrumentation.createCron();
        try {
          checkNameNodeHealth(fs);
          cron.start();
          return executor.execute(fs);
        } finally {
          cron.stop();
          instrumentation.addCron(INSTRUMENTATION_GROUP,
              executor.getClass().getSimpleName(), cron);
          closeFileSystem(fs);
        }
      }
    });
  } catch (FileSystemAccessException ex) {
    throw ex;
  } catch (Exception ex) {
    throw new FileSystemAccessException(FileSystemAccessException.ERROR.H03, ex);
  }
}
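Callers package the per-request work as a FileSystemExecutor and let execute handle impersonation, health checks, timing, and FileSystem cleanup. A minimal caller sketch, with the user name "alice" and the path "/user/alice" chosen purely for illustration:

FileSystemAccess fsAccess = getServer().get(FileSystemAccess.class);
Configuration conf = fsAccess.getFileSystemConfiguration();
FileStatus[] statuses = fsAccess.execute("alice", conf,
    new FileSystemAccess.FileSystemExecutor<FileStatus[]>() {
      @Override
      public FileStatus[] execute(FileSystem fs) throws IOException {
        // Runs as "alice"; the service opens and closes the FileSystem.
        return fs.listStatus(new Path("/user/alice"));
      }
    });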
Use of org.apache.hadoop.lib.service.Instrumentation in project hadoop by apache.
The class SchedulerService, method schedule.
@Override
public void schedule(final Callable<?> callable, long delay, long interval,
    TimeUnit unit) {
  Check.notNull(callable, "callable");
  if (!scheduler.isShutdown()) {
    LOG.debug("Scheduling callable [{}], delay [{}], interval [{}], unit [{}]",
        new Object[] { callable, delay, interval, unit });
    Runnable r = new Runnable() {
      @Override
      public void run() {
        String instrName = callable.getClass().getSimpleName();
        Instrumentation instr = getServer().get(Instrumentation.class);
        if (getServer().getStatus() == Server.Status.HALTED) {
          LOG.debug("Skipping [{}], server status [{}]",
              callable, getServer().getStatus());
          instr.incr(INST_GROUP, instrName + ".skips", 1);
        } else {
          LOG.debug("Executing [{}]", callable);
          instr.incr(INST_GROUP, instrName + ".execs", 1);
          Instrumentation.Cron cron = instr.createCron().start();
          try {
            callable.call();
          } catch (Exception ex) {
            instr.incr(INST_GROUP, instrName + ".fails", 1);
            LOG.error("Error executing [{}], {}",
                new Object[] { callable, ex.getMessage(), ex });
          } finally {
            instr.addCron(INST_GROUP, instrName, cron.stop());
          }
        }
      }
    };
    scheduler.scheduleWithFixedDelay(r, delay, interval, unit);
  } else {
    throw new IllegalStateException(MessageFormat.format(
        "Scheduler shutting down, ignoring scheduling of [{0}]", callable));
  }
}
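A caller only supplies the Callable; the wrapper Runnable above contributes the skips/execs/fails counters and the timer. A minimal sketch, with delay and interval values chosen purely for illustration:

Scheduler scheduler = getServer().get(Scheduler.class);
scheduler.schedule(new Callable<Void>() {
  @Override
  public Void call() throws Exception {
    // Periodic maintenance work; failures are counted and logged by the
    // instrumented wrapper above.
    return null;
  }
}, 10, 60, TimeUnit.SECONDS); // first run after 10s, then every 60s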
Use of org.apache.hadoop.lib.service.Instrumentation in project hadoop by apache.
The class TestInstrumentationService, method sampling.
@Test
@TestDir
@SuppressWarnings("unchecked")
public void sampling() throws Exception {
  String dir = TestDirHelper.getTestDir().getAbsolutePath();
  String services = StringUtils.join(",", Arrays.asList(
      InstrumentationService.class.getName(),
      SchedulerService.class.getName()));
  Configuration conf = new Configuration(false);
  conf.set("server.services", services);
  Server server = new Server("server", dir, dir, dir, dir, conf);
  server.init();
  Instrumentation instrumentation = server.get(Instrumentation.class);
  final AtomicInteger count = new AtomicInteger();
  Instrumentation.Variable<Long> varToSample =
      new Instrumentation.Variable<Long>() {
        @Override
        public Long getValue() {
          return (long) count.incrementAndGet();
        }
      };
  instrumentation.addSampler("g", "s", 10, varToSample);
  sleep(2000);
  int i = count.get();
  assertTrue(i > 0);
  Map<String, Map<String, ?>> snapshot = instrumentation.getSnapshot();
  Map<String, Map<String, Object>> samplers =
      (Map<String, Map<String, Object>>) snapshot.get("samplers");
  InstrumentationService.Sampler sampler =
      (InstrumentationService.Sampler) samplers.get("g").get("s");
  assertTrue(sampler.getRate() > 0);
  server.destroy();
}
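The snapshot groups metrics by type; this test reads the "samplers" section. For the timer section, the matching pattern is the Cron API already used in the execute and schedule snippets above. A minimal sketch, where the group "g", the name "op", and doSomeWork() are illustrative placeholders:

Instrumentation.Cron cron = instrumentation.createCron();
cron.start();
doSomeWork(); // hypothetical operation being timed
cron.stop();
instrumentation.addCron("g", "op", cron);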