Use of org.apache.hadoop.http.HttpServer2 in project hadoop by apache.
The class TestJobEndNotifier, method testNotificationOnLastRetryUnregistrationFailure.
@Test
public void testNotificationOnLastRetryUnregistrationFailure() throws Exception {
  HttpServer2 server = startHttpServer();
  MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
      this.getClass().getName(), true, 2, false));
  // Currently, isLastRetry is always false at the start of MRAppMaster,
  // unless the staging area exists or a commit has already started.
  // Manually set isLastRetry to true; it should be reset to false when
  // unregistration fails.
  app.isLastAMRetry = true;
  doNothing().when(app).sysexit();
  JobConf conf = new JobConf();
  conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
      JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
  JobImpl job = (JobImpl) app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  app.getContext().getEventHandler().handle(
      new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
  app.waitForInternalState(job, JobStateInternal.REBOOT);
  // Now shut down. The user should see the FAILED state.
  // Unregistration fails, so isLastAMRetry is recalculated and reset to false.
  // The reboot stops the service internally, so we don't need to shut down twice.
  app.waitForServiceToStop(10000);
  Assert.assertFalse(app.isLastAMRetry());
  // Since this is not the last retry, JobEndServlet was not called.
  Assert.assertEquals(0, JobEndServlet.calledTimes);
  Assert.assertNull(JobEndServlet.requestUri);
  Assert.assertNull(JobEndServlet.foundJobState);
  server.stop();
}
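The startHttpServer() helper called at the top of this test is not shown on this page. A minimal sketch of what it might look like, assuming it simply binds an HttpServer2 to an ephemeral port and registers JobEndServlet so the notification URL has a target (the "test" server name and the "/jobend/*" path spec are assumptions, not taken from the original test):

private static HttpServer2 startHttpServer() throws Exception {
  HttpServer2 server = new HttpServer2.Builder()
      .setName("test")
      // Port 0 asks the OS for any free port, which suits a unit test.
      .addEndpoint(URI.create("http://localhost:0"))
      .build();
  server.addServlet("jobend", "/jobend/*", JobEndServlet.class);
  server.start();
  // JobEndServlet.baseUrl would then be derived from the bound connector,
  // e.g. "http://localhost:" + server.getConnectorAddress(0).getPort() + "/".
  return server;
}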
Use of org.apache.hadoop.http.HttpServer2 in project hadoop by apache.
The class JournalNodeHttpServer, method start.
void start() throws IOException {
  final InetSocketAddress httpAddr = getAddress(conf);
  final String httpsAddrString = conf.get(
      DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "journal",
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY);
  httpServer = builder.build();
  httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
  httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  httpServer.addInternalServlet("getJournal", "/getJournal",
      GetJournalEditServlet.class, true);
  httpServer.start();
}
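The JournalNode instance stored via setAttribute above is what GetJournalEditServlet later reads when serving edit segments over /getJournal. A sketch of the retrieval side, inside a servlet method (only the attribute lookup is shown, not the full servlet):

// The attribute set by the server above can be read back from the
// ServletContext under the same key.
JournalNode jn = (JournalNode) getServletContext()
    .getAttribute(JournalNodeHttpServer.JN_ATTRIBUTE_KEY);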
Use of org.apache.hadoop.http.HttpServer2 in project hadoop by apache.
The class Nfs3HttpServer, method start.
void start() throws IOException {
  final InetSocketAddress httpAddr = getHttpAddress(conf);
  final String httpsAddrString = conf.get(
      NfsConfigKeys.NFS_HTTPS_ADDRESS_KEY,
      NfsConfigKeys.NFS_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "nfs3",
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY,
      NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY);
  this.httpServer = builder.build();
  this.httpServer.start();
  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  int connIdx = 0;
  if (policy.isHttpEnabled()) {
    infoPort = httpServer.getConnectorAddress(connIdx++).getPort();
  }
  if (policy.isHttpsEnabled()) {
    infoSecurePort = httpServer.getConnectorAddress(connIdx).getPort();
  }
}
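The connIdx bookkeeping above works because the builder template adds the HTTP endpoint before the HTTPS one when both are enabled, so the ports land in infoPort and infoSecurePort respectively. A small sketch of accessors (names assumed, not taken from this page) that a caller could use to read them back:

// Hypothetical accessors; they simply expose the ports captured in start().
int getPort() {
  return infoPort;
}

int getSecurePort() {
  return infoSecurePort;
}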
Use of org.apache.hadoop.http.HttpServer2 in project hadoop by apache.
The class TestWebHDFS, method testGetFileBlockLocationsBackwardsCompatibility.
@Test
public void testGetFileBlockLocationsBackwardsCompatibility() throws Exception {
  final Configuration conf = WebHdfsTestUtil.createConf();
  final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
  HttpServer2 http = null;
  try {
    http = HttpServerFunctionalTest.createTestServer(conf);
    http.addServlet("test", pathSpec, MockWebHdfsServlet.class);
    http.start();
    // Write the address back into the configuration so that
    // WebHdfsFileSystem can connect to the mock server.
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
        "localhost:" + http.getConnectorAddress(0).getPort());
    final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(
        conf, WebHdfsConstants.WEBHDFS_SCHEME);
    WebHdfsFileSystem spyFs = spy(webFS);
    BlockLocation[] locations = spyFs.getFileBlockLocations(
        new Path("p"), 0, 100);
    // Verify the result.
    assertEquals(1, locations.length);
    assertEquals(121, locations[0].getLength());
    // Verify the fallback: the function should be called exactly 2 times.
    // The 1st call handles GETFILEBLOCKLOCATIONS and finds it is not supported;
    // the 2nd call falls back to handle GET_FILE_BLOCK_LOCATIONS.
    verify(spyFs, times(2)).getFileBlockLocations(any(), any(),
        anyLong(), anyLong());
    // When the server returns a different error, it should be thrown
    // directly as an exception.
    try {
      spyFs.getFileBlockLocations(new Path("p"), 0, 100);
    } catch (Exception e) {
      assertTrue(e instanceof IOException);
      assertEquals(e.getMessage(), MockWebHdfsServlet.RANDOM_EXCEPTION_MSG);
      // In total, this function has now been called 3 times.
      verify(spyFs, times(3)).getFileBlockLocations(any(), any(),
          anyLong(), anyLong());
    }
  } finally {
    if (http != null) {
      http.stop();
    }
  }
}
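MockWebHdfsServlet itself is not shown on this page. For the assertions above to hold, it has to answer the first, new-style call with a "not supported" error, answer the fallback call with a single 121-byte block, and fail every later request with RANDOM_EXCEPTION_MSG. A rough skeleton of such a servlet, as an assumption only: the call-counting approach, the message value, and the response payloads (left as comments) are not taken from the real class.

public static class MockWebHdfsServlet extends HttpServlet {
  static final String RANDOM_EXCEPTION_MSG = "Random exception";  // assumed value
  private int calls;  // assumed bookkeeping; the real mock may differ

  @Override
  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws ServletException, IOException {
    calls++;
    if (calls == 1) {
      // Reply to GETFILEBLOCKLOCATIONS with an "unsupported operation" style
      // remote-exception JSON so WebHdfsFileSystem falls back to the older op.
    } else if (calls == 2) {
      // Reply to the fallback operation with a JSON payload describing a
      // single 121-byte block, matching the assertions in the test.
    } else {
      // Fail any later request with RANDOM_EXCEPTION_MSG so the client
      // surfaces it as an IOException.
    }
  }
}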
Use of org.apache.hadoop.http.HttpServer2 in project hadoop by apache.
The class NodeTimelineCollectorManager, method startWebApp.
/**
 * Launch the REST web server for this collector manager.
 */
private void startWebApp() {
  Configuration conf = getConfig();
  String bindAddress = conf.get(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
      YarnConfiguration.DEFAULT_TIMELINE_SERVICE_BIND_HOST) + ":0";
  try {
    HttpServer2.Builder builder = new HttpServer2.Builder()
        .setName("timeline")
        .setConf(conf)
        .addEndpoint(URI.create(
            (YarnConfiguration.useHttps(conf) ? "https://" : "http://")
                + bindAddress));
    timelineRestServer = builder.build();
    // TODO: replace this with an authentication filter in the future.
    HashMap<String, String> options = new HashMap<>();
    String username = conf.get(HADOOP_HTTP_STATIC_USER,
        DEFAULT_HADOOP_HTTP_STATIC_USER);
    options.put(HADOOP_HTTP_STATIC_USER, username);
    HttpServer2.defineFilter(timelineRestServer.getWebAppContext(),
        "static_user_filter_timeline",
        StaticUserWebFilter.StaticUserFilter.class.getName(),
        options, new String[] { "/*" });
    timelineRestServer.addJerseyResourcePackage(
        TimelineCollectorWebService.class.getPackage().getName() + ";"
            + GenericExceptionHandler.class.getPackage().getName() + ";"
            + YarnJacksonJaxbJsonProvider.class.getPackage().getName(),
        "/*");
    timelineRestServer.setAttribute(COLLECTOR_MANAGER_ATTR_KEY, this);
    timelineRestServer.start();
  } catch (Exception e) {
    String msg = "The per-node collector webapp failed to start.";
    LOG.error(msg, e);
    throw new YarnRuntimeException(msg, e);
  }
  // TODO: We need to think about the case of multiple interfaces.
  this.timelineRestServerBindAddress = WebAppUtils.getResolvedAddress(
      timelineRestServer.getConnectorAddress(0));
  LOG.info("Instantiated the per-node collector webapp at "
      + timelineRestServerBindAddress);
}