Use of org.eclipse.jetty.server.Server in project camel by apache.
The class WebsocketCamelRouterTestSupport, method setUp.
@Before
public void setUp() throws Exception {
    server = new Server(PORT);
    ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS);
    context.setContextPath("/");
    server.setHandler(context);
    servletHolder = new ServletHolder(new CamelWebSocketServlet());
    servletHolder.setName("CamelWsServlet");
    context.addServlet(servletHolder, "/*");
    server.start();
    if (startCamelContext) {
        super.setUp();
    }
}
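For completeness, a minimal companion tearDown is sketched below (assumed, not shown in the original class): it stops the Camel context if one was started and then shuts down the embedded Jetty server.
@After
public void tearDown() throws Exception {
    if (startCamelContext) {
        super.tearDown();   // stop the CamelContext started in setUp
    }
    if (server != null) {
        server.stop();      // stop the embedded Jetty server
        server.destroy();
    }
}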
Use of org.eclipse.jetty.server.Server in project camel by apache.
The class WsProducerTestBase, method startTestServer.
public void startTestServer() throws Exception {
    // start a simple websocket echo service
    server = new Server(PORT);
    Connector connector = getConnector();
    server.addConnector(connector);
    ServletContextHandler ctx = new ServletContextHandler();
    ctx.setContextPath("/");
    ctx.addServlet(TestServletFactory.class.getName(), "/*");
    server.setHandler(ctx);
    server.start();
    assertTrue(server.isStarted());
}
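The snippet above relies on fields and helpers from its test base class. A self-contained sketch of the same embedded-Jetty shape, assuming a fixed port of 8080 in place of the custom Connector returned by getConnector(), could look like this:
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.servlet.ServletContextHandler;

public class EchoServerSketch {
    public static void main(String[] args) throws Exception {
        // hypothetical fixed port; the original test installs its own Connector
        Server server = new Server(8080);
        ServletContextHandler ctx = new ServletContextHandler();
        ctx.setContextPath("/");
        // the echo servlet; TestServletFactory comes from the surrounding test support code
        ctx.addServlet(TestServletFactory.class.getName(), "/*");
        server.setHandler(ctx);
        server.start();
        server.join(); // block until the server is stopped
    }
}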
Use of org.eclipse.jetty.server.Server in project camel by apache.
The class CamelComponentVerifierTest, method setUp.
@Before
@Override
public void setUp() throws Exception {
    localServer = new Server(PORT);
    localServer.setHandler(handlers(contextHandler("/basic", new BasicValidationHandler("GET", null, null, getExpectedContent()))));
    localServer.start();
    super.setUp();
}
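The handlers(...) and contextHandler(...) calls are helpers from the test's support code, not part of the Jetty API. A hedged sketch of what such helpers could look like, built on org.eclipse.jetty.server.handler.HandlerCollection and ContextHandler:
// assumed implementations for illustration only
private static Handler handlers(Handler... handlers) {
    HandlerCollection collection = new HandlerCollection();
    collection.setHandlers(handlers);
    return collection;
}

private static Handler contextHandler(String path, Handler handler) {
    ContextHandler context = new ContextHandler(path);
    context.setHandler(handler);
    return context;
}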
Use of org.eclipse.jetty.server.Server in project camel by apache.
The class JettyHttpComponent, method createServer.
protected Server createServer() {
    Server s = null;
    ThreadPool tp = threadPool;
    QueuedThreadPool qtp = null;
    // configure thread pool if min/max given
    if (minThreads != null || maxThreads != null) {
        if (getThreadPool() != null) {
            throw new IllegalArgumentException("You cannot configure both minThreads/maxThreads and a custom threadPool on JettyHttpComponent: " + this);
        }
        qtp = new QueuedThreadPool();
        if (minThreads != null) {
            qtp.setMinThreads(minThreads.intValue());
        }
        if (maxThreads != null) {
            qtp.setMaxThreads(maxThreads.intValue());
        }
        tp = qtp;
    }
    if (tp != null) {
        try {
            if (!Server.getVersion().startsWith("8")) {
                s = Server.class.getConstructor(ThreadPool.class).newInstance(tp);
            } else {
                s = new Server();
                if (isEnableJmx()) {
                    enableJmx(s);
                }
                Server.class.getMethod("setThreadPool", ThreadPool.class).invoke(s, tp);
            }
        } catch (Exception e) {
            // ignore
        }
    }
    if (s == null) {
        s = new Server();
    }
    if (qtp != null) {
        // let the thread names indicate they are from the server
        qtp.setName("CamelJettyServer(" + ObjectHelper.getIdentityHashCode(s) + ")");
        try {
            qtp.start();
        } catch (Exception e) {
            throw new RuntimeCamelException("Error starting JettyServer thread pool: " + qtp, e);
        }
    }
    ContextHandlerCollection collection = new ContextHandlerCollection();
    s.setHandler(collection);
    // set up the error handler if one is configured on the Jetty component
    if (getErrorHandler() != null) {
        s.addBean(getErrorHandler());
    } else if (!Server.getVersion().startsWith("8")) {
        // need an error handler that won't leak information about the exception
        // back to the client.
        ErrorHandler eh = new ErrorHandler() {

            public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException {
                String msg = HttpStatus.getMessage(response.getStatus());
                request.setAttribute(RequestDispatcher.ERROR_MESSAGE, msg);
                if (response instanceof Response) {
                    // need to use the deprecated method to support compiling with Jetty 8
                    ((Response) response).setStatus(response.getStatus(), msg);
                }
                super.handle(target, baseRequest, request, response);
            }

            protected void writeErrorPage(HttpServletRequest request, Writer writer, int code, String message, boolean showStacks) throws IOException {
                super.writeErrorPage(request, writer, code, message, false);
            }
        };
        s.addBean(eh, false);
    }
    return s;
}
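The reflection above exists only so the component can compile and run against both Jetty 8 (which uses setThreadPool) and Jetty 9 (which takes the pool in the constructor). Targeting Jetty 9 and later only, the same thread-pool wiring can be written directly; a simplified sketch with illustrative values, not the component's actual code:
QueuedThreadPool pool = new QueuedThreadPool();
pool.setMinThreads(8);    // illustrative values; the component reads these from its options
pool.setMaxThreads(64);
Server server = new Server(pool);                  // Jetty 9+ accepts the ThreadPool in the constructor
server.setHandler(new ContextHandlerCollection());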
Use of org.eclipse.jetty.server.Server in project hadoop by apache.
The class TestHttpFSServerNoACLs, method createHttpFSServer.
/**
 * Create an HttpFS Server to talk to the MiniDFSCluster we created.
 * @throws Exception
 */
private void createHttpFSServer() throws Exception {
    File homeDir = TestDirHelper.getTestDir();
    Assert.assertTrue(new File(homeDir, "conf").mkdir());
    Assert.assertTrue(new File(homeDir, "log").mkdir());
    Assert.assertTrue(new File(homeDir, "temp").mkdir());
    HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
    File secretFile = new File(new File(homeDir, "conf"), "secret");
    Writer w = new FileWriter(secretFile);
    w.write("secret");
    w.close();
    // HDFS configuration
    File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
    if (!hadoopConfDir.mkdirs()) {
        throw new IOException();
    }
    String fsDefaultName = nnConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
    // Explicitly turn off ACLs, just in case the default becomes true later
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);
    File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
    OutputStream os = new FileOutputStream(hdfsSite);
    conf.writeXml(os);
    os.close();
    // HTTPFS configuration
    conf = new Configuration(false);
    conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups", HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts", HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
    conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath());
    File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
    os = new FileOutputStream(httpfsSite);
    conf.writeXml(os);
    os.close();
    ClassLoader cl = Thread.currentThread().getContextClassLoader();
    URL url = cl.getResource("webapp");
    if (url == null) {
        throw new IOException();
    }
    WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
    Server server = TestJettyHelper.getJettyServer();
    server.setHandler(context);
    server.start();
}
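Once the server is running, the tests typically exercise the /webhdfs endpoint over plain HTTP. A hedged sketch of such a check, assuming TestJettyHelper exposes the server's base URL via getJettyURL() (the operation and user parameter are illustrative; the real assertions live in the individual test methods):
URL opUrl = new URL(TestJettyHelper.getJettyURL(),
        "/webhdfs/v1/?op=liststatus&user.name=" + HadoopUsersConfTestHelper.getHadoopUsers()[0]);
HttpURLConnection conn = (HttpURLConnection) opUrl.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());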