Example usage of org.apache.knox.gateway.topology.Topology in the Apache Knox project, taken from the testDeployment method of the EncryptUriDeploymentContributorTest class.
/**
 * Verifies that {@link EncryptUriDeploymentContributor} reports the expected
 * role and implementation name, and that its contribution lifecycle methods
 * (filter, initialize, provider, finalize) complete without throwing when
 * driven against a mocked {@link DeploymentContext}.
 *
 * Note: the original version of this test also built a DefaultCryptoService,
 * a GatewayServices mock and a UrlRewriteEnvironment mock that were never
 * replayed nor passed to the contributor; that dead setup has been removed.
 */
@Test
public void testDeployment() throws IOException {
    WebArchive webArchive = ShrinkWrap.create(WebArchive.class, "test-acrhive");

    Provider provider = new Provider();
    provider.setEnabled(true);
    provider.setName(EncryptUriDeploymentContributor.PROVIDER_ROLE_NAME);

    Topology topology = new Topology();
    topology.setName("Sample");

    // Nice mock: unexpected calls return defaults instead of failing the test.
    DeploymentContext context = EasyMock.createNiceMock(DeploymentContext.class);
    EasyMock.expect(context.getWebArchive()).andReturn(webArchive).anyTimes();
    EasyMock.expect(context.getTopology()).andReturn(topology).anyTimes();
    EasyMock.replay(context);

    AliasService as = EasyMock.createNiceMock(AliasService.class);

    EncryptUriDeploymentContributor contributor = new EncryptUriDeploymentContributor();
    contributor.setAliasService(as);

    assertThat(contributor.getRole(), is(EncryptUriDeploymentContributor.PROVIDER_ROLE_NAME));
    assertThat(contributor.getName(), is(EncryptUriDeploymentContributor.PROVIDER_IMPL_NAME));

    // Smoke-check the lifecycle: just make sure none of these blow up.
    contributor.contributeFilter(null, null, null, null, null);
    contributor.initializeContribution(context);
    contributor.contributeProvider(context, provider);
    contributor.finalizeContribution(context);
}
Example usage of org.apache.knox.gateway.topology.Topology in the Apache Knox project, taken from the testDeployment method of the SecureQueryDeploymentContributorTest class.
/**
 * Verifies that {@link SecureQueryDeploymentContributor} reports the expected
 * role ("secure-query") and name ("default"), and that its contribution
 * lifecycle methods complete without throwing against a mocked
 * {@link DeploymentContext}.
 *
 * Note: the original version of this test also built a DefaultCryptoService,
 * a GatewayServices mock and a UrlRewriteEnvironment mock that were never
 * replayed nor passed to the contributor, plus large stretches of
 * commented-out rewrite-rule assertions; that dead setup has been removed.
 */
@Test
public void testDeployment() throws IOException {
    WebArchive webArchive = ShrinkWrap.create(WebArchive.class, "test-acrhive");

    // No provider params are needed for this smoke test; an empty map suffices.
    Map<String, String> providerParams = new HashMap<>();

    Provider provider = new Provider();
    provider.setEnabled(true);
    provider.setName("secure-query");
    provider.setParams(providerParams);

    Topology topology = new Topology();
    topology.setName("Sample");

    // Nice mock: unexpected calls return defaults instead of failing the test.
    DeploymentContext context = EasyMock.createNiceMock(DeploymentContext.class);
    EasyMock.expect(context.getWebArchive()).andReturn(webArchive).anyTimes();
    EasyMock.expect(context.getTopology()).andReturn(topology).anyTimes();
    EasyMock.replay(context);

    AliasService as = EasyMock.createNiceMock(AliasService.class);

    SecureQueryDeploymentContributor contributor = new SecureQueryDeploymentContributor();
    contributor.setAliasService(as);

    assertThat(contributor.getRole(), is("secure-query"));
    assertThat(contributor.getName(), is("default"));

    // Smoke-check the lifecycle: just make sure none of these blow up.
    contributor.contributeFilter(null, null, null, null, null);
    contributor.initializeContribution(context);
    contributor.contributeProvider(context, provider);
    contributor.finalizeContribution(context);
}
Example usage of org.apache.knox.gateway.topology.Topology in the Apache Knox project, taken from the start method of the GatewayServer class.
/**
 * Builds the embedded Jetty server, deploys all currently known topologies
 * and begins monitoring the topology directory for changes.
 *
 * The sequence is order-sensitive: connectors must be registered before the
 * handlers are installed and the server started, and the topology monitor
 * must reload before the deployed-topology list can be computed.
 *
 * Synchronized so that start/stop/redeploy cannot interleave on this server
 * instance.
 *
 * @throws Exception if Jetty fails to start or topology services cannot be
 *                   initialized; an IOException from jetty.start() is logged
 *                   and rethrown.
 */
private synchronized void start() throws Exception {
// Create the global context handler.
contexts = new ContextHandlerCollection();
// A map to keep track of current deployments by cluster name.
deployments = new ConcurrentHashMap<>();
// Start Jetty.
jetty = new Server(new QueuedThreadPool(config.getThreadPoolMax()));
/* topologyName is null because all topology listen on this port */
jetty.addConnector(createConnector(jetty, config, config.getGatewayPort(), null));
// Add Annotations processing into the Jetty server to support JSPs
Configuration.ClassList classlist = Configuration.ClassList.setServerDefault(jetty);
classlist.addBefore("org.eclipse.jetty.webapp.JettyWebXmlConfiguration", "org.eclipse.jetty.annotations.AnnotationConfiguration");
// Load the current topologies.
File topologiesDir = calculateAbsoluteTopologiesDir();
log.loadingTopologiesFromDirectory(topologiesDir.getAbsolutePath());
monitor = services.getService(GatewayServices.TOPOLOGY_SERVICE);
monitor.addTopologyChangeListener(listener);
// Force a reload so getTopologies() below reflects what is on disk right now.
monitor.reloadTopologies();
final Collection<Topology> topologies = monitor.getTopologies();
final Map<String, Integer> topologyPortMap = config.getGatewayPortMappings();
// List of all the topology that are deployed
final List<String> deployedTopologyList = new ArrayList<String>();
for (final Topology t : topologies) {
deployedTopologyList.add(t.getName());
}
// Check whether the configured topologies for port mapping exist, if not
// log WARN message and continue
checkMappedTopologiesExist(topologyPortMap, deployedTopologyList);
final HandlerCollection handlers = createHandlers(config, services, contexts, topologyPortMap);
// Check whether a topology wants dedicated port,
// if yes then we create a connector that listens on the provided port.
log.gatewayTopologyPortMappingEnabled(config.isGatewayPortMappingEnabled());
if (config.isGatewayPortMappingEnabled()) {
for (Map.Entry<String, Integer> entry : topologyPortMap.entrySet()) {
// Only add a dedicated connector for topologies that are actually deployed,
// and NOT for Default Topology listening on standard gateway port.
if (deployedTopologyList.contains(entry.getKey()) && (entry.getValue().intValue() != config.getGatewayPort())) {
log.createJettyConnector(entry.getKey().toLowerCase(), entry.getValue());
jetty.addConnector(createConnector(jetty, config, entry.getValue(), entry.getKey().toLowerCase()));
}
}
}
jetty.setHandler(handlers);
try {
jetty.start();
} catch (IOException e) {
// Log with context before propagating so startup failures are visible in the gateway log.
log.failedToStartGateway(e);
throw e;
}
// Remove deployment artifacts for topologies that no longer exist on disk.
cleanupTopologyDeployments();
// Start the topology monitor.
log.monitoringTopologyChangesInDirectory(topologiesDir.getAbsolutePath());
monitor.startMonitor();
}
Example usage of org.apache.knox.gateway.topology.Topology in the Apache Knox project, taken from the contribute method of the DeploymentFactory class.
/**
 * Runs the three contribution phases — providers, services, then
 * applications — for the topology carried by the given deployment context.
 */
private static void contribute(DeploymentContext context, Map<String, List<ProviderDeploymentContributor>> providers, Map<String, List<ServiceDeploymentContributor>> services, Map.Entry<String, ServiceDeploymentContributor> applications) {
    // Resolve the topology once and hand the same instance to each phase.
    final Topology currentTopology = context.getTopology();
    contributeProviders(context, currentTopology, providers);
    contributeServices(context, currentTopology, services);
    contributeApplications(context, currentTopology, applications);
}
Example usage of org.apache.knox.gateway.topology.Topology in the Apache Knox project, taken from the testParseSimpleTopologyXmlInHadoopFormat method of the TopologyRulesModuleTest class.
/**
 * Parses a sample Ambari-format topology XML resource and verifies the
 * resulting {@link Topology}: its name and timestamp, the four services
 * (WEBHDFS, WEBHCAT, OOZIE, HIVE) with their URLs, and the two providers
 * (ShiroProvider authentication and Default identity-assertion) with their
 * parameters.
 *
 * Fix: the OOZIE and HIVE URL-count assertions previously re-checked
 * {@code webhcatService.getUrls().size()} (a copy-paste error); they now
 * check the corresponding service.
 */
@Test
public void testParseSimpleTopologyXmlInHadoopFormat() throws IOException, SAXException, URISyntaxException {
    Digester digester = loader.newDigester();
    String name = "org/apache/knox/gateway/topology/xml/simple-topology-ambari-format.conf";
    URL url = ClassLoader.getSystemResource(name);
    assertThat("Failed to find URL for resource " + name, url, notNullValue());
    File file = new File(url.getFile());
    TopologyBuilder topologyBuilder = digester.parse(url);
    Topology topology = topologyBuilder.build();
    assertThat("Failed to parse resource " + name, topology, notNullValue());
    topology.setTimestamp(file.lastModified());

    // Top-level topology metadata.
    assertThat(topology.getName(), is("topology2"));
    assertThat(topology.getTimestamp(), is(file.lastModified()));
    assertThat(topology.getServices().size(), is(4));
    assertThat(topology.getProviders().size(), is(2));

    // WEBHDFS: two HA URLs.
    Service webhdfsService = topology.getService("WEBHDFS", null, null);
    assertThat(webhdfsService, notNullValue());
    assertThat(webhdfsService.getRole(), is("WEBHDFS"));
    assertThat(webhdfsService.getName(), nullValue());
    assertThat(webhdfsService.getUrls().size(), is(2));
    assertThat(webhdfsService.getUrls(), hasItem("http://host1:50070/webhdfs"));
    assertThat(webhdfsService.getUrls(), hasItem("http://host2:50070/webhdfs"));

    // WEBHCAT: single URL.
    Service webhcatService = topology.getService("WEBHCAT", null, null);
    assertThat(webhcatService, notNullValue());
    assertThat(webhcatService.getRole(), is("WEBHCAT"));
    assertThat(webhcatService.getName(), nullValue());
    assertThat(webhcatService.getUrls().size(), is(1));
    assertThat(webhcatService.getUrls(), hasItem("http://host:50111/templeton"));

    // OOZIE: single URL.
    Service oozieService = topology.getService("OOZIE", null, null);
    assertThat(oozieService, notNullValue());
    assertThat(oozieService.getRole(), is("OOZIE"));
    assertThat(oozieService.getName(), nullValue());
    assertThat(oozieService.getUrls().size(), is(1));
    assertThat(oozieService.getUrls(), hasItem("http://host:11000/oozie"));

    // HIVE: single URL.
    Service hiveService = topology.getService("HIVE", null, null);
    assertThat(hiveService, notNullValue());
    assertThat(hiveService.getRole(), is("HIVE"));
    assertThat(hiveService.getName(), nullValue());
    assertThat(hiveService.getUrls().size(), is(1));
    assertThat(hiveService.getUrls(), hasItem("http://host:10000"));

    // Authentication provider (ShiroProvider), enabled, 5 params.
    Provider authenticationProvider = topology.getProvider("authentication", "ShiroProvider");
    assertThat(authenticationProvider, notNullValue());
    assertThat(authenticationProvider.isEnabled(), is(true));
    assertThat(authenticationProvider.getRole(), is("authentication"));
    assertThat(authenticationProvider.getName(), is("ShiroProvider"));
    assertThat(authenticationProvider.getParams().size(), is(5));
    assertThat(authenticationProvider.getParams().get("main.ldapRealm.contextFactory.url"), is("ldap://localhost:33389"));

    // Identity-assertion provider (Default), disabled, 2 params.
    Provider identityAssertionProvider = topology.getProvider("identity-assertion", "Default");
    assertThat(identityAssertionProvider, notNullValue());
    assertThat(identityAssertionProvider.isEnabled(), is(false));
    assertThat(identityAssertionProvider.getRole(), is("identity-assertion"));
    assertThat(identityAssertionProvider.getName(), is("Default"));
    assertThat(identityAssertionProvider.getParams().size(), is(2));
    assertThat(identityAssertionProvider.getParams().get("name"), is("user.name"));
}
Aggregations