Use of java.util.concurrent.ConcurrentHashMap in project druid by druid-io.
The class KafkaLookupExtractorFactoryTest, method testStartStartStop.
@Test
public void testStartStartStop() {
    final KafkaStream<String, String> kafkaStream = PowerMock.createStrictMock(KafkaStream.class);
    final ConsumerIterator<String, String> consumerIterator = PowerMock.createStrictMock(ConsumerIterator.class);
    final ConsumerConnector consumerConnector = PowerMock.createStrictMock(ConsumerConnector.class);
    EasyMock.expect(consumerConnector.createMessageStreamsByFilter(
        EasyMock.anyObject(TopicFilter.class),
        EasyMock.anyInt(),
        EasyMock.eq(DEFAULT_STRING_DECODER),
        EasyMock.eq(DEFAULT_STRING_DECODER)
    )).andReturn(ImmutableList.of(kafkaStream)).once();
    EasyMock.expect(kafkaStream.iterator()).andReturn(consumerIterator).anyTimes();
    EasyMock.expect(consumerIterator.hasNext()).andAnswer(getBlockingAnswer()).anyTimes();
    EasyMock.expect(cacheManager.createCache()).andReturn(cacheHandler).once();
    EasyMock.expect(cacheHandler.getCache()).andReturn(new ConcurrentHashMap<String, String>()).once();
    cacheHandler.close();
    EasyMock.expectLastCall().once();
    consumerConnector.shutdown();
    EasyMock.expectLastCall().times(3);
    PowerMock.replay(cacheManager, cacheHandler, kafkaStream, consumerConnector, consumerIterator);
    final KafkaLookupExtractorFactory factory = new KafkaLookupExtractorFactory(
        cacheManager,
        TOPIC,
        ImmutableMap.of("zookeeper.connect", "localhost"),
        10_000L,
        false
    ) {
        @Override
        ConsumerConnector buildConnector(Properties properties) {
            return consumerConnector;
        }
    };
    Assert.assertTrue(factory.start());
    Assert.assertTrue(factory.start());
    Assert.assertTrue(factory.close());
    Assert.assertTrue(factory.close());
    PowerMock.verify(cacheManager, cacheHandler);
}
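The map returned by cacheHandler.getCache() is a ConcurrentHashMap because the factory hands it to a background Kafka consumer thread that writes entries while lookup queries read them. A minimal sketch of that one-writer/many-readers shape, assuming a plain thread as a stand-in for the consumer loop (key and value names are illustrative):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class LookupCacheSketch {
    public static void main(String[] args) throws InterruptedException {
        // Shared cache: one writer thread, concurrent readers, no external locking.
        Map<String, String> cache = new ConcurrentHashMap<>();

        // Hypothetical stand-in for the Kafka consumer loop that feeds the cache.
        Thread feeder = new Thread(() -> {
            for (int i = 0; i < 1000; i++) {
                cache.put("key-" + i, "value-" + i);
            }
        });
        feeder.start();

        // A reader may observe a key before or after the feeder writes it,
        // but never a corrupted map; get() is safe during concurrent puts.
        System.out.println(cache.getOrDefault("key-5", "miss"));
        feeder.join();
        System.out.println(cache.get("key-5"));
    }
}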
Use of java.util.concurrent.ConcurrentHashMap in project jetty.project by eclipse.
The class OverlayedAppProvider, method redeploy.
/* ------------------------------------------------------------ */
/**
 * Walks the defined webapps, templates, nodes and instances to
 * determine what should be deployed, then adjusts reality to match.
 */
protected void redeploy() {
    Map<String, Template> templates = new ConcurrentHashMap<String, Template>();
    // Check for duplicate templates
    for (Template template : _templates.values()) {
        Template other = templates.get(template.getTemplateName());
        if (other != null) {
            __log.warn("Multiple Templates: {} & {}", template.getName(), other.getName());
            if (other.getName().compareToIgnoreCase(template.getName()) <= 0)
                continue;
        }
        templates.put(template.getTemplateName(), template);
    }
    // Match webapps to templates
    for (Template template : templates.values()) {
        String webappname = template.getClassifier();
        if (webappname == null)
            continue;
        Webapp webapp = _webapps.get(webappname);
        if (webapp == null) {
            __log.warn("No webapp found for template: {}", template.getName());
            templates.remove(template.getTemplateName());
        } else {
            template.setWebapp(webapp);
        }
    }
    // Match instances to templates and check what needs to be deployed or undeployed.
    Set<String> deployed = new HashSet<String>();
    List<Instance> deploy = new ArrayList<Instance>();
    for (Instance instance : _instances.values()) {
        Template template = templates.get(instance.getTemplateName());
        instance.setTemplate(template);
        if (template != null) {
            String key = instance.getInstanceKey();
            App app = _deployed.get(key);
            if (app == null)
                deploy.add(instance);
            else
                deployed.add(key);
        }
    }
    // Look for deployed apps that need to be undeployed
    List<String> undeploy = new ArrayList<String>();
    for (String key : _deployed.keySet()) {
        if (!deployed.contains(key))
            undeploy.add(key);
    }
    // Do the undeploys
    for (String key : undeploy) {
        App app = _deployed.remove(key);
        if (app != null) {
            __log.info("Undeploy {}", key);
            _deploymentManager.removeApp(app);
        }
    }
    // Ready the deploys
    for (Instance instance : deploy) {
        String key = instance.getInstanceKey();
        OverlayedApp app = new OverlayedApp(_deploymentManager, this, key, instance);
        _deployed.put(key, app);
    }
    // Remove unused shared resources
    Set<String> sharedKeys = new HashSet<String>(_shared.keySet());
    for (OverlayedApp app : _deployed.values()) {
        Instance instance = app.getInstance();
        sharedKeys.remove(instance.getSharedKey());
    }
    for (String sharedKey : sharedKeys) {
        __log.debug("Remove " + sharedKey);
        TemplateContext shared = _shared.remove(sharedKey);
        if (shared != null) {
            try {
                shared.stop();
            } catch (Exception e) {
                __log.warn(e);
            }
            shared.destroy();
        }
    }
    // Do the deploys
    for (Instance instance : deploy) {
        String key = instance.getInstanceKey();
        OverlayedApp app = _deployed.get(key);
        __log.info("Deploy {}", key);
        _deploymentManager.addApp(app);
    }
}
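The duplicate-template rule above is subtle: when two templates share a template name, the warning fires and the template whose full name sorts first case-insensitively is kept. A standalone sketch of just that selection step, using a hypothetical Template class in place of jetty's inner class:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class TemplateDedupSketch {
    // Hypothetical stand-in for OverlayedAppProvider's Template class.
    static class Template {
        final String templateName;
        final String name;
        Template(String templateName, String name) {
            this.templateName = templateName;
            this.name = name;
        }
    }

    public static void main(String[] args) {
        Map<String, Template> templates = new ConcurrentHashMap<>();
        Template[] found = {
            new Template("base", "base=webapp-2.0"),
            new Template("base", "base=webapp-1.0")
        };
        for (Template t : found) {
            Template other = templates.get(t.templateName);
            // Keep the existing entry when its name sorts at or before the new one.
            if (other != null && other.name.compareToIgnoreCase(t.name) <= 0)
                continue;
            templates.put(t.templateName, t);
        }
        // Prints base=webapp-1.0: the case-insensitively smaller name wins.
        System.out.println(templates.get("base").name);
    }
}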
Use of java.util.concurrent.ConcurrentHashMap in project vert.x by eclipse.
The class DeploymentTest, method testIsolationGroup.
// TODO
// Multi-threaded workers
private void testIsolationGroup(String group1, String group2, int count1, int count2,
                                List<String> isolatedClasses, String verticleID) throws Exception {
    Map<String, Integer> countMap = new ConcurrentHashMap<>();
    vertx.eventBus().<JsonObject>consumer("testcounts").handler((Message<JsonObject> msg) -> {
        countMap.put(msg.body().getString("deploymentID"), msg.body().getInteger("count"));
    });
    CountDownLatch latch = new CountDownLatch(1);
    AtomicReference<String> deploymentID1 = new AtomicReference<>();
    AtomicReference<String> deploymentID2 = new AtomicReference<>();
    vertx.deployVerticle(verticleID, new DeploymentOptions().setIsolationGroup(group1).setIsolatedClasses(isolatedClasses), ar -> {
        assertTrue(ar.succeeded());
        deploymentID1.set(ar.result());
        assertEquals(0, TestVerticle.instanceCount.get());
        vertx.deployVerticle(verticleID, new DeploymentOptions().setIsolationGroup(group2).setIsolatedClasses(isolatedClasses), ar2 -> {
            assertTrue(ar2.succeeded());
            deploymentID2.set(ar2.result());
            assertEquals(0, TestVerticle.instanceCount.get());
            latch.countDown();
        });
    });
    awaitLatch(latch);
    // Wait until two entries are in the map
    waitUntil(() -> countMap.size() == 2);
    assertEquals(count1, countMap.get(deploymentID1.get()).intValue());
    assertEquals(count2, countMap.get(deploymentID2.get()).intValue());
}
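countMap is written from the event-bus handler's event-loop thread and polled from the test thread inside waitUntil, so entries must be safely published across threads; ConcurrentHashMap guarantees that a put on one thread is visible to size() and get() on another. A stripped-down sketch of that write-then-poll pattern without Vert.x (thread bodies and values are illustrative):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class WriteThenPollSketch {
    public static void main(String[] args) throws InterruptedException {
        Map<String, Integer> countMap = new ConcurrentHashMap<>();

        // Stand-ins for two event-loop threads reporting their counts.
        new Thread(() -> countMap.put("deployment-1", 3)).start();
        new Thread(() -> countMap.put("deployment-2", 3)).start();

        // Stand-in for waitUntil(() -> countMap.size() == 2): poll until
        // both writers' entries are visible to this thread.
        while (countMap.size() != 2) {
            Thread.sleep(1);
        }
        System.out.println(countMap);
    }
}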
Use of java.util.concurrent.ConcurrentHashMap in project jetty.project by eclipse.
The class MetaInfConfiguration, method scanForFragment.
/**
 * Scan for a META-INF/web-fragment.xml file in the given jar.
 *
 * @param context the context for the scan
 * @param jar the jar resource to scan for fragments in
 * @param cache the resource cache
 * @throws Exception if unable to scan for fragments
 */
public void scanForFragment(WebAppContext context, Resource jar, ConcurrentHashMap<Resource, Resource> cache) throws Exception {
    Resource webFrag = null;
    if (cache != null && cache.containsKey(jar)) {
        webFrag = cache.get(jar);
        if (webFrag == EmptyResource.INSTANCE) {
            if (LOG.isDebugEnabled())
                LOG.debug(jar + " cached as containing no META-INF/web-fragment.xml");
            return;
        } else if (LOG.isDebugEnabled())
            LOG.debug(jar + " META-INF/web-fragment.xml found in cache");
    } else {
        // Not using caches, or not in the cache, so check for the web-fragment.xml
        if (LOG.isDebugEnabled())
            LOG.debug(jar + " META-INF/web-fragment.xml checked");
        if (jar.isDirectory()) {
            //TODO ????
            webFrag = jar.addPath("/META-INF/web-fragment.xml");
        } else {
            URI uri = jar.getURI();
            webFrag = Resource.newResource(uriJarPrefix(uri, "!/META-INF/web-fragment.xml"));
        }
        if (!webFrag.exists() || webFrag.isDirectory()) {
            webFrag.close();
            webFrag = EmptyResource.INSTANCE;
        }
        if (cache != null) {
            // Cache the result; EmptyResource marks jars with no web-fragment.xml
            // so they are skipped on the next scan
            Resource old = cache.putIfAbsent(jar, webFrag);
            if (old != null)
                webFrag = old;
            else if (LOG.isDebugEnabled())
                LOG.debug(jar + " META-INF/web-fragment.xml cache updated");
        }
        if (webFrag == EmptyResource.INSTANCE)
            return;
    }
    Map<Resource, Resource> fragments = (Map<Resource, Resource>) context.getAttribute(METAINF_FRAGMENTS);
    if (fragments == null) {
        fragments = new HashMap<Resource, Resource>();
        context.setAttribute(METAINF_FRAGMENTS, fragments);
    }
    fragments.put(jar, webFrag);
    if (LOG.isDebugEnabled())
        LOG.debug(webFrag + " added to context");
}
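The putIfAbsent call is the concurrency idiom that makes the cache safe: two threads scanning the same jar may both compute a webFrag, but only one entry wins, and the loser adopts the cached value, so every caller ends up with the same Resource. A minimal illustration of the pattern, with plain strings standing in for jetty's Resource type:

import java.util.concurrent.ConcurrentHashMap;

public class PutIfAbsentSketch {
    public static void main(String[] args) {
        ConcurrentHashMap<String, String> cache = new ConcurrentHashMap<>();

        // Simulates one of two scanners racing to cache a result for the same jar.
        String mine = "fragment-built-by-me";
        String old = cache.putIfAbsent("some.jar", mine);
        if (old != null) {
            mine = old; // Lost the race: adopt the previously cached value.
        }

        // Equivalent modern idiom: computeIfAbsent invokes the mapping function
        // at most once per key and always returns the canonical cached instance.
        String canonical = cache.computeIfAbsent("other.jar", jar -> "fragment-for-" + jar);
        System.out.println(mine + " / " + canonical);
    }
}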
Use of java.util.concurrent.ConcurrentHashMap in project vert.x by eclipse.
The class Http1xTest, method testSharedServersRoundRobin.
@Test
public void testSharedServersRoundRobin() throws Exception {
    client.close();
    server.close();
    client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(false));
    int numServers = 5;
    int numRequests = numServers * 100;
    List<HttpServer> servers = new ArrayList<>();
    Set<HttpServer> connectedServers = Collections.newSetFromMap(new ConcurrentHashMap<>());
    Map<HttpServer, Integer> requestCount = new ConcurrentHashMap<>();
    CountDownLatch latchListen = new CountDownLatch(numServers);
    CountDownLatch latchConns = new CountDownLatch(numRequests);
    Set<Context> contexts = new ConcurrentHashSet<>();
    for (int i = 0; i < numServers; i++) {
        HttpServer theServer = vertx.createHttpServer(new HttpServerOptions().setPort(DEFAULT_HTTP_PORT));
        servers.add(theServer);
        final AtomicReference<Context> context = new AtomicReference<>();
        theServer.requestHandler(req -> {
            Context ctx = Vertx.currentContext();
            if (context.get() != null) {
                assertSame(ctx, context.get());
            } else {
                context.set(ctx);
                contexts.add(ctx);
            }
            connectedServers.add(theServer);
            Integer cnt = requestCount.get(theServer);
            int icnt = cnt == null ? 0 : cnt;
            icnt++;
            requestCount.put(theServer, icnt);
            latchConns.countDown();
            req.response().end();
        }).listen(onSuccess(s -> {
            assertEquals(DEFAULT_HTTP_PORT, s.actualPort());
            latchListen.countDown();
        }));
    }
    awaitLatch(latchListen);
    // Create a bunch of connections
    CountDownLatch latchClient = new CountDownLatch(numRequests);
    for (int i = 0; i < numRequests; i++) {
        client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, res -> latchClient.countDown()).end();
    }
    assertTrue(latchClient.await(10, TimeUnit.SECONDS));
    assertTrue(latchConns.await(10, TimeUnit.SECONDS));
    assertEquals(numServers, connectedServers.size());
    for (HttpServer server : servers) {
        assertTrue(connectedServers.contains(server));
    }
    assertEquals(numServers, requestCount.size());
    for (int cnt : requestCount.values()) {
        assertEquals(numRequests / numServers, cnt);
    }
    assertEquals(numServers, contexts.size());
    CountDownLatch closeLatch = new CountDownLatch(numServers);
    for (HttpServer server : servers) {
        server.close(ar -> {
            assertTrue(ar.succeeded());
            closeLatch.countDown();
        });
    }
    assertTrue(closeLatch.await(10, TimeUnit.SECONDS));
    testComplete();
}
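The requestCount update above is a read-modify-write (get, increment, put). That is only race-free because each server's handler always runs on the same context, which the test itself verifies with the assertSame check. When a key genuinely can be updated from several threads, ConcurrentHashMap.merge performs the increment atomically; a short sketch under that assumption:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class AtomicCounterSketch {
    public static void main(String[] args) throws InterruptedException {
        Map<String, Integer> requestCount = new ConcurrentHashMap<>();

        // Many threads bumping the same key: merge makes each increment atomic,
        // so no increments are lost to a get/put race.
        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    requestCount.merge("server-1", 1, Integer::sum);
                }
            });
            workers[i].start();
        }
        for (Thread w : workers) {
            w.join();
        }
        // Always prints 4000; a plain get/put loop could print less.
        System.out.println(requestCount.get("server-1"));
    }
}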