Example usage of java.util.HashSet in the Apache Camel project:
class HazelcastMapProducerForSpringTest, method testGetAllEmptySet.
@Test
public void testGetAllEmptySet() {
    // An empty key set: the producer should still call getAll, and the stub
    // returns the full backing map regardless of the keys passed (anySet()).
    Set<Object> keys = new HashSet<Object>();
    // Parameterized instead of raw Map/HashMap to avoid unchecked warnings.
    Map<Object, Object> backing = new HashMap<Object, Object>();
    backing.put("key1", "value1");
    backing.put("key2", "value2");
    backing.put("key3", "value3");
    when(map.getAll(anySet())).thenReturn(backing);
    template.sendBodyAndHeader("direct:getAll", null, HazelcastConstants.OBJECT_ID, keys);
    String body = consumer.receiveBody("seda:out", 5000, String.class);
    // The empty set must be forwarded verbatim to the Hazelcast map.
    verify(map).getAll(keys);
    // Map iteration order is unspecified, so check each entry individually.
    assertTrue(body.contains("key1=value1"));
    assertTrue(body.contains("key2=value2"));
    assertTrue(body.contains("key3=value3"));
}
Example usage of java.util.HashSet in the Apache Camel project:
class HazelcastMapProducerForSpringTest, method testGetAllOnlyOneKey.
@Test
public void testGetAllOnlyOneKey() {
    // A single requested key: stub getAll for exactly that key set.
    Set<Object> keys = new HashSet<Object>();
    keys.add("key1");
    // Parameterized instead of raw Map/HashMap to avoid unchecked warnings.
    Map<Object, Object> backing = new HashMap<Object, Object>();
    backing.put("key1", "value1");
    when(map.getAll(keys)).thenReturn(backing);
    template.sendBodyAndHeader("direct:getAll", null, HazelcastConstants.OBJECT_ID, keys);
    String body = consumer.receiveBody("seda:out", 5000, String.class);
    verify(map).getAll(keys);
    // With one entry the toString form is deterministic.
    assertEquals("{key1=value1}", body);
}
Example usage of java.util.HashSet in the Apache Camel project:
class HdfsProducerConsumerIntegrationTest, method testMultipleConsumers.
// see https://issues.apache.org/jira/browse/CAMEL-7318
@Test
public void testMultipleConsumers() throws Exception {
    // Seed HDFS with ITERATIONS small files for the consumers to pick up.
    Path p = new Path("hdfs://localhost:9000/tmp/test/multiple-consumers");
    FileSystem fs = FileSystem.get(p.toUri(), new Configuration());
    fs.mkdirs(p);
    for (int i = 1; i <= ITERATIONS; i++) {
        FSDataOutputStream os = fs.create(new Path(p, String.format("file-%03d.txt", i)));
        os.write(String.format("hello (%03d)\n", i).getBytes());
        os.close();
    }
    final Set<String> fileNames = new HashSet<String>();
    final CountDownLatch latch = new CountDownLatch(ITERATIONS);
    MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
    resultEndpoint.whenAnyExchangeReceived(new Processor() {
        @Override
        public void process(Exchange exchange) throws Exception {
            // Exchanges arrive on several consumer threads concurrently and
            // HashSet is not thread-safe, so guard every mutation with a lock.
            synchronized (fileNames) {
                fileNames.add(exchange.getIn().getHeader(Exchange.FILE_NAME, String.class));
            }
            latch.countDown();
        }
    });
    context.addRoutes(new RouteBuilder() {
        @Override
        public void configure() {
            // difference in chunkSize only to allow multiple consumers
            from("hdfs2://localhost:9000/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=128").to("mock:result");
            from("hdfs2://localhost:9000/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=256").to("mock:result");
            from("hdfs2://localhost:9000/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=512").to("mock:result");
            from("hdfs2://localhost:9000/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=1024").to("mock:result");
        }
    });
    context.start();
    resultEndpoint.expectedMessageCount(ITERATIONS);
    // Fail fast on timeout instead of silently ignoring the await result.
    assertTrue("Timed out waiting for " + ITERATIONS + " exchanges", latch.await(30, TimeUnit.SECONDS));
    resultEndpoint.assertIsSatisfied();
    // Every file name must be distinct, proving each file was consumed once.
    assertThat(fileNames.size(), equalTo(ITERATIONS));
}
Example usage of java.util.HashSet in the Apache Camel project:
class IgniteEventsEndpoint, method setEvents.
/**
 * Sets the event types to subscribe to as a comma-separated string of event constants as defined in {@link EventType}.
 * <p>
 * For example: EVT_CACHE_ENTRY_CREATED,EVT_CACHE_OBJECT_REMOVED,EVT_IGFS_DIR_CREATED.
 *
 * @param events comma-separated {@link EventType} constant names; matching is
 *               case-insensitive and surrounding whitespace per name is ignored
 * @throws IllegalArgumentException if an event constant cannot be read reflectively
 */
public void setEvents(String events) {
    this.events = new HashSet<>();
    // Normalize each token: upper-case and trim so "evt_a, evt_b" also resolves
    // (the previous split-only approach silently dropped names with spaces).
    Set<String> requestedEvents = new HashSet<>();
    for (String event : events.toUpperCase().split(",")) {
        requestedEvents.add(event.trim());
    }
    Field[] fields = EventType.class.getDeclaredFields();
    for (Field field : fields) {
        if (!requestedEvents.contains(field.getName())) {
            continue;
        }
        try {
            // EventType constants are static int fields, hence the null receiver.
            this.events.add(field.getInt(null));
        } catch (Exception e) {
            throw new IllegalArgumentException("Problem while resolving event type. See stacktrace.", e);
        }
    }
}
Example usage of java.util.HashSet in the Apache Camel project:
class HttpProducerConcurrentTest, method doSendMessages.
/**
 * Fires {@code files} concurrent requests through a pool of {@code poolSize}
 * threads and verifies that every response is unique.
 *
 * @param files    number of messages to send (and unique responses expected)
 * @param poolSize number of worker threads submitting requests
 * @throws Exception if sending or awaiting a response fails
 */
private void doSendMessages(int files, int poolSize) throws Exception {
    getMockEndpoint("mock:result").expectedMessageCount(files);
    getMockEndpoint("mock:result").assertNoDuplicates(body());
    ExecutorService executor = Executors.newFixedThreadPool(poolSize);
    try {
        // we access the responses Map below only inside the main thread,
        // so no need for a thread-safe Map implementation
        Map<Integer, Future<String>> responses = new HashMap<Integer, Future<String>>();
        for (int i = 0; i < files; i++) {
            final int index = i;
            Future<String> out = executor.submit(new Callable<String>() {
                public String call() throws Exception {
                    return template.requestBody("http://localhost:{{port}}/echo", "" + index, String.class);
                }
            });
            responses.put(index, out);
        }
        assertMockEndpointsSatisfied();
        assertEquals(files, responses.size());
        // get all responses
        Set<String> unique = new HashSet<String>();
        for (Future<String> future : responses.values()) {
            unique.add(future.get());
        }
        // should be 'files' unique responses
        assertEquals("Should be " + files + " unique responses", files, unique.size());
    } finally {
        // Always release the pool, even when an assertion above fails;
        // previously a failing assert leaked the executor's threads.
        executor.shutdownNow();
    }
}
Aggregations