Use of com.hazelcast.map.IMap in project hazelcast by hazelcast.
From the class IOBalancerStressTest, method testEachConnectionUseDifferentOwnerEventually:
@Test
public void testEachConnectionUseDifferentOwnerEventually() {
    Config config = new Config()
            .setProperty(ClusterProperty.IO_BALANCER_INTERVAL_SECONDS.getName(), "1")
            .setProperty(ClusterProperty.IO_THREAD_COUNT.getName(), "4");
    HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance instance2 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance instance3 = Hazelcast.newHazelcastInstance(config);
    instance2.shutdown();
    instance2 = Hazelcast.newHazelcastInstance(config);
    // record the pipelines' load, grouped by owner thread, before starting the load
    Map<NioThread, Map<MigratablePipeline, Long>> pipelinesLoadPerOwnerBeforeLoad1 = getPipelinesLoadPerOwner(instance1);
    Map<NioThread, Map<MigratablePipeline, Long>> pipelinesLoadPerOwnerBeforeLoad2 = getPipelinesLoadPerOwner(instance2);
    Map<NioThread, Map<MigratablePipeline, Long>> pipelinesLoadPerOwnerBeforeLoad3 = getPipelinesLoadPerOwner(instance3);
    IMap<Integer, Integer> map = instance1.getMap(randomMapName());
    for (int i = 0; i < 10000; i++) {
        map.put(i, i);
    }
    assertBalanced(pipelinesLoadPerOwnerBeforeLoad1, instance1);
    assertBalanced(pipelinesLoadPerOwnerBeforeLoad2, instance2);
    assertBalanced(pipelinesLoadPerOwnerBeforeLoad3, instance3);
}
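The helpers getPipelinesLoadPerOwner and assertBalanced are not part of this excerpt. A minimal sketch of the kind of balance check they imply, over a hypothetical per-owner load map (the method name, the threshold, and the map shape are all illustrative, not the actual test-support code; JUnit's assertTrue is assumed statically imported):

static void assertLoadSpread(Map<NioThread, Long> ownerLoads) {
    // Hypothetical sketch: once the IO balancer has migrated pipelines between
    // owner threads, no single NioThread should carry nearly all of the load.
    long total = ownerLoads.values().stream().mapToLong(Long::longValue).sum();
    long max = ownerLoads.values().stream().mapToLong(Long::longValue).max().orElse(0);
    // illustrative threshold: no owner thread carries more than 90% of the total
    assertTrue("load concentrated on one owner thread", total == 0 || max < total * 0.9);
}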
Use of com.hazelcast.map.IMap in project hazelcast by hazelcast.
From the class AbstractGenericRecordIntegrationTest, method testEntryProcessorReturnsGenericRecord:
@Test
public void testEntryProcessorReturnsGenericRecord() {
    HazelcastInstance[] instances = createCluster();
    HazelcastInstance instance = createAccessorInstance(serializationConfig);
    IMap<Object, Object> map = instance.getMap("test");
    NamedPortable expected = new NamedPortable("foo", 900);
    String key = generateKeyOwnedBy(instances[0]);
    map.put(key, expected);
    Object returnValue = map.executeOnKey(key, (EntryProcessor<Object, Object, Object>) entry -> {
        Object value = entry.getValue();
        GenericRecord genericRecord = (GenericRecord) value;
        GenericRecord modifiedGenericRecord = genericRecord.newBuilder()
                .setString("name", "bar")
                .setInt32("myint", 4)
                .build();
        entry.setValue(modifiedGenericRecord);
        return genericRecord.getInt32("myint");
    });
    assertEquals(expected.myint, returnValue);
    NamedPortable actualPortable = (NamedPortable) map.get(key);
    assertEquals("bar", actualPortable.name);
    assertEquals(4, actualPortable.myint);
}
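NamedPortable is a Hazelcast test fixture that is not shown here. A minimal sketch of a Portable with the two fields the test reads; the factory and class IDs are placeholders, and the writeString/readString calls assume the 5.x Portable API:

class NamedPortable implements Portable {
    String name;
    int myint;

    NamedPortable() {
    }

    NamedPortable(String name, int myint) {
        this.name = name;
        this.myint = myint;
    }

    @Override
    public int getFactoryId() {
        return 1; // placeholder factory id
    }

    @Override
    public int getClassId() {
        return 1; // placeholder class id
    }

    @Override
    public void writePortable(PortableWriter writer) throws IOException {
        writer.writeString("name", name);
        writer.writeInt("myint", myint);
    }

    @Override
    public void readPortable(PortableReader reader) throws IOException {
        name = reader.readString("name");
        myint = reader.readInt("myint");
    }
}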
Use of com.hazelcast.map.IMap in project hazelcast by hazelcast.
From the class CompactFormatIntegrationTest, method testEntryProcessor:
@Test
public void testEntryProcessor() {
    IMap<Integer, Object> map = instance1.getMap("test");
    for (int i = 0; i < 100; i++) {
        if (serverDoesNotHaveClasses) {
            GenericRecord record = GenericRecordBuilder.compact("employee")
                    .setInt32("age", i)
                    .setInt64("id", 102310312)
                    .build();
            map.put(i, record);
        } else {
            EmployeeDTO employeeDTO = new EmployeeDTO(i, 102310312);
            map.put(i, employeeDTO);
        }
    }
    IMap<Integer, Object> map2 = instance2.getMap("test");
    if (serverDoesNotHaveClasses) {
        map2.executeOnEntries(new GenericIncreaseAgeEntryProcessor());
    } else {
        map2.executeOnEntries(new IncreaseAgeEntryProcessor());
    }
    for (int i = 0; i < 100; i++) {
        if (serverDoesNotHaveClasses) {
            GenericRecord record = (GenericRecord) map2.get(i);
            assertEquals(1000 + i, record.getInt32("age"));
        } else {
            EmployeeDTO employeeDTO = (EmployeeDTO) map.get(i);
            assertEquals(1000 + i, employeeDTO.getAge());
        }
    }
}
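The two entry processors are not included in this excerpt. Given the assertions (age goes from i to 1000 + i), the generic variant could look roughly like the following sketch; the class body and the use of newBuilderWithClone (which keeps the unmodified id field) are assumptions, not the actual test code:

static class GenericIncreaseAgeEntryProcessor implements EntryProcessor<Integer, Object, Object> {
    @Override
    public Object process(Map.Entry<Integer, Object> entry) {
        // Sketch only: bump "age" by 1000 on each entry, working purely on
        // GenericRecord so the server needs no domain classes on its classpath.
        GenericRecord record = (GenericRecord) entry.getValue();
        entry.setValue(record.newBuilderWithClone()
                .setInt32("age", record.getInt32("age") + 1000)
                .build());
        return null;
    }
}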
Use of com.hazelcast.map.IMap in project hazelcast by hazelcast.
From the class ExecutionLifecycleTest, method when_job_withNoSnapshots_completed_then_noSnapshotMapsLeft:
@Test
public void when_job_withNoSnapshots_completed_then_noSnapshotMapsLeft() {
    HazelcastInstance instance = createHazelcastInstance();
    DAG dag = new DAG();
    dag.newVertex("noop", Processors.noopP());
    newJob(instance, dag, null).join();
    Collection<DistributedObject> objects = instance.getDistributedObjects();
    long snapshotMaps = objects.stream()
            .filter(obj -> obj instanceof IMap)
            .filter(obj -> obj.getName().contains("snapshots.data"))
            .count();
    assertEquals(0, snapshotMaps);
}
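The newJob helper is test-support code that is not part of this excerpt; it presumably does little more than the following sketch (the fallback to a default JobConfig is an assumption):

private static Job newJob(HazelcastInstance instance, DAG dag, JobConfig config) {
    // Submit the DAG with the given config, defaulting when none is supplied.
    return instance.getJet().newJob(dag, config != null ? config : new JobConfig());
}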
Use of com.hazelcast.map.IMap in project hazelcast by hazelcast.
From the class JobRestartWithSnapshotTest, method when_nodeDown_then_jobRestartsFromSnapshot:
@SuppressWarnings("unchecked")
private void when_nodeDown_then_jobRestartsFromSnapshot(boolean twoStage) throws Exception {
    /*
      Design of this test:

      It uses a random partitioned generator of source events. The events are
      Map.Entry(partitionId, timestamp). For each partition, timestamps from
      0..elementsInPartition are generated.

      We start the test with two nodes, localParallelism(1), and 3 source
      partitions. The source instances generate items at the same overall rate
      of 10 per second, which causes one instance to advance twice as fast as
      the other in terms of timestamps. The source processor saves partition
      offsets similarly to how KafkaSources.kafka() and Sources.mapJournal() do.

      After some time we shut down one instance. The job restarts from the
      snapshot and all partitions are restored to a single source processor
      instance. The partition offsets now differ widely, so the source is
      written to emit from the most-behind partition, in order not to emit
      late events from the partitions that are further ahead.

      The local parallelism of InsertWatermarkP is also 1, to avoid the edge
      case where different instances of InsertWatermarkP initialize with their
      first event in different frames and start the no-gap emission from
      different watermarks, which might cause the downstream SlidingWindowP
      to miss some of the first windows.

      The sink writes to an IMap, which is an idempotent sink. The resulting
      contents of the sink map are compared to the expected value.
    */
    DAG dag = new DAG();
    SlidingWindowPolicy wDef = SlidingWindowPolicy.tumblingWinPolicy(3);
    AggregateOperation1<Object, LongAccumulator, Long> aggrOp = counting();
    IMap<List<Long>, Long> result = instance1.getMap("result");
    result.clear();
    int numPartitions = 3;
    int elementsInPartition = 250;
    SupplierEx<Processor> sup = () -> new SequencesInPartitionsGeneratorP(numPartitions, elementsInPartition, true);
    Vertex generator = dag.newVertex("generator", throttle(sup, 30)).localParallelism(1);
    Vertex insWm = dag.newVertex("insWm", insertWatermarksP(eventTimePolicy(
            o -> ((Entry<Integer, Integer>) o).getValue(),
            limitingLag(0), wDef.frameSize(), wDef.frameOffset(), 0))).localParallelism(1);
    Vertex map = dag.newVertex("map", mapP((KeyedWindowResult kwr) ->
            entry(asList(kwr.end(), (long) (int) kwr.key()), kwr.result())));
    Vertex writeMap = dag.newVertex("writeMap", SinkProcessors.writeMapP("result"));
    if (twoStage) {
        Vertex aggregateStage1 = dag.newVertex("aggregateStage1", Processors.accumulateByFrameP(
                singletonList((FunctionEx<? super Object, ?>) t -> ((Entry<Integer, Integer>) t).getKey()),
                singletonList(t1 -> ((Entry<Integer, Integer>) t1).getValue()),
                TimestampKind.EVENT, wDef, aggrOp.withIdentityFinish()));
        Vertex aggregateStage2 = dag.newVertex("aggregateStage2",
                combineToSlidingWindowP(wDef, aggrOp, KeyedWindowResult::new));
        dag.edge(between(insWm, aggregateStage1).partitioned(entryKey()))
           .edge(between(aggregateStage1, aggregateStage2).distributed().partitioned(entryKey()))
           .edge(between(aggregateStage2, map));
    } else {
        Vertex aggregate = dag.newVertex("aggregate", Processors.aggregateToSlidingWindowP(
                singletonList((FunctionEx<Object, Integer>) t -> ((Entry<Integer, Integer>) t).getKey()),
                singletonList(t1 -> ((Entry<Integer, Integer>) t1).getValue()),
                TimestampKind.EVENT, wDef, 0L, aggrOp, KeyedWindowResult::new));
        dag.edge(between(insWm, aggregate).distributed().partitioned(entryKey()))
           .edge(between(aggregate, map));
    }
    dag.edge(between(generator, insWm)).edge(between(map, writeMap));
    JobConfig config = new JobConfig();
    config.setProcessingGuarantee(EXACTLY_ONCE);
    config.setSnapshotIntervalMillis(1200);
    Job job = instance1.getJet().newJob(dag, config);
    JobRepository jobRepository = new JobRepository(instance1);
    int timeout = (int) (MILLISECONDS.toSeconds(config.getSnapshotIntervalMillis() * 3) + 8);
    waitForFirstSnapshot(jobRepository, job.getId(), timeout, false);
    waitForNextSnapshot(jobRepository, job.getId(), timeout, false);
    // wait a little longer so that more items are emitted after the snapshot;
    // they will be overwritten in the sink map after the restart
    Thread.sleep(300);
    instance2.getLifecycleService().terminate();
    // Now the job should detect the member shutdown and restart from the snapshot.
    // Wait until the next snapshot appears.
    waitForNextSnapshot(jobRepository, job.getId(),
            (int) (MILLISECONDS.toSeconds(config.getSnapshotIntervalMillis()) + 10), false);
    waitForNextSnapshot(jobRepository, job.getId(), timeout, false);
    job.join();
    // compute the expected result
    Map<List<Long>, Long> expectedMap = new HashMap<>();
    for (long partition = 0; partition < numPartitions; partition++) {
        long cnt = 0;
        for (long value = 1; value <= elementsInPartition; value++) {
            cnt++;
            if (value % wDef.frameSize() == 0) {
                expectedMap.put(asList(value, partition), cnt);
                cnt = 0;
            }
        }
        if (cnt > 0) {
            expectedMap.put(asList(wDef.higherFrameTs(elementsInPartition - 1), partition), cnt);
        }
    }
    // check against the expected result, dumping diagnostics on mismatch
    if (!expectedMap.equals(result)) {
        System.out.println("All expected entries: " + expectedMap.entrySet().stream()
                .map(Object::toString).collect(joining(", ")));
        System.out.println("All actual entries: " + result.entrySet().stream()
                .map(Object::toString).collect(joining(", ")));
        System.out.println("Non-received expected items: " + expectedMap.keySet().stream()
                .filter(key -> !result.containsKey(key)).map(Object::toString).collect(joining(", ")));
        System.out.println("Received non-expected items: " + result.entrySet().stream()
                .filter(entry -> !expectedMap.containsKey(entry.getKey()))
                .map(Object::toString).collect(joining(", ")));
        System.out.println("Different keys: ");
        for (Entry<List<Long>, Long> rEntry : result.entrySet()) {
            Long expectedValue = expectedMap.get(rEntry.getKey());
            if (expectedValue != null && !expectedValue.equals(rEntry.getValue())) {
                System.out.println("key: " + rEntry.getKey() + ", expected value: " + expectedValue
                        + ", actual value: " + rEntry.getValue());
            }
        }
        System.out.println("-- end of different keys");
        assertEquals(expectedMap, new HashMap<>(result));
    }
}
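The design comment above says the source "emits from the most-behind partition". The real SequencesInPartitionsGeneratorP is not part of this excerpt; a minimal sketch of that selection, over a hypothetical array of per-partition next timestamps:

static int mostBehindPartition(int[] nextTimestampPerPartition) {
    // Sketch only: pick the partition whose next timestamp is lowest, so that
    // emitting from it can never produce an event that is late relative to
    // the progress already made by the partitions that are further ahead.
    int best = 0;
    for (int i = 1; i < nextTimestampPerPartition.length; i++) {
        if (nextTimestampPerPartition[i] < nextTimestampPerPartition[best]) {
            best = i;
        }
    }
    return best;
}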