Usage example of example.serialization.NodeDTO from the hazelcast/hazelcast project.
Taken from the class CompactStreamSerializerTest, method testDefaultsReflection_recursive.
@Test
public void testDefaultsReflection_recursive() {
    // A three-level recursive NodeDTO chain must survive a compact
    // serialization round trip via reflective (zero-config) serialization.
    SerializationService serializationService = createSerializationService();

    NodeDTO original = new NodeDTO(new NodeDTO(new NodeDTO(2), 1), 0);

    Data serialized = serializationService.toData(original);
    NodeDTO deserialized = (NodeDTO) serializationService.toObject(serialized);

    assertEquals(original, deserialized);
}
Usage example of example.serialization.NodeDTO from the hazelcast/hazelcast project.
Taken from the class CompactStreamSerializerTest, method testDefaultsReflection_insideCollection.
@Test
public void testDefaultsReflection_insideCollection() {
    // An ArrayList mixing several reflectively-serialized compact objects
    // (a recursive NodeDTO, an EmployeeDTO, and a composite EmployerDTO)
    // must round-trip with every element intact.
    SerializationService serializationService = createSerializationService();

    NodeDTO nodeChain = new NodeDTO(new NodeDTO(new NodeDTO(2), 1), 0);
    EmployeeDTO singleEmployee = new EmployeeDTO(30, 102310312);

    long[] employeeIds = new long[] { 22, 44 };
    EmployeeDTO[] staff = new EmployeeDTO[5];
    for (int i = 0; i < staff.length; i++) {
        staff[i] = new EmployeeDTO(20 + i, i * 100);
    }
    EmployerDTO employer = new EmployerDTO("nbss", 40, HIRING, employeeIds, singleEmployee, staff);

    ArrayList<Object> original = new ArrayList<>();
    original.add(nodeChain);
    original.add(singleEmployee);
    original.add(employer);

    Data serialized = serializationService.toData(original);
    ArrayList<Object> roundTripped = serializationService.toObject(serialized);

    assertEquals(nodeChain, roundTripped.get(0));
    assertEquals(singleEmployee, roundTripped.get(1));
    assertEquals(employer, roundTripped.get(2));
}
Usage example of example.serialization.NodeDTO from the hazelcast/hazelcast project.
Taken from the class CompactFormatSplitBrainTest, method testSchemaAccessibleAfterMergingClusters.
@Test
public void testSchemaAccessibleAfterMergingClusters() {
// Verifies that compact-serialization schemas registered while the cluster is
// split remain queryable from every member once the split-brain heals.
Config config = smallInstanceConfig();
// PutIfAbsent merge policy: existing entries win when the brains merge.
config.getMapConfig("map1").getMergePolicyConfig().setPolicy(PutIfAbsentMergePolicy.class.getName());
config.getMapConfig("map3").getMergePolicyConfig().setPolicy(PutIfAbsentMergePolicy.class.getName());
config.getSerializationConfig().setCompactSerializationConfig(new CompactSerializationConfig().setEnabled(true));
// Short merge delays so the post-heal merge runs promptly in the test.
config.setProperty(ClusterProperty.MERGE_FIRST_RUN_DELAY_SECONDS.getName(), "1");
config.setProperty(ClusterProperty.MERGE_NEXT_RUN_DELAY_SECONDS.getName(), "1");
HazelcastInstance instance1 = factory.newHazelcastInstance(config);
HazelcastInstance instance2 = factory.newHazelcastInstance(config);
HazelcastInstance instance3 = factory.newHazelcastInstance(config);
// Isolate instance3 from the other two members to force a split.
SplitBrainTestSupport.blockCommunicationBetween(instance1, instance3);
closeConnectionBetween(instance1, instance3);
SplitBrainTestSupport.blockCommunicationBetween(instance2, instance3);
closeConnectionBetween(instance2, instance3);
// make sure that cluster is split as [ 1 , 2 ] , [ 3 ]
assertClusterSizeEventually(2, instance1, instance2);
assertClusterSizeEventually(1, instance3);
// Populate map1 on the [1, 2] brain with EmployeeDTO values; the compact
// schema for EmployeeDTO is therefore registered only on that brain.
IMap<Integer, EmployeeDTO> map1 = instance1.getMap("map1");
for (int i = 0; i < 100; i++) {
EmployeeDTO employeeDTO = new EmployeeDTO(i, 102310312);
map1.put(i, employeeDTO);
}
// Populate map3 on the isolated [3] brain with recursive NodeDTO values,
// registering the NodeDTO schema only there.
IMap<Integer, NodeDTO> map3 = instance3.getMap("map3");
for (int i = 0; i < 100; i++) {
NodeDTO node = new NodeDTO(new NodeDTO(null, i), i);
map3.put(i, node);
}
assertEquals(100, map1.size());
assertEquals(100, map3.size());
// Heal the split and wait for the cluster to re-form with all three members.
SplitBrainTestSupport.unblockCommunicationBetween(instance1, instance3);
SplitBrainTestSupport.unblockCommunicationBetween(instance2, instance3);
assertClusterSizeEventually(3, instance1, instance2, instance3);
assertEquals(100, map1.size());
// map3's entries are merged asynchronously after the heal, hence "eventually".
assertTrueEventually(() -> assertEquals(100, map3.size()));
// The SQL predicates read compact fields, so they can only succeed if each
// schema is now accessible cluster-wide. With values 0..99 written above,
// "age > 19" and "child.id > 19" each match exactly 80 of the 100 entries.
int size1 = map1.keySet(Predicates.sql("age > 19")).size();
assertEquals(80, size1);
int size3 = map3.keySet(Predicates.sql("child.id > 19")).size();
assertEquals(80, size3);
}
Usage example of example.serialization.NodeDTO from the hazelcast/hazelcast project.
Taken from the class CompactWithSchemaStreamSerializerTest, method testRecursive.
@Test
public void testRecursive() {
    // Round-trip a nested NodeDTO through data that carries its own schema.
    SerializationService writerService = createSerializationService();

    NodeDTO original = new NodeDTO(new NodeDTO(new NodeDTO(2), 1), 0);
    Data withSchema = writerService.toDataWithSchema(original);

    // Deserialize with a second, independent service so the two do not share
    // schemas; the toObject call must rely on the schema embedded in the data.
    SerializationService readerService = createSerializationService();
    NodeDTO roundTripped = readerService.toObject(withSchema);

    assertEquals(original, roundTripped);
}
Aggregations