Usage of com.hazelcast.jet.impl.execution.init.JetInitDataSerializerHook.INIT_EXECUTION_OP in the project hazelcast (by hazelcast):
from the class TopologyChangeTest, method when_jobParticipantReceivesStaleInitOperation_then_jobRestarts.
@Test
public void when_jobParticipantReceivesStaleInitOperation_then_jobRestarts() {
    // Given: enlarge the cluster by one member and wait until every node observes it.
    HazelcastInstance addedMember = createHazelcastInstance(config);
    for (HazelcastInstance member : instances) {
        assertClusterSizeEventually(NODE_COUNT + 1, member);
    }
    // Drop InitExecutionOperation packets from the coordinator to the third member,
    // so the first execution attempt can never be initialized there.
    rejectOperationsBetween(instances[0], instances[2],
            JetInitDataSerializerHook.FACTORY_ID, singletonList(INIT_EXECUTION_OP));

    DAG dag = new DAG().vertex(new Vertex("test",
            new MockPS(TestProcessors.Identity::new, nodeCount + 1)));
    Job job = instances[0].getJet().newJob(dag);

    JetServiceBackend backend = getJetServiceBackend(instances[0]);
    assertTrueEventually(() ->
            assertFalse(backend.getJobCoordinationService().getMasterContexts().isEmpty()));
    MasterContext masterContext = backend.getJobCoordinationService().getMasterContext(job.getId());
    assertTrueEventually(() -> {
        assertEquals(STARTING, masterContext.jobStatus());
        assertNotEquals(0, masterContext.executionId());
    });

    // When: remember the execution id of the stalled attempt, then verify every
    // data member except the filtered one has created an execution context for it.
    long staleExecutionId = masterContext.executionId();
    assertTrueEventually(() -> {
        for (HazelcastInstance member : instances) {
            if (member.getCluster().getLocalMember().isLiteMember() || member == instances[2]) {
                continue;
            }
            JetServiceBackend service = getJetServiceBackend(member);
            assertNotNull(service.getJobExecutionService().getExecutionContext(staleExecutionId));
        }
    });
    // Kill the extra member (topology change) and wait for the cluster to shrink back,
    // then lift the packet filter so the job can be re-initialized.
    addedMember.getLifecycleService().terminate();
    for (HazelcastInstance member : instances) {
        assertClusterSizeEventually(NODE_COUNT, member);
    }
    resetPacketFiltersFrom(instances[0]);

    // Then: the job completes, and it must have done so under a fresh execution id.
    job.join();
    assertNotEquals(staleExecutionId, masterContext.executionId());
}
Usage of com.hazelcast.jet.impl.execution.init.JetInitDataSerializerHook.INIT_EXECUTION_OP in the project hazelcast-jet (by hazelcast):
from the class TopologyChangeTest, method when_jobParticipantReceivesStaleInitOperation_then_jobRestarts.
@Test
public void when_jobParticipantReceivesStaleInitOperation_then_jobRestarts() {
    // Given: add one extra Jet member and wait until the whole cluster sees it.
    JetInstance addedMember = createJetMember(config);
    for (JetInstance member : instances) {
        assertClusterSizeEventually(NODE_COUNT + 1, member.getHazelcastInstance());
    }
    // Drop InitExecutionOperation packets from the coordinator to the third member,
    // so the first execution attempt can never be initialized there.
    rejectOperationsBetween(instances[0].getHazelcastInstance(), instances[2].getHazelcastInstance(),
            JetInitDataSerializerHook.FACTORY_ID, singletonList(INIT_EXECUTION_OP));

    DAG dag = new DAG().vertex(new Vertex("test",
            new MockPS(TestProcessors.Identity::new, nodeCount + 1)));
    Job job = instances[0].newJob(dag);

    JetService jetService = getJetService(instances[0]);
    assertTrueEventually(() ->
            assertFalse(jetService.getJobCoordinationService().getMasterContexts().isEmpty()));
    MasterContext masterContext = jetService.getJobCoordinationService().getMasterContext(job.getId());
    assertTrueEventually(() -> {
        assertEquals(STARTING, masterContext.jobStatus());
        assertNotEquals(0, masterContext.getExecutionId());
    });

    // When: remember the execution id of the stalled attempt, then verify every
    // data member except the filtered one has created an execution context for it.
    long staleExecutionId = masterContext.getExecutionId();
    assertTrueEventually(() -> {
        for (JetInstance member : instances) {
            if (member.getHazelcastInstance().getCluster().getLocalMember().isLiteMember()
                    || member == instances[2]) {
                continue;
            }
            JetService service = getJetService(member);
            assertNotNull(service.getJobExecutionService().getExecutionContext(staleExecutionId));
        }
    });
    // Kill the extra member (topology change) and wait for the cluster to shrink back,
    // then lift the packet filter so the job can be re-initialized.
    addedMember.getHazelcastInstance().getLifecycleService().terminate();
    for (JetInstance member : instances) {
        assertClusterSizeEventually(NODE_COUNT, member.getHazelcastInstance());
    }
    resetPacketFiltersFrom(instances[0].getHazelcastInstance());

    // Then: the job completes, and it must have done so under a fresh execution id.
    job.join();
    assertNotEquals(staleExecutionId, masterContext.getExecutionId());
}
Aggregations