use of com.hazelcast.internal.services.DistributedObjectNamespace in project hazelcast by hazelcast.
the class Invocation_BlockingTest method sync_whenManyGettersAndLotsOfWaiting.
/**
* Tests whether the future of a blocking operation can be shared by multiple threads. This test fails in 3.6 because
* only one thread is able to swap out CONTINUE_WAIT, and all other threads fail with an OperationTimeoutException.
*/
@Test
public void sync_whenManyGettersAndLotsOfWaiting() throws Exception {
int callTimeout = 10000;
Config config = new Config().setProperty(OPERATION_CALL_TIMEOUT_MILLIS.getName(), "" + callTimeout);
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(2);
HazelcastInstance local = factory.newHazelcastInstance(config);
HazelcastInstance remote = factory.newHazelcastInstance(config);
NodeEngineImpl nodeEngine = getNodeEngineImpl(local);
String key = generateKeyOwnedBy(remote);
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
// first we execute an operation that stalls the partition
OperationServiceImpl opService = nodeEngine.getOperationService();
// first we are going to lock
int otherThreadId = 1;
LockOperation otherOp = new LockOperation(new DistributedObjectNamespace(SERVICE_NAME, key), nodeEngine.toData(key), otherThreadId, -1, -1);
opService.createInvocationBuilder(null, otherOp, partitionId).setCallTimeout(callTimeout).invoke().join();
// then we are going to send the invocation and share the future among many threads
int thisThreadId = 2;
LockOperation thisOp = new LockOperation(new DistributedObjectNamespace(SERVICE_NAME, key), nodeEngine.toData(key), thisThreadId, -1, -1);
final InternalCompletableFuture<Object> future = opService.createInvocationBuilder(null, thisOp, partitionId).setCallTimeout(callTimeout).invoke();
// now we are going to do a get on the future by a whole bunch of threads
final List<Future> futures = new LinkedList<Future>();
for (int k = 0; k < 10; k++) {
futures.add(spawn(new Callable() {
@Override
public Object call() throws Exception {
return future.join();
}
}));
}
// let's do a very long wait so that the heartbeat/retry mechanism has kicked in.
// the lock remains locked; so the threads calling future.get remain blocked
sleepMillis(callTimeout * 5);
// unlocking the lock
UnlockOperation op = new UnlockOperation(new DistributedObjectNamespace(SERVICE_NAME, key), nodeEngine.toData(key), otherThreadId);
opService.createInvocationBuilder(null, op, partitionId).setCallTimeout(callTimeout).invoke().join();
// now the futures should all unblock
for (Future f : futures) {
assertEquals(Boolean.TRUE, f.get());
}
}
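The essence of this test, many threads blocking on one shared future and all of them unblocking with the same result, can be illustrated without any Hazelcast internals. Below is a minimal sketch using only java.util.concurrent; the class name, thread pool, sleep, and completion value are assumptions introduced for the example and are not part of the test above.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SharedFutureSketch {
    public static void main(String[] args) throws Exception {
        // one future, shared by many getter threads (mirrors the shared invocation future above)
        CompletableFuture<Boolean> shared = new CompletableFuture<>();
        ExecutorService pool = Executors.newFixedThreadPool(10);
        List<Future<Boolean>> getters = new ArrayList<>();
        for (int k = 0; k < 10; k++) {
            // every getter blocks on the same future instance
            getters.add(pool.submit(shared::join));
        }
        // stands in for the unlock that finally completes the blocked invocation
        Thread.sleep(1000);
        shared.complete(Boolean.TRUE);
        for (Future<Boolean> f : getters) {
            // all getters unblock with the same result
            System.out.println(f.get());
        }
        pool.shutdown();
    }
}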
use of com.hazelcast.internal.services.DistributedObjectNamespace in project hazelcast by hazelcast.
the class Invocation_BlockingTest method async_whenMultipleAndThenOnSameFuture.
/**
* Tests whether the future of a blocking operation can be shared by multiple threads. This test fails in 3.6 because
* only one thread is able to swap out CONTINUE_WAIT, and all other threads fail with an OperationTimeoutException.
*/
@Test
public void async_whenMultipleAndThenOnSameFuture() {
int callTimeout = 5000;
Config config = new Config().setProperty(OPERATION_CALL_TIMEOUT_MILLIS.getName(), "" + callTimeout);
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(2);
HazelcastInstance local = factory.newHazelcastInstance(config);
final HazelcastInstance remote = factory.newHazelcastInstance(config);
NodeEngineImpl nodeEngine = getNodeEngineImpl(local);
String key = generateKeyOwnedBy(remote);
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
// first we execute an operation that stalls the partition.
OperationServiceImpl opService = nodeEngine.getOperationService();
// first we are going to lock
int otherThreadId = 1;
LockOperation otherOp = new LockOperation(new DistributedObjectNamespace(SERVICE_NAME, key), nodeEngine.toData(key), otherThreadId, -1, -1);
opService.createInvocationBuilder(null, otherOp, partitionId).setCallTimeout(callTimeout).invoke().join();
// then we are going to send another lock request from a different thread, so it can't complete
int thisThreadId = 2;
LockOperation thisOp = new LockOperation(new DistributedObjectNamespace(SERVICE_NAME, key), nodeEngine.toData(key), thisThreadId, -1, -1);
final InternalCompletableFuture<Object> future = opService.createInvocationBuilder(null, thisOp, partitionId).setCallTimeout(callTimeout).invoke();
// then we register a bunch of listeners
int listenerCount = 10;
final CountDownLatch listenersCompleteLatch = new CountDownLatch(listenerCount);
for (int k = 0; k < 10; k++) {
future.whenCompleteAsync((response, t) -> {
if (t == null) {
if (Boolean.TRUE.equals(response)) {
listenersCompleteLatch.countDown();
} else {
System.out.println(response);
}
} else {
t.printStackTrace();
}
});
}
// let's do a very long wait so that the heartbeat/retry mechanism has kicked in.
// the lock remains locked; so the threads calling future.get remain blocked
sleepMillis(callTimeout * 5);
// unlocking the lock
UnlockOperation op = new UnlockOperation(new DistributedObjectNamespace(SERVICE_NAME, key), nodeEngine.toData(key), otherThreadId);
opService.createInvocationBuilder(null, op, partitionId).setCallTimeout(callTimeout).invoke().join();
// and all the listeners should complete
assertOpenEventually(listenersCompleteLatch);
}
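The async variant hinges on registering several completion callbacks on the same future and having every one of them observe the eventual result. A minimal, Hazelcast-free sketch of that pattern with a plain CompletableFuture and CountDownLatch follows; the class name and the listener count are assumptions made for illustration.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;

public class MultipleListenersSketch {
    public static void main(String[] args) throws Exception {
        CompletableFuture<Boolean> future = new CompletableFuture<>();
        int listenerCount = 10;
        CountDownLatch listenersCompleteLatch = new CountDownLatch(listenerCount);
        for (int k = 0; k < listenerCount; k++) {
            // every listener registered on the same future observes the same completion value
            future.whenCompleteAsync((response, t) -> {
                if (t == null && Boolean.TRUE.equals(response)) {
                    listenersCompleteLatch.countDown();
                }
            });
        }
        // completing the future once fires all registered listeners
        future.complete(Boolean.TRUE);
        listenersCompleteLatch.await();
        System.out.println("all listeners completed");
    }
}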
use of com.hazelcast.internal.services.DistributedObjectNamespace in project hazelcast by hazelcast.
the class Invocation_BlockingTest method async_whenHeartbeatTimeout.
/**
* Tests that an ExecutionCallback is called when an OperationTimeoutException happens. This is a problem in 3.6
* since async calls don't get the same timeout logic.
*/
@Test
public void async_whenHeartbeatTimeout() {
int callTimeout = 5000;
Config config = new Config().setProperty(OPERATION_CALL_TIMEOUT_MILLIS.getName(), "" + callTimeout);
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(2);
HazelcastInstance local = factory.newHazelcastInstance(config);
HazelcastInstance remote = factory.newHazelcastInstance(config);
warmUpPartitions(factory.getAllHazelcastInstances());
NodeEngineImpl nodeEngine = getNodeEngineImpl(local);
String key = generateKeyOwnedBy(remote);
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
// first we execute an operation that stalls the partition.
OperationServiceImpl opService = nodeEngine.getOperationService();
opService.invokeOnPartition(null, new SlowOperation(5 * callTimeout), partitionId);
// then we execute a lock operation that won't be executed because the partition is blocked.
LockOperation op = new LockOperation(new DistributedObjectNamespace(SERVICE_NAME, key), nodeEngine.toData(key), 1, -1, -1);
InternalCompletableFuture<Object> future = opService.createInvocationBuilder(null, op, partitionId).setCallTimeout(callTimeout).invoke();
// then we register our callback
final BiConsumer<Object, Throwable> callback = getExecutionCallbackMock();
future.whenCompleteAsync(callback);
// and we eventually expect to fail with an OperationTimeoutException
assertFailsEventuallyWithOperationTimeoutException(callback);
}
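What this test asserts is that a callback registered asynchronously still sees the timeout as an exceptional completion. The sketch below reproduces that shape with a plain CompletableFuture, where java.util.concurrent.TimeoutException stands in for Hazelcast's OperationTimeoutException; orTimeout requires Java 9+ and the class and variable names are assumptions for illustration only.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class TimeoutCallbackSketch {
    public static void main(String[] args) throws Exception {
        // a future that is never completed, like the invocation stuck behind the slow operation
        CompletableFuture<Object> future = new CompletableFuture<>();
        CountDownLatch failedWithTimeout = new CountDownLatch(1);
        future.orTimeout(500, TimeUnit.MILLISECONDS)
              .whenCompleteAsync((response, t) -> {
                  // the callback observes the timeout as an exceptional completion
                  if (t instanceof TimeoutException) {
                      failedWithTimeout.countDown();
                  }
              });
        failedWithTimeout.await();
        System.out.println("callback observed the timeout");
    }
}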
use of com.hazelcast.internal.services.DistributedObjectNamespace in project hazelcast by hazelcast.
the class LockInterceptorServiceTest method testLockingInterceptor.
private void testLockingInterceptor(LockInterceptingService implementation) {
Config config = new Config();
ConfigAccessor.getServicesConfig(config).addServiceConfig(new ServiceConfig().setEnabled(true).setName(LockInterceptingService.SERVICE_NAME).setImplementation(implementation));
HazelcastInstance member = createHazelcastInstance(config);
NodeEngine nodeEngine = getNodeEngineImpl(member);
implementation.serializationService = getSerializationService(member);
LockProxySupport lockProxySupport = new LockProxySupport(new DistributedObjectNamespace(LockInterceptingService.SERVICE_NAME, "test-object"), 10000);
for (int i = 0; i < 100; i++) {
try {
Data key = getSerializationService(member).toData("key" + i);
lockProxySupport.lock(nodeEngine, key);
} catch (RuntimeException e) {
ignore(e);
}
}
}
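Across all of these examples, DistributedObjectNamespace is used as a simple value object that pairs a service name with an object name so that operations and proxies address the same distributed object. The sketch below only shows its construction; the accessor names and value-based equality are assumed from the ObjectNamespace contract, and the service/object names are illustrative placeholders rather than real Hazelcast service identifiers.
import com.hazelcast.internal.services.DistributedObjectNamespace;

public class NamespaceSketch {
    public static void main(String[] args) {
        // "my-service" and "test-object" are placeholders for illustration
        DistributedObjectNamespace namespace = new DistributedObjectNamespace("my-service", "test-object");
        // assumed accessors from the ObjectNamespace contract
        System.out.println(namespace.getServiceName());
        System.out.println(namespace.getObjectName());
        // equality is assumed to be value-based over the (service name, object name) pair
        System.out.println(namespace.equals(new DistributedObjectNamespace("my-service", "test-object")));
    }
}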