Usage example of com.hazelcast.client.impl.clientside.HazelcastClientInstanceImpl in the Hazelcast project: class ProxyFactoryTest, method testProxyCreation.
private void testProxyCreation(String serviceName, ClientConfig clientConfig) {
    // Boot a client with the supplied configuration and reach into its
    // internal implementation to capture the proxy-manager context.
    HazelcastInstance client = hazelcastFactory.newHazelcastClient(clientConfig);
    HazelcastClientInstanceImpl impl = getHazelcastClientInstanceImpl(client);
    context = impl.getProxyManager().getContext();

    // Requesting a distributed object must hand back a proxy that reports
    // the requested service name and object name.
    ClientProxy proxy = client.getDistributedObject(serviceName, "CustomClientProxy");
    assertEquals(serviceName, proxy.getServiceName());
    assertEquals("CustomClientProxy", proxy.getName());
}
Usage example of com.hazelcast.client.impl.clientside.HazelcastClientInstanceImpl in the Hazelcast project: class ClientQueryCacheDestroyResourcesTest, method destroy_deregisters_listeners.
@Test
public void destroy_deregisters_listeners() {
    // Each query cache registers a listener with the client listener service.
    QueryCache<String, String> firstCache = map1.getQueryCache(QUERY_CACHE_NAME_1);
    QueryCache<String, String> secondCache = map2.getQueryCache(QUERY_CACHE_NAME_2);
    QueryCache<String, String> thirdCache = map3.getQueryCache(QUERY_CACHE_NAME_3);

    HazelcastClientInstanceImpl client = getHazelcastClientInstanceImpl(clientInstance);
    ClientListenerServiceImpl listenerService = (ClientListenerServiceImpl) client.getListenerService();
    int registrationCountBeforeDestroy = listenerService.getRegistrations().size();

    firstCache.destroy();
    secondCache.destroy();
    thirdCache.destroy();

    // Unrelated registrations (e.g. the backup listener and the ProxyManager
    // listener) may still be present, so we only assert on the delta:
    // destroying the three query caches must remove exactly three listeners.
    final Map<UUID, ClientListenerRegistration> registrations = listenerService.getRegistrations();
    assertEquals(registrations.toString(), 3, registrationCountBeforeDestroy - registrations.size());
}
Usage example of com.hazelcast.client.impl.clientside.HazelcastClientInstanceImpl in the Hazelcast project: class AsyncMapImpl, method putAllAsync.
/**
 * Asynchronously stores all entries of {@code items} in the underlying map.
 * Keys and values are serialized eagerly, bucketed by owning partition, and
 * each partition bucket is dispatched as a single MapPutAll invocation
 * (or as a plain {@code setAsync} when the bucket holds one entry).
 *
 * @param items entries to store; null keys or values are rejected
 * @return a stage completing with {@code null} once every partition
 *         operation has finished
 */
@SuppressWarnings("unchecked")
public CompletionStage<Void> putAllAsync(Map<? extends K, ? extends V> items) {
    ClientMapProxy<K, V> targetMap = (ClientMapProxy<K, V>) map;
    // Fail fast on a missing target map, even when the batch is empty —
    // previously an empty batch on a null map would silently succeed.
    checkNotNull(targetMap, "Null argument map is not allowed");
    if (items.isEmpty()) {
        return completedFuture(null);
    }

    ClientPartitionService partitionService = targetMap.getContext().getPartitionService();
    int partitionCount = partitionService.getPartitionCount();
    InternalSerializationService serializationService = targetMap.getContext().getSerializationService();

    // Serialize each entry and group it under the partition that owns its key.
    Map<Integer, List<Entry<Data, Data>>> entryMap = new HashMap<>(partitionCount);
    for (Entry<? extends K, ? extends V> entry : items.entrySet()) {
        checkNotNull(entry.getKey(), "Null key is not allowed");
        checkNotNull(entry.getValue(), "Null value is not allowed");
        Data keyData = serializationService.toData(entry.getKey());
        int partitionId = partitionService.getPartitionId(keyData);
        entryMap.computeIfAbsent(partitionId, k -> new ArrayList<>())
                .add(new AbstractMap.SimpleEntry<>(keyData, serializationService.toData(entry.getValue())));
    }

    HazelcastClientInstanceImpl client = (HazelcastClientInstanceImpl) targetMap.getContext().getHazelcastInstance();
    CompletableFuture<Void> resultFuture = new CompletableFuture<>();
    // One callback shared by all partition operations; it completes
    // resultFuture after entryMap.size() completions and receives the
    // near-cache (when present — TODO confirm invalidation semantics with
    // createPutAllCallback) plus the serialized keys.
    ExecutionCallback callback = createPutAllCallback(
            entryMap.size(),
            targetMap instanceof NearCachedClientMapProxy
                    ? ((NearCachedClientMapProxy) targetMap).getNearCache()
                    : null,
            items.keySet(),
            entryMap.values().stream().flatMap(List::stream).map(Entry::getKey),
            resultFuture);

    for (Entry<Integer, List<Entry<Data, Data>>> partitionEntries : entryMap.entrySet()) {
        Integer partitionId = partitionEntries.getKey();
        List<Entry<Data, Data>> entries = partitionEntries.getValue();
        if (entries.size() == 1) {
            // A single entry is cheaper as setAsync; cast to raw IMap so the
            // already-serialized key and value can be passed straight through.
            Entry<Data, Data> onlyEntry = entries.get(0);
            ((IMap) targetMap).setAsync(onlyEntry.getKey(), onlyEntry.getValue()).andThen(callback);
        } else {
            ClientMessage request = MapPutAllCodec.encodeRequest(targetMap.getName(), entries);
            new ClientInvocation(client, request, targetMap.getName(), partitionId).invoke().andThen(callback);
        }
    }
    return resultFuture;
}
Usage example of com.hazelcast.client.impl.clientside.HazelcastClientInstanceImpl in the Hazelcast project: class TestHazelcastFactory, method newHazelcastClient.
/**
 * Creates a new Hazelcast client bound to the given source IP. With real
 * networking this delegates to {@link HazelcastClient#newHazelcastClient};
 * with mock networking it wires the client into the in-process registry.
 *
 * @param config   client configuration; when null, the default XML config is used
 * @param sourceIp simulated source address for the mock-network client
 * @return the new client instance (wrapped in a proxy under mock networking)
 * @throws InvalidConfigurationException if a client with the same instance
 *         name already exists
 */
public HazelcastInstance newHazelcastClient(ClientConfig config, String sourceIp) {
    if (!mockNetwork) {
        HazelcastInstance client = HazelcastClient.newHazelcastClient(config);
        registerJvmNameAndPidMetric(((HazelcastClientProxy) client).client);
        return client;
    }
    if (config == null) {
        config = new XmlClientConfigBuilder().build();
    }
    Thread currentThread = Thread.currentThread();
    ClassLoader tccl = currentThread.getContextClassLoader();
    try {
        // Ensure the client classes resolve against the Hazelcast loader
        // rather than the system loader during construction.
        if (tccl == ClassLoader.getSystemClassLoader()) {
            currentThread.setContextClassLoader(HazelcastClient.class.getClassLoader());
        }
        HazelcastClientInstanceImpl client = new HazelcastClientInstanceImpl(
                getInstanceName(config), config, null,
                clientRegistry.createClientServiceFactory(sourceIp),
                createAddressProvider(config));
        registerJvmNameAndPidMetric(client);
        client.start();
        if (clients.putIfAbsent(client.getName(), client) != null) {
            // Don't leak the freshly started client on a name collision.
            client.shutdown();
            throw new InvalidConfigurationException("HazelcastClientInstance with name '" + client.getName()
                    + "' already exists!");
        }
        OutOfMemoryErrorDispatcher.registerClient(client);
        return new HazelcastClientProxy(client);
    } finally {
        // Always restore the caller's context class loader.
        currentThread.setContextClassLoader(tccl);
    }
}
Usage example of com.hazelcast.client.impl.clientside.HazelcastClientInstanceImpl in the Hazelcast project: class ClientStatisticsTest, method testStatisticsCollectionNonDefaultPeriod.
@Test
public void testStatisticsCollectionNonDefaultPeriod() {
    HazelcastInstance hazelcastInstance = hazelcastFactory.newHazelcastInstance();
    final HazelcastClientInstanceImpl client = createHazelcastClient();
    final ClientEngineImpl clientEngine = getClientEngineImpl(hazelcastInstance);
    long clientConnectionTime = System.currentTimeMillis();

    // wait enough time for statistics collection
    waitForFirstStatisticsCollection(client, clientEngine);

    Map<String, String> stats = getStats(client, clientEngine);
    String connStat = stats.get("clusterConnectionTimestamp");
    assertNotNull(format("clusterConnectionTimestamp should not be null (%s)", stats), connStat);
    Long connectionTimeStat = Long.valueOf(connStat);
    // BUG FIX: previously this assertion re-checked connStat instead of the
    // parsed value, duplicating the assertion above.
    assertNotNull(format("connectionTimeStat should not be null (%s)", stats), connectionTimeStat);

    TcpClientConnection aConnection =
            (TcpClientConnection) client.getConnectionManager().getActiveConnections().iterator().next();
    String expectedClientAddress = aConnection.getLocalSocketAddress().getAddress().getHostAddress();
    assertEquals(expectedClientAddress, stats.get("clientAddress"));
    assertEquals(BuildInfoProvider.getBuildInfo().getVersion(), stats.get("clientVersion"));
    assertEquals(client.getName(), stats.get("clientName"));

    // time measured by us after client connection should be greater than the connection time reported by the statistics
    assertTrue(format("connectionTimeStat was %d, clientConnectionTime was %d (%s)",
            connectionTimeStat, clientConnectionTime, stats), clientConnectionTime >= connectionTimeStat);

    // no map or cache activity yet, so neither stat should be reported
    String mapHits = stats.get(MAP_HITS_KEY);
    assertNull(format("%s should be null (%s)", MAP_HITS_KEY, stats), mapHits);
    String cacheHits = stats.get(CACHE_HITS_KEY);
    assertNull(format("%s should be null (%s)", CACHE_HITS_KEY, stats), cacheHits);

    String lastStatisticsCollectionTimeString = stats.get("lastStatisticsCollectionTime");
    final long lastCollectionTime = Long.parseLong(lastStatisticsCollectionTimeString);

    // this creates empty map statistics
    client.getMap(MAP_NAME);

    // wait enough time for statistics collection
    waitForNextStatsCollection(client, clientEngine, lastStatisticsCollectionTimeString);

    assertTrueEventually(() -> {
        Map<String, String> stats12 = getStats(client, clientEngine);
        String mapHits12 = stats12.get(MAP_HITS_KEY);
        assertNotNull(format("%s should not be null (%s)", MAP_HITS_KEY, stats12), mapHits12);
        assertEquals(format("Expected 0 map hits (%s)", stats12), "0", mapHits12);
        String cacheHits12 = stats12.get(CACHE_HITS_KEY);
        assertNull(format("%s should be null (%s)", CACHE_HITS_KEY, stats12), cacheHits12);
        // verify that collection is periodic
        verifyThatCollectionIsPeriodic(stats12, lastCollectionTime);
    });

    // produce map and cache stat
    produceSomeStats(hazelcastInstance, client);

    assertTrueEventually(() -> {
        Map<String, String> stats1 = getStats(client, clientEngine);
        String mapHits1 = stats1.get(MAP_HITS_KEY);
        assertNotNull(format("%s should not be null (%s)", MAP_HITS_KEY, stats1), mapHits1);
        assertEquals(format("Expected 1 map hits (%s)", stats1), "1", mapHits1);
        String cacheHits1 = stats1.get(CACHE_HITS_KEY);
        assertNotNull(format("%s should not be null (%s)", CACHE_HITS_KEY, stats1), cacheHits1);
        assertEquals(format("Expected 1 cache hits (%s)", stats1), "1", cacheHits1);
    });
}
Aggregations