Use of com.hazelcast.core.Member in project hazelcast by hazelcast: class ClientScheduledExecutorProxy, method shutdown().
@Override
public void shutdown() {
    Collection<Member> members = getContext().getClusterService().getMemberList();
    Collection<Future> calls = new LinkedList<Future>();
    for (Member member : members) {
        ClientMessage request = ScheduledExecutorShutdownCodec.encodeRequest(getName(), member.getAddress());
        calls.add(doSubmitOnAddress(request, SUBMIT_DECODER, member.getAddress()));
    }
    waitWithDeadline(calls, SHUTDOWN_TIMEOUT, TimeUnit.SECONDS, WHILE_SHUTDOWN_EXCEPTION_HANDLER);
}
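The proxy method above fans a shutdown request out to every member and waits for all replies with a fixed deadline. A minimal caller-side sketch of how this is reached through the public API, assuming a running Hazelcast 3.8+ cluster; the executor name "my-scheduler" is an illustrative assumption:

import com.hazelcast.client.HazelcastClient;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.scheduledexecutor.IScheduledExecutorService;

public class ScheduledExecutorShutdownExample {
    public static void main(String[] args) {
        // Connect a client; the scheduled executor handle it returns is backed
        // by ClientScheduledExecutorProxy.
        HazelcastInstance client = HazelcastClient.newHazelcastClient();
        IScheduledExecutorService scheduler = client.getScheduledExecutorService("my-scheduler");
        // Triggers the per-member shutdown fan-out shown above.
        scheduler.shutdown();
        client.shutdown();
    }
}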
Use of com.hazelcast.core.Member in project hazelcast by hazelcast: class ClientScheduledExecutorProxy, method scheduleOnMembersAtFixedRate().
@Override
public Map<Member, IScheduledFuture<?>> scheduleOnMembersAtFixedRate(Runnable command, Collection<Member> members, long initialDelay, long period, TimeUnit unit) {
    checkNotNull(command, "Command is null");
    checkNotNull(members, "Members is null");
    checkNotNull(unit, "Unit is null");
    String name = extractNameOrGenerateOne(command);
    Callable adapter = createScheduledRunnableAdapter(command);
    Map<Member, IScheduledFuture<?>> futures = new HashMap<Member, IScheduledFuture<?>>();
    for (Member member : members) {
        TaskDefinition definition = new TaskDefinition(TaskDefinition.Type.AT_FIXED_RATE, name, adapter, initialDelay, period, unit);
        futures.put(member, scheduleOnMember(name, member, definition));
    }
    return futures;
}
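A hedged usage sketch of the same signature from the caller's side; the task class and the executor name are illustrative assumptions, and the Runnable must be Serializable so it can be sent to each member:

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.Member;
import com.hazelcast.scheduledexecutor.IScheduledExecutorService;
import com.hazelcast.scheduledexecutor.IScheduledFuture;

import java.io.Serializable;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.TimeUnit;

public class FixedRateOnMembersExample {

    // Hypothetical task; Serializable because it travels to remote members.
    static class EchoTask implements Runnable, Serializable {
        @Override
        public void run() {
            System.out.println("tick on " + Thread.currentThread().getName());
        }
    }

    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        IScheduledExecutorService scheduler = hz.getScheduledExecutorService("my-scheduler");
        Collection<Member> members = hz.getCluster().getMembers();
        // One IScheduledFuture per member, keyed by that member, exactly as the
        // proxy builds its result map above.
        Map<Member, IScheduledFuture<?>> futures =
                scheduler.scheduleOnMembersAtFixedRate(new EchoTask(), members, 5, 10, TimeUnit.SECONDS);
        System.out.println("Scheduled on " + futures.size() + " member(s)");
    }
}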
Use of com.hazelcast.core.Member in project hazelcast by hazelcast: class AbstractMapReduceTask, method startSupervisionTask().
private void startSupervisionTask(JobTracker jobTracker) {
    final MapReduceService mapReduceService = getService(MapReduceService.SERVICE_NAME);
    final JobTrackerConfig config = ((AbstractJobTracker) jobTracker).getJobTrackerConfig();
    final boolean communicateStats = config.isCommunicateStats();
    final int chunkSize = getChunkSizeOrConfigChunkSize(config);
    final TopologyChangedStrategy topologyChangedStrategy = getTopologyChangedStrategyOrConfigTopologyChangedStrategy(config);
    final String name = getDistributedObjectName();
    final String jobId = getJobId();
    final KeyValueSource keyValueSource = getKeyValueSource();
    final Mapper mapper = getMapper();
    final CombinerFactory combinerFactory = getCombinerFactory();
    final ReducerFactory reducerFactory = getReducerFactory();
    final Collection keys = getKeys();
    final Collection<Object> keyObjects = getKeyObjects(keys);
    final KeyPredicate predicate = getPredicate();
    final ClusterService clusterService = nodeEngine.getClusterService();
    for (Member member : clusterService.getMembers(KeyValueJobOperation.MEMBER_SELECTOR)) {
        Operation operation = new KeyValueJobOperation(name, jobId, chunkSize, keyValueSource, mapper, combinerFactory, reducerFactory, communicateStats, topologyChangedStrategy);
        executeOperation(operation, member.getAddress(), mapReduceService, nodeEngine);
    }
    // After we prepared all the remote systems we can now start the processing
    for (Member member : clusterService.getMembers(DATA_MEMBER_SELECTOR)) {
        Operation operation = new StartProcessingJobOperation(name, jobId, keyObjects, predicate);
        executeOperation(operation, member.getAddress(), mapReduceService, nodeEngine);
    }
}
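startSupervisionTask is internal plumbing: it ships a KeyValueJobOperation to every job-capable member and then a StartProcessingJobOperation to the data members. The public entry point that ends up here is the com.hazelcast.mapreduce Job API of Hazelcast 3.x. A word-count style sketch under that assumption; the map name "articles" and the tokenizing mapper are illustrative:

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.mapreduce.Context;
import com.hazelcast.mapreduce.Job;
import com.hazelcast.mapreduce.JobTracker;
import com.hazelcast.mapreduce.KeyValueSource;
import com.hazelcast.mapreduce.Mapper;
import com.hazelcast.mapreduce.Reducer;
import com.hazelcast.mapreduce.ReducerFactory;

import java.util.Map;

public class WordCountJobExample {

    // Emits (token, 1) for every whitespace-separated token in the value.
    static class TokenizerMapper implements Mapper<String, String, String, Integer> {
        @Override
        public void map(String key, String value, Context<String, Integer> context) {
            for (String token : value.toLowerCase().split("\\s+")) {
                context.emit(token, 1);
            }
        }
    }

    // Sums the emitted counts per token on the reducing member.
    static class SumReducerFactory implements ReducerFactory<String, Integer, Integer> {
        @Override
        public Reducer<Integer, Integer> newReducer(String key) {
            return new Reducer<Integer, Integer>() {
                private int sum;

                @Override
                public void reduce(Integer value) {
                    sum += value;
                }

                @Override
                public Integer finalizeReduce() {
                    return sum;
                }
            };
        }
    }

    public static void main(String[] args) throws Exception {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        IMap<String, String> articles = hz.getMap("articles");
        articles.put("a1", "hazelcast members run map reduce on members");

        JobTracker tracker = hz.getJobTracker("default");
        Job<String, String> job = tracker.newJob(KeyValueSource.fromMap(articles));
        // submit() is what eventually drives the per-member operations shown above.
        Map<String, Integer> counts = job
                .mapper(new TokenizerMapper())
                .reducer(new SumReducerFactory())
                .submit()
                .get();
        System.out.println(counts);
    }
}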
Use of com.hazelcast.core.Member in project hazelcast by hazelcast: class MemberImpl, method invokeOnAllMembers().
private void invokeOnAllMembers(Operation operation) {
    NodeEngineImpl nodeEngine = instance.node.nodeEngine;
    OperationService os = nodeEngine.getOperationService();
    String uuid = nodeEngine.getLocalMember().getUuid();
    operation.setCallerUuid(uuid).setNodeEngine(nodeEngine);
    try {
        for (Member member : nodeEngine.getClusterService().getMembers()) {
            if (!member.localMember()) {
                os.send(operation, member.getAddress());
            } else {
                os.execute(operation);
            }
        }
    } catch (Throwable t) {
        throw ExceptionUtil.rethrow(t);
    }
}
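invokeOnAllMembers uses the internal OperationService directly, sending to remote members and executing locally on the caller. A rough public-API analogue of the same member fan-out, using IExecutorService; the task class and executor name are illustrative assumptions:

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IExecutorService;
import com.hazelcast.core.Member;

import java.io.Serializable;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.Future;

public class BroadcastExample {

    // Hypothetical task; Serializable so it can be shipped to remote members.
    static class WhoAmITask implements Callable<String>, Serializable {
        @Override
        public String call() {
            return "ran on " + Thread.currentThread().getName();
        }
    }

    public static void main(String[] args) throws Exception {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        IExecutorService executor = hz.getExecutorService("broadcast");
        // Like invokeOnAllMembers above, the task reaches every cluster member,
        // including the local one; one Future per member is returned.
        Map<Member, Future<String>> results = executor.submitToAllMembers(new WhoAmITask());
        for (Map.Entry<Member, Future<String>> entry : results.entrySet()) {
            System.out.println(entry.getKey().getAddress() + " -> " + entry.getValue().get());
        }
        hz.shutdown();
    }
}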
Use of com.hazelcast.core.Member in project hazelcast by hazelcast: class KeyValueJob, method startSupervisionTask().
private <T> JobCompletableFuture<T> startSupervisionTask(TrackableJobFuture<T> jobFuture, String jobId) {
    AbstractJobTracker jobTracker = (AbstractJobTracker) this.jobTracker;
    JobTrackerConfig config = jobTracker.getJobTrackerConfig();
    boolean communicateStats = config.isCommunicateStats();
    if (chunkSize == -1) {
        chunkSize = config.getChunkSize();
    }
    if (topologyChangedStrategy == null) {
        topologyChangedStrategy = config.getTopologyChangedStrategy();
    }
    ClusterService clusterService = nodeEngine.getClusterService();
    for (Member member : clusterService.getMembers(KeyValueJobOperation.MEMBER_SELECTOR)) {
        Operation operation = new KeyValueJobOperation<KeyIn, ValueIn>(name, jobId, chunkSize, keyValueSource, mapper, combinerFactory, reducerFactory, communicateStats, topologyChangedStrategy);
        executeOperation(operation, member.getAddress(), mapReduceService, nodeEngine);
    }
    // After we prepared all the remote systems we can now start the processing
    for (Member member : clusterService.getMembers(DATA_MEMBER_SELECTOR)) {
        Operation operation = new StartProcessingJobOperation<KeyIn>(name, jobId, keys, predicate);
        executeOperation(operation, member.getAddress(), mapReduceService, nodeEngine);
    }
    return jobFuture;
}
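This member fan-out mirrors AbstractMapReduceTask.startSupervisionTask; the difference is that the chunk size and topology-changed strategy fall back to the JobTrackerConfig only when the job did not set them itself. A configuration sketch showing where those fallbacks come from, assuming the Hazelcast 3.x Config and map/reduce Job API; the tracker name "analytics" and map name "data" are illustrative:

import com.hazelcast.config.Config;
import com.hazelcast.config.JobTrackerConfig;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.mapreduce.Job;
import com.hazelcast.mapreduce.JobTracker;
import com.hazelcast.mapreduce.KeyValueSource;
import com.hazelcast.mapreduce.TopologyChangedStrategy;

public class JobTrackerDefaultsExample {
    public static void main(String[] args) {
        Config config = new Config();
        // Defaults that apply when a submitted job leaves chunkSize at -1 and
        // topologyChangedStrategy at null, as checked in the method above.
        JobTrackerConfig trackerConfig = config.getJobTrackerConfig("analytics");
        trackerConfig.setChunkSize(500);
        trackerConfig.setCommunicateStats(true);
        trackerConfig.setTopologyChangedStrategy(TopologyChangedStrategy.CANCEL_RUNNING_OPERATION);

        HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
        IMap<String, String> data = hz.getMap("data");
        JobTracker tracker = hz.getJobTracker("analytics");

        Job<String, String> job = tracker.newJob(KeyValueSource.fromMap(data));
        // Per-job overrides; without these calls the JobTrackerConfig values above are used.
        job.chunkSize(100)
           .topologyChangedStrategy(TopologyChangedStrategy.CANCEL_RUNNING_OPERATION);
    }
}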