Use of com.hazelcast.spi.Operation in project hazelcast by hazelcast.
In class AbstractMapReduceTask, the method startSupervisionTask:
private void startSupervisionTask(JobTracker jobTracker) {
    final MapReduceService mapReduceService = getService(MapReduceService.SERVICE_NAME);
    final JobTrackerConfig config = ((AbstractJobTracker) jobTracker).getJobTrackerConfig();
    final boolean communicateStats = config.isCommunicateStats();
    final int chunkSize = getChunkSizeOrConfigChunkSize(config);
    final TopologyChangedStrategy topologyChangedStrategy =
            getTopologyChangedStrategyOrConfigTopologyChangedStrategy(config);
    final String name = getDistributedObjectName();
    final String jobId = getJobId();
    final KeyValueSource keyValueSource = getKeyValueSource();
    final Mapper mapper = getMapper();
    final CombinerFactory combinerFactory = getCombinerFactory();
    final ReducerFactory reducerFactory = getReducerFactory();
    final Collection keys = getKeys();
    final Collection<Object> keyObjects = getKeyObjects(keys);
    final KeyPredicate predicate = getPredicate();
    final ClusterService clusterService = nodeEngine.getClusterService();
    for (Member member : clusterService.getMembers(KeyValueJobOperation.MEMBER_SELECTOR)) {
        Operation operation = new KeyValueJobOperation(name, jobId, chunkSize, keyValueSource, mapper,
                combinerFactory, reducerFactory, communicateStats, topologyChangedStrategy);
        executeOperation(operation, member.getAddress(), mapReduceService, nodeEngine);
    }
    // After we have prepared all the remote systems, we can now start the processing
    for (Member member : clusterService.getMembers(DATA_MEMBER_SELECTOR)) {
        Operation operation = new StartProcessingJobOperation(name, jobId, keyObjects, predicate);
        executeOperation(operation, member.getAddress(), mapReduceService, nodeEngine);
    }
}
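The executeOperation(...) call used above is a static helper from the map-reduce module rather than part of the Operation SPI itself. As a rough illustration only, the sketch below shows how such a helper could be written against the public OperationService API; the class name and the error handling are assumptions, not the project's exact implementation.

import com.hazelcast.mapreduce.impl.MapReduceService;
import com.hazelcast.nio.Address;
import com.hazelcast.spi.NodeEngine;
import com.hazelcast.spi.Operation;
import com.hazelcast.spi.OperationService;

final class OperationDispatch {

    private OperationDispatch() {
    }

    // Hypothetical stand-in for the executeOperation(...) helper called above:
    // routes the prepared Operation to one member and waits for its acknowledgement.
    // The mapReduceService parameter is kept only to mirror the call sites; the real
    // helper may use it, for example for error handling.
    static void executeOperation(Operation operation, Address address,
                                 MapReduceService mapReduceService, NodeEngine nodeEngine) {
        OperationService operationService = nodeEngine.getOperationService();
        try {
            operationService.invokeOnTarget(MapReduceService.SERVICE_NAME, operation, address).get();
        } catch (Exception e) {
            throw new RuntimeException("Could not prepare map-reduce job on member " + address, e);
        }
    }
}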
Use of com.hazelcast.spi.Operation in project hazelcast by hazelcast.
In class KeyValueJob, the method startSupervisionTask:
private <T> JobCompletableFuture<T> startSupervisionTask(TrackableJobFuture<T> jobFuture, String jobId) {
    AbstractJobTracker jobTracker = (AbstractJobTracker) this.jobTracker;
    JobTrackerConfig config = jobTracker.getJobTrackerConfig();
    boolean communicateStats = config.isCommunicateStats();
    if (chunkSize == -1) {
        chunkSize = config.getChunkSize();
    }
    if (topologyChangedStrategy == null) {
        topologyChangedStrategy = config.getTopologyChangedStrategy();
    }
    ClusterService clusterService = nodeEngine.getClusterService();
    for (Member member : clusterService.getMembers(KeyValueJobOperation.MEMBER_SELECTOR)) {
        Operation operation = new KeyValueJobOperation<KeyIn, ValueIn>(name, jobId, chunkSize, keyValueSource,
                mapper, combinerFactory, reducerFactory, communicateStats, topologyChangedStrategy);
        executeOperation(operation, member.getAddress(), mapReduceService, nodeEngine);
    }
    // After we have prepared all the remote systems, we can now start the processing
    for (Member member : clusterService.getMembers(DATA_MEMBER_SELECTOR)) {
        Operation operation = new StartProcessingJobOperation<KeyIn>(name, jobId, keys, predicate);
        executeOperation(operation, member.getAddress(), mapReduceService, nodeEngine);
    }
    return jobFuture;
}
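Both startSupervisionTask variants follow the same two-phase pattern: a KeyValueJobOperation first prepares the job on every job-capable member, then a StartProcessingJobOperation kicks off mapping on the data members. From the user's side this is triggered by submitting a job through the Hazelcast 3.x com.hazelcast.mapreduce API (removed in later versions); the word-count sketch below is a minimal, assumed usage example with hypothetical class and map names.

import java.util.Map;

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.mapreduce.Context;
import com.hazelcast.mapreduce.Job;
import com.hazelcast.mapreduce.JobCompletableFuture;
import com.hazelcast.mapreduce.JobTracker;
import com.hazelcast.mapreduce.KeyValueSource;
import com.hazelcast.mapreduce.Mapper;
import com.hazelcast.mapreduce.Reducer;
import com.hazelcast.mapreduce.ReducerFactory;

public class WordCountExample {

    // Hypothetical mapper: emits (word, 1) for every value in the source map.
    public static class TokenMapper implements Mapper<String, String, String, Integer> {
        @Override
        public void map(String key, String value, Context<String, Integer> context) {
            for (String token : value.split("\\s+")) {
                context.emit(token, 1);
            }
        }
    }

    // Hypothetical reducer factory: sums the emitted counts per word.
    public static class SumReducerFactory implements ReducerFactory<String, Integer, Integer> {
        @Override
        public Reducer<Integer, Integer> newReducer(String key) {
            return new Reducer<Integer, Integer>() {
                private int sum;

                @Override
                public void reduce(Integer value) {
                    sum += value;
                }

                @Override
                public Integer finalizeReduce() {
                    return sum;
                }
            };
        }
    }

    public static void main(String[] args) throws Exception {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        IMap<String, String> lines = hz.getMap("lines");
        lines.put("1", "hazelcast map reduce");
        lines.put("2", "map reduce on hazelcast");

        JobTracker jobTracker = hz.getJobTracker("default");
        Job<String, String> job = jobTracker.newJob(KeyValueSource.fromMap(lines));

        // submit() is the point where KeyValueJob.startSupervisionTask(...) is eventually invoked
        JobCompletableFuture<Map<String, Integer>> future = job
                .mapper(new TokenMapper())
                .reducer(new SumReducerFactory())
                .submit();

        System.out.println(future.get());
        hz.shutdown();
    }
}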
Use of com.hazelcast.spi.Operation in project hazelcast by hazelcast.
In class AdvancedClusterStateTest, the test partitionInvocation_shouldFail_whenPartitionsNotAssigned_inFrozenState:
@Test(expected = IllegalStateException.class)
public void partitionInvocation_shouldFail_whenPartitionsNotAssigned_inFrozenState() throws InterruptedException {
    Config config = new Config();
    TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(3);
    HazelcastInstance[] instances = factory.newInstances(config);
    HazelcastInstance hz1 = instances[0];
    HazelcastInstance hz2 = instances[1];
    HazelcastInstance hz3 = instances[2];
    hz2.getCluster().changeClusterState(ClusterState.FROZEN);
    InternalOperationService operationService = getNode(hz3).getNodeEngine().getOperationService();
    Operation op = new AddAndGetOperation(randomName(), 1);
    Future<Long> future = operationService.invokeOnPartition(AtomicLongService.SERVICE_NAME, op, 1);
    try {
        future.get();
        fail("Partition invocation must fail, because partitions cannot be assigned!");
    } catch (ExecutionException e) {
        // IllegalStateException should be cause of ExecutionException.
        throw ExceptionUtil.rethrow(e);
    }
}
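The expected IllegalStateException arrives wrapped in an ExecutionException, and ExceptionUtil.rethrow(e) surfaces the cause so that the @Test(expected = ...) contract is satisfied. For readers who want that unwrapping inline, here is a simplified, self-contained stand-in (an approximation, not Hazelcast's actual ExceptionUtil implementation):

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

final class Unwrap {

    private Unwrap() {
    }

    // Simplified stand-in for ExceptionUtil.rethrow: rethrow the cause of the
    // ExecutionException so the original failure (here an IllegalStateException)
    // reaches the caller directly.
    static <T> T joinAndUnwrap(Future<T> future) throws InterruptedException {
        try {
            return future.get();
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof RuntimeException) {
                throw (RuntimeException) cause;
            }
            throw new RuntimeException(cause);
        }
    }
}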
Use of com.hazelcast.spi.Operation in project hazelcast by hazelcast.
In class BasicRecordStoreLoader, the method sendOperation:
private Future<?> sendOperation(List<Data> keyValueSequence, AtomicInteger finishedBatchCounter) {
    OperationService operationService = mapServiceContext.getNodeEngine().getOperationService();
    final Operation operation = createOperation(keyValueSequence, finishedBatchCounter);
    // operationService.executeOperation(operation);
    return operationService.invokeOnPartition(MapService.SERVICE_NAME, operation, partitionId);
}
Use of com.hazelcast.spi.Operation in project hazelcast by hazelcast.
In class OperationThread, the method process:
private void process(Object task) {
    try {
        if (task.getClass() == Packet.class) {
            Packet packet = (Packet) task;
            currentRunner = getOperationRunner(packet.getPartitionId());
            currentRunner.run(packet);
            completedPacketCount.inc();
        } else if (task instanceof Operation) {
            Operation operation = (Operation) task;
            currentRunner = getOperationRunner(operation.getPartitionId());
            currentRunner.run(operation);
            completedOperationCount.inc();
        } else if (task instanceof PartitionSpecificRunnable) {
            PartitionSpecificRunnable runnable = (PartitionSpecificRunnable) task;
            currentRunner = getOperationRunner(runnable.getPartitionId());
            currentRunner.run(runnable);
            completedPartitionSpecificRunnableCount.inc();
        } else if (task instanceof Runnable) {
            Runnable runnable = (Runnable) task;
            runnable.run();
            completedRunnableCount.inc();
        } else {
            throw new IllegalStateException("Unhandled task type for task:" + task);
        }
        completedTotalCount.inc();
    } catch (Throwable t) {
        errorCount.inc();
        inspectOutOfMemoryError(t);
        logger.severe("Failed to process packet: " + task + " on " + getName(), t);
    } finally {
        currentRunner = null;
    }
}
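In the "task instanceof Operation" branch, currentRunner.run(operation) ends up calling the operation's own run() method on the thread that owns operation.getPartitionId(). Below is a minimal sketch of a custom operation against the Hazelcast 3.x SPI; the class name is hypothetical and serialization is kept to a single String field.

import java.io.IOException;

import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.spi.Operation;

// Hypothetical operation: echoes a message back to the caller once the
// owning partition thread has executed it.
public class EchoOperation extends Operation {

    private String message;
    private String response;

    public EchoOperation() {
        // no-arg constructor required for deserialization
    }

    public EchoOperation(String message) {
        this.message = message;
    }

    @Override
    public void run() throws Exception {
        // runs on the partition thread selected via getPartitionId()
        response = "echo: " + message;
    }

    @Override
    public Object getResponse() {
        return response;
    }

    @Override
    protected void writeInternal(ObjectDataOutput out) throws IOException {
        out.writeUTF(message);
    }

    @Override
    protected void readInternal(ObjectDataInput in) throws IOException {
        message = in.readUTF();
    }
}

Submitted locally through OperationService.invokeOnPartition(serviceName, new EchoOperation("hi"), partitionId), such an operation typically lands in the "task instanceof Operation" branch above, while the same operation arriving from a remote member shows up as a Packet first.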