use of com.hazelcast.jet.impl.execution.OutboundCollector in project hazelcast by hazelcast.
the class ExecutionPlan method createLocalOutboundCollector.
private OutboundCollector createLocalOutboundCollector(EdgeDef edge, int processorIndex,
                                                       int totalPartitionCount, int[][] partitionsPerProcessor) {
    int upstreamParallelism = edge.sourceVertex().localParallelism();
    int downstreamParallelism = edge.destVertex().localParallelism();
    int queueSize = edge.getConfig().getQueueSize();
    int numRemoteMembers = ptionArrgmt.getRemotePartitionAssignment().size();

    if (edge.routingPolicy() == RoutingPolicy.ISOLATED) {
        ConcurrentConveyor<Object>[] localConveyors = localConveyorMap.computeIfAbsent(edge.edgeId(), edgeId -> {
            int queueCount = upstreamParallelism / downstreamParallelism;
            int remainder = upstreamParallelism % downstreamParallelism;
            return Stream.concat(
                    Arrays.stream(createConveyorArray(remainder, queueCount + 1, queueSize)),
                    Arrays.stream(createConveyorArray(downstreamParallelism - remainder,
                            Math.max(1, queueCount), queueSize)))
                    .toArray((IntFunction<ConcurrentConveyor<Object>[]>) ConcurrentConveyor[]::new);
        });

        OutboundCollector[] localCollectors = IntStream.range(0, downstreamParallelism)
                .filter(i -> i % upstreamParallelism == processorIndex % downstreamParallelism)
                .mapToObj(i -> new ConveyorCollector(localConveyors[i], processorIndex / downstreamParallelism, null))
                .toArray(OutboundCollector[]::new);
        return compositeCollector(localCollectors, edge, totalPartitionCount, true);
    } else {
        ConcurrentConveyor<Object>[] localConveyors = localConveyorMap.computeIfAbsent(edge.edgeId(), edgeId -> {
            int queueCount = upstreamParallelism + (!edge.isLocal() ? numRemoteMembers : 0);
            return createConveyorArray(downstreamParallelism, queueCount, queueSize);
        });
        OutboundCollector[] localCollectors = new OutboundCollector[downstreamParallelism];
        Arrays.setAll(localCollectors, n ->
                new ConveyorCollector(localConveyors[n], processorIndex, partitionsPerProcessor[n]));
        return compositeCollector(localCollectors, edge, totalPartitionCount, true);
    }
}
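The ISOLATED branch spreads the upstream producers evenly over the downstream conveyors: each conveyor gets upstreamParallelism / downstreamParallelism queues, and the first upstreamParallelism % downstreamParallelism conveyors get one extra. A minimal, self-contained sketch of that arithmetic follows; the parallelism values are assumed for illustration and are not taken from the snippet above.

// Illustrative sketch only (not part of ExecutionPlan): how many queues each
// downstream conveyor gets for an ISOLATED edge, using the same arithmetic as above.
public class IsolatedQueueCountSketch {
    public static void main(String[] args) {
        int upstreamParallelism = 5;      // assumed example value
        int downstreamParallelism = 2;    // assumed example value
        int queueCount = upstreamParallelism / downstreamParallelism;   // 2
        int remainder = upstreamParallelism % downstreamParallelism;    // 1
        for (int conveyor = 0; conveyor < downstreamParallelism; conveyor++) {
            int queues = conveyor < remainder ? queueCount + 1 : Math.max(1, queueCount);
            System.out.println("conveyor " + conveyor + " -> " + queues + " queues");
        }
        // prints: conveyor 0 -> 3 queues, conveyor 1 -> 2 queues (5 producer queues in total)
    }
}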
use of com.hazelcast.jet.impl.execution.OutboundCollector in project hazelcast by hazelcast.
the class ExecutionPlan method createOutboundEdgeStreams.
private List<OutboundEdgeStream> createOutboundEdgeStreams(VertexDef vertex, int processorIdx, String jobPrefix,
                                                           InternalSerializationService jobSerializationService) {
    List<OutboundEdgeStream> outboundStreams = new ArrayList<>();
    for (EdgeDef edge : vertex.outboundEdges()) {
        OutboundCollector outboundCollector =
                createOutboundCollector(edge, processorIdx, jobPrefix, jobSerializationService);
        OutboundEdgeStream outboundEdgeStream = new OutboundEdgeStream(edge.sourceOrdinal(), outboundCollector);
        outboundStreams.add(outboundEdgeStream);
    }
    return outboundStreams;
}
use of com.hazelcast.jet.impl.execution.OutboundCollector in project hazelcast by hazelcast.
the class ExecutionPlan method createIfAbsentReceiverTasklet.
private void createIfAbsentReceiverTasklet(EdgeDef edge, String jobPrefix, int[][] ptionsPerProcessor,
                                           int totalPtionCount, InternalSerializationService jobSerializationService) {
    final ConcurrentConveyor<Object>[] localConveyors = localConveyorMap.get(edge.edgeId());

    receiverMap.computeIfAbsent(edge.destVertex().vertexId(), x -> new HashMap<>())
               .computeIfAbsent(edge.destOrdinal(), x -> {
                   Map<Address, ReceiverTasklet> addrToTasklet = new HashMap<>();
                   // create a receiver per address
                   int offset = 0;
                   for (Address addr : ptionArrgmt.getRemotePartitionAssignment().keySet()) {
                       final OutboundCollector[] collectors = new OutboundCollector[ptionsPerProcessor.length];
                       // assign the queues starting from end
                       final int queueOffset = --offset;
                       Arrays.setAll(collectors, n -> new ConveyorCollector(
                               localConveyors[n], localConveyors[n].queueCount() + queueOffset,
                               ptionsPerProcessor[n]));
                       final OutboundCollector collector = compositeCollector(collectors, edge, totalPtionCount, true);
                       ReceiverTasklet receiverTasklet = new ReceiverTasklet(
                               collector, jobSerializationService,
                               edge.getConfig().getReceiveWindowMultiplier(),
                               getJetConfig().getFlowControlPeriodMs(),
                               nodeEngine.getLoggingService(), addr, edge.destOrdinal(),
                               edge.destVertex().name(), memberConnections.get(addr), jobPrefix);
                       addrToTasklet.put(addr, receiverTasklet);
                   }
                   return addrToTasklet;
               });
}
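Because offset is pre-decremented once per remote address, the ReceiverTasklet for the first remote member writes into the last queue of every local conveyor (index queueCount() - 1), the second member into the one before it, and so on. A standalone sketch of that indexing follows; the queue count and member count are assumed example values.

// Illustrative sketch only: reproduces the "assign the queues starting from end" indexing above.
public class ReceiverQueueIndexSketch {
    public static void main(String[] args) {
        int queueCount = 6;        // assumed queues per conveyor (local producers + remote members)
        int remoteMembers = 2;     // assumed number of remote members
        int offset = 0;
        for (int member = 0; member < remoteMembers; member++) {
            int queueOffset = --offset;                  // -1, -2, ...
            int queueIndex = queueCount + queueOffset;   // 5, 4, ...
            System.out.println("remote member " + member + " -> queue index " + queueIndex);
        }
    }
}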
use of com.hazelcast.jet.impl.execution.OutboundCollector in project hazelcast by hazelcast.
the class ExecutionPlan method createOutboundCollector.
/**
 * Each edge is represented by an array of conveyors between the producers and consumers.
 * There are as many conveyors as there are consumers.
 * Each conveyor has one queue per producer.
 *
 * For a distributed edge, there is one additional producer per member, represented
 * by the ReceiverTasklet.
 */
private OutboundCollector createOutboundCollector(EdgeDef edge, int processorIndex, String jobPrefix,
                                                  InternalSerializationService jobSerializationService) {
    if (edge.routingPolicy() == RoutingPolicy.ISOLATED && !edge.isLocal()) {
        throw new IllegalArgumentException("Isolated edges must be local: " + edge);
    }

    int totalPartitionCount = nodeEngine.getPartitionService().getPartitionCount();
    int[][] partitionsPerProcessor = getLocalPartitionDistribution(edge, edge.destVertex().localParallelism());

    OutboundCollector localCollector = createLocalOutboundCollector(edge, processorIndex,
            totalPartitionCount, partitionsPerProcessor);
    if (edge.isLocal()) {
        return localCollector;
    }

    OutboundCollector[] remoteCollectors = createRemoteOutboundCollectors(edge, jobPrefix, processorIndex,
            totalPartitionCount, partitionsPerProcessor, jobSerializationService);

    // in a distributed edge, collectors[0] is the composite of the local collectors, and
    // collectors[n] where n > 0 is a collector pointing to remote member _n_
    OutboundCollector[] collectors = new OutboundCollector[remoteCollectors.length + 1];
    collectors[0] = localCollector;
    System.arraycopy(remoteCollectors, 0, collectors, 1, collectors.length - 1);
    return compositeCollector(collectors, edge, totalPartitionCount, false);
}
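Putting the Javadoc together with the non-isolated branch of createLocalOutboundCollector: a distributed edge ends up with one conveyor per local consumer, and each conveyor holds one queue per local producer plus one per remote member (fed by that member's ReceiverTasklet). A small sketch of the resulting counts, with assumed parallelism values:

// Illustrative sketch only: queue count per conveyor for a distributed (non-isolated) edge.
public class DistributedEdgeQueueCountSketch {
    public static void main(String[] args) {
        int upstreamParallelism = 4;   // assumed local producers
        int downstreamParallelism = 3; // assumed local consumers (= number of conveyors)
        int numRemoteMembers = 2;      // assumed remote members, each adds one ReceiverTasklet producer
        boolean isLocalEdge = false;
        int queueCount = upstreamParallelism + (!isLocalEdge ? numRemoteMembers : 0);
        System.out.println(downstreamParallelism + " conveyors, " + queueCount + " queues each");
    }
}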
use of com.hazelcast.jet.impl.execution.OutboundCollector in project hazelcast-jet by hazelcast.
the class ExecutionPlan method initialize.
public void initialize(NodeEngine nodeEngine, long jobId, long executionId, SnapshotContext snapshotContext) {
    this.nodeEngine = nodeEngine;
    this.executionId = executionId;
    initProcSuppliers();
    initDag();

    this.ptionArrgmt = new PartitionArrangement(partitionOwners, nodeEngine.getThisAddress());
    JetInstance instance = getJetInstance(nodeEngine);
    for (VertexDef vertex : vertices) {
        Collection<? extends Processor> processors = createProcessors(vertex, vertex.localParallelism());

        // create StoreSnapshotTasklet and the queues to it
        QueuedPipe<Object>[] snapshotQueues = new QueuedPipe[vertex.localParallelism()];
        Arrays.setAll(snapshotQueues, i -> new OneToOneConcurrentArrayQueue<>(SNAPSHOT_QUEUE_SIZE));
        ConcurrentConveyor<Object> ssConveyor = ConcurrentConveyor.concurrentConveyor(null, snapshotQueues);
        StoreSnapshotTasklet ssTasklet = new StoreSnapshotTasklet(snapshotContext, jobId,
                new ConcurrentInboundEdgeStream(ssConveyor, 0, 0, lastSnapshotId, true, -1,
                        "ssFrom:" + vertex.name()),
                nodeEngine, vertex.name(), vertex.isHigherPriorityUpstream());
        tasklets.add(ssTasklet);

        int localProcessorIdx = 0;
        for (Processor p : processors) {
            int globalProcessorIndex = vertex.getProcIdxOffset() + localProcessorIdx;
            String loggerName = createLoggerName(p.getClass().getName(), vertex.name(), globalProcessorIndex);
            ProcCtx context = new ProcCtx(instance, nodeEngine.getSerializationService(),
                    nodeEngine.getLogger(loggerName), vertex.name(), globalProcessorIndex,
                    jobConfig.getProcessingGuarantee(), vertex.localParallelism(), vertex.totalParallelism());
            String probePrefix = String.format("jet.job.%s.%s#%d",
                    idToString(executionId), vertex.name(), localProcessorIdx);
            ((NodeEngineImpl) nodeEngine).getMetricsRegistry().scanAndRegister(p, probePrefix);

            // createOutboundEdgeStreams() populates localConveyorMap and edgeSenderConveyorMap.
            // It also populates the instance fields: senderMap, receiverMap and tasklets.
            List<OutboundEdgeStream> outboundStreams = createOutboundEdgeStreams(vertex, localProcessorIdx);
            List<InboundEdgeStream> inboundStreams = createInboundEdgeStreams(vertex, localProcessorIdx);

            OutboundCollector snapshotCollector = new ConveyorCollector(ssConveyor, localProcessorIdx, null);

            ProcessorTasklet processorTasklet = new ProcessorTasklet(context, p, inboundStreams, outboundStreams,
                    snapshotContext, snapshotCollector, jobConfig.getMaxWatermarkRetainMillis());
            tasklets.add(processorTasklet);
            this.processors.add(p);
            localProcessorIdx++;
        }
    }
    List<ReceiverTasklet> allReceivers = receiverMap.values().stream()
                                                    .flatMap(o -> o.values().stream())
                                                    .flatMap(a -> a.values().stream())
                                                    .collect(toList());
    tasklets.addAll(allReceivers);
}
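For snapshots, initialize() gives every vertex a single conveyor with one queue per local processor; processor i writes into queue i through its ConveyorCollector, and the vertex's single StoreSnapshotTasklet drains all of those queues. The sketch below mimics that fan-in shape with plain JDK queues; the parallelism and queue size are assumed values, and the real code uses ConcurrentConveyor and OneToOneConcurrentArrayQueue instead.

// Illustrative sketch only: one queue per local processor, drained by a single consumer
// (the StoreSnapshotTasklet in the real code).
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class SnapshotFanInSketch {
    public static void main(String[] args) {
        int localParallelism = 3;        // assumed vertex.localParallelism()
        int snapshotQueueSize = 1024;    // assumed SNAPSHOT_QUEUE_SIZE
        List<Queue<Object>> snapshotQueues = new ArrayList<>();
        for (int i = 0; i < localParallelism; i++) {
            snapshotQueues.add(new ArrayBlockingQueue<>(snapshotQueueSize));
        }
        // processor i offers only to its own queue, mirroring new ConveyorCollector(ssConveyor, i, null)
        for (int i = 0; i < localParallelism; i++) {
            snapshotQueues.get(i).offer("snapshot item from processor " + i);
        }
        // the single snapshot tasklet drains every queue
        for (int i = 0; i < localParallelism; i++) {
            System.out.println("drained from queue " + i + ": " + snapshotQueues.get(i).poll());
        }
    }
}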