Use of com.hazelcast.spi.NodeEngine in project hazelcast by hazelcast.
The class ObjectMultiMapProxy, method tryLock.
@Override
public boolean tryLock(K key, long time, TimeUnit timeunit) throws InterruptedException {
    checkNotNull(key, NULL_KEY_IS_NOT_ALLOWED);
    NodeEngine nodeEngine = getNodeEngine();
    Data dataKey = nodeEngine.toData(key);
    return lockSupport.tryLock(nodeEngine, dataKey, time, timeunit);
}
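In application code this proxy method is reached through the public MultiMap API rather than called directly. A minimal usage sketch, assuming Hazelcast 3.x; the map name and key are illustrative:

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.MultiMap;

import java.util.concurrent.TimeUnit;

public class TryLockExample {
    public static void main(String[] args) throws InterruptedException {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        MultiMap<String, String> orders = hz.getMultiMap("orders"); // "orders" is an illustrative name
        // tryLock waits at most 500 ms for the per-key lock; on timeout it returns false rather than throwing
        if (orders.tryLock("order-1", 500, TimeUnit.MILLISECONDS)) {
            try {
                orders.put("order-1", "item-a");
            } finally {
                orders.unlock("order-1");
            }
        }
        hz.shutdown();
    }
}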
Use of com.hazelcast.spi.NodeEngine in project hazelcast-jet by hazelcast.
The class Networking, method handleFlowControlPacket.
private void handleFlowControlPacket(Address fromAddr, byte[] packet) throws IOException {
    try (BufferObjectDataInput in = createObjectDataInput(nodeEngine, packet)) {
        final int executionCtxCount = in.readInt();
        for (int j = 0; j < executionCtxCount; j++) {
            final long executionId = in.readLong();
            final Map<Integer, Map<Integer, Map<Address, SenderTasklet>>> senderMap =
                    jobExecutionService.getSenderMap(executionId);
            if (senderMap == null) {
                logMissingExeCtx(executionId);
                continue;
            }
            final int flowCtlMsgCount = in.readInt();
            for (int k = 0; k < flowCtlMsgCount; k++) {
                int destVertexId = in.readInt();
                int destOrdinal = in.readInt();
                int sendSeqLimitCompressed = in.readInt();
                final SenderTasklet t = Optional.ofNullable(senderMap.get(destVertexId))
                        .map(ordinalMap -> ordinalMap.get(destOrdinal))
                        .map(addrMap -> addrMap.get(fromAddr))
                        .orElse(null);
                if (t == null) {
                    logMissingSenderTasklet(destVertexId, destOrdinal);
                    return;
                }
                t.setSendSeqLimitCompressed(sendSeqLimitCompressed);
            }
        }
    }
}
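The read loop implies the flow-control packet layout: an execution-context count, then per context an execution id and a flow-control message count, then per message three ints (destVertexId, destOrdinal, sendSeqLimitCompressed). A hypothetical writer sketch that mirrors that field order with plain java.io; the real packet is produced with Hazelcast's object data output, so this illustrates the layout, not the exact wire encoding:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical sketch: serializes one execution context with one flow-control message,
// in the same field order that handleFlowControlPacket reads.
final class FlowControlPacketSketch {
    static byte[] write(long executionId, int destVertexId, int destOrdinal, int sendSeqLimitCompressed)
            throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bos)) {
            out.writeInt(1);                      // executionCtxCount
            out.writeLong(executionId);           // executionId
            out.writeInt(1);                      // flowCtlMsgCount
            out.writeInt(destVertexId);           // destVertexId
            out.writeInt(destOrdinal);            // destOrdinal
            out.writeInt(sendSeqLimitCompressed); // sendSeqLimitCompressed
        }
        return bos.toByteArray();
    }
}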
Use of com.hazelcast.spi.NodeEngine in project hazelcast-jet by hazelcast.
The class ExecutionPlan, method initialize.
public void initialize(NodeEngine nodeEngine, long jobId, long executionId, SnapshotContext snapshotContext) {
    this.nodeEngine = nodeEngine;
    this.executionId = executionId;
    initProcSuppliers();
    initDag();
    this.ptionArrgmt = new PartitionArrangement(partitionOwners, nodeEngine.getThisAddress());
    JetInstance instance = getJetInstance(nodeEngine);
    for (VertexDef vertex : vertices) {
        Collection<? extends Processor> processors = createProcessors(vertex, vertex.localParallelism());

        // create StoreSnapshotTasklet and the queues to it
        QueuedPipe<Object>[] snapshotQueues = new QueuedPipe[vertex.localParallelism()];
        Arrays.setAll(snapshotQueues, i -> new OneToOneConcurrentArrayQueue<>(SNAPSHOT_QUEUE_SIZE));
        ConcurrentConveyor<Object> ssConveyor = ConcurrentConveyor.concurrentConveyor(null, snapshotQueues);
        StoreSnapshotTasklet ssTasklet = new StoreSnapshotTasklet(snapshotContext, jobId,
                new ConcurrentInboundEdgeStream(ssConveyor, 0, 0, lastSnapshotId, true, -1,
                        "ssFrom:" + vertex.name()),
                nodeEngine, vertex.name(), vertex.isHigherPriorityUpstream());
        tasklets.add(ssTasklet);

        int localProcessorIdx = 0;
        for (Processor p : processors) {
            int globalProcessorIndex = vertex.getProcIdxOffset() + localProcessorIdx;
            String loggerName = createLoggerName(p.getClass().getName(), vertex.name(), globalProcessorIndex);
            ProcCtx context = new ProcCtx(instance, nodeEngine.getSerializationService(),
                    nodeEngine.getLogger(loggerName), vertex.name(), globalProcessorIndex,
                    jobConfig.getProcessingGuarantee(), vertex.localParallelism(), vertex.totalParallelism());
            String probePrefix = String.format("jet.job.%s.%s#%d",
                    idToString(executionId), vertex.name(), localProcessorIdx);
            ((NodeEngineImpl) nodeEngine).getMetricsRegistry().scanAndRegister(p, probePrefix);

            // createOutboundEdgeStreams() populates localConveyorMap and edgeSenderConveyorMap.
            // Also populates instance fields: senderMap, receiverMap, tasklets.
            List<OutboundEdgeStream> outboundStreams = createOutboundEdgeStreams(vertex, localProcessorIdx);
            List<InboundEdgeStream> inboundStreams = createInboundEdgeStreams(vertex, localProcessorIdx);

            OutboundCollector snapshotCollector = new ConveyorCollector(ssConveyor, localProcessorIdx, null);
            ProcessorTasklet processorTasklet = new ProcessorTasklet(context, p, inboundStreams, outboundStreams,
                    snapshotContext, snapshotCollector, jobConfig.getMaxWatermarkRetainMillis());
            tasklets.add(processorTasklet);
            this.processors.add(p);
            localProcessorIdx++;
        }
    }
    List<ReceiverTasklet> allReceivers = receiverMap.values().stream()
            .flatMap(o -> o.values().stream())
            .flatMap(a -> a.values().stream())
            .collect(toList());
    tasklets.addAll(allReceivers);
}
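The final statement flattens the three-level receiverMap (vertex id -> ordinal -> member address -> tasklet) into a flat list with two flatMap steps. A self-contained sketch of the same idiom, with String standing in for the tasklet type and illustrative keys:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static java.util.stream.Collectors.toList;

public class NestedMapFlatten {
    public static void main(String[] args) {
        // vertexId -> ordinal -> memberName -> receiver (String stands in for ReceiverTasklet)
        Map<String, String> byMember = new HashMap<>();
        byMember.put("member-a", "receiver-1-0-a");
        Map<Integer, Map<String, String>> byOrdinal = new HashMap<>();
        byOrdinal.put(0, byMember);
        Map<Integer, Map<Integer, Map<String, String>>> receivers = new HashMap<>();
        receivers.put(1, byOrdinal);

        // Flatten all three levels down to the leaf values, as initialize() does with receiverMap
        List<String> all = receivers.values().stream()
                .flatMap(o -> o.values().stream())
                .flatMap(a -> a.values().stream())
                .collect(toList());
        System.out.println(all); // [receiver-1-0-a]
    }
}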
Use of com.hazelcast.spi.NodeEngine in project hazelcast-jet by hazelcast.
The class ExecutionPlan, method memberToSenderConveyorMap.
/**
 * Creates (if absent) for the given edge one sender tasklet per remote member,
 * each with a single conveyor with a number of producer queues feeding it.
 * Populates the {@link #senderMap} and {@link #tasklets} fields.
 */
private Map<Address, ConcurrentConveyor<Object>> memberToSenderConveyorMap(
        Map<String, Map<Address, ConcurrentConveyor<Object>>> edgeSenderConveyorMap, EdgeDef edge) {
    assert edge.isDistributed() : "Edge is not distributed";
    return edgeSenderConveyorMap.computeIfAbsent(edge.edgeId(), x -> {
        final Map<Address, ConcurrentConveyor<Object>> addrToConveyor = new HashMap<>();
        for (Address destAddr : remoteMembers.get()) {
            final ConcurrentConveyor<Object> conveyor = createConveyorArray(
                    1, edge.sourceVertex().localParallelism(), edge.getConfig().getQueueSize())[0];
            final ConcurrentInboundEdgeStream inboundEdgeStream = newEdgeStream(edge, conveyor,
                    "sender-toVertex:" + edge.destVertex().name()
                            + "-toMember:" + destAddr.toString().replace('.', '-'));
            final int destVertexId = edge.destVertex().vertexId();
            final SenderTasklet t = new SenderTasklet(inboundEdgeStream, nodeEngine, destAddr,
                    executionId, destVertexId, edge.getConfig().getPacketSizeLimit());
            senderMap.computeIfAbsent(destVertexId, xx -> new HashMap<>())
                     .computeIfAbsent(edge.destOrdinal(), xx -> new HashMap<>())
                     .put(destAddr, t);
            tasklets.add(t);
            addrToConveyor.put(destAddr, conveyor);
        }
        return addrToConveyor;
    });
}
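The senderMap update chains computeIfAbsent twice to materialize the two outer levels of a three-level map before the final put, avoiding explicit null checks. A standalone sketch of the idiom, with illustrative string values standing in for SenderTasklet and Address:

import java.util.HashMap;
import java.util.Map;

public class NestedComputeIfAbsent {
    // vertexId -> ordinal -> address -> tasklet (String stands in for SenderTasklet)
    private static final Map<Integer, Map<Integer, Map<String, String>>> senderMap = new HashMap<>();

    public static void main(String[] args) {
        put(7, 0, "10-0-0-1:5701", "tasklet-A");
        put(7, 0, "10-0-0-2:5701", "tasklet-B"); // same vertex/ordinal: the inner maps are reused
        System.out.println(senderMap); // {7={0={10-0-0-1:5701=tasklet-A, 10-0-0-2:5701=tasklet-B}}}
    }

    static void put(int vertexId, int ordinal, String address, String tasklet) {
        senderMap.computeIfAbsent(vertexId, x -> new HashMap<>())
                 .computeIfAbsent(ordinal, x -> new HashMap<>())
                 .put(address, tasklet);
    }
}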
Use of com.hazelcast.spi.NodeEngine in project hazelcast-jet by hazelcast.
The class ExecutionPlanBuilder, method createExecutionPlans.
public static Map<MemberInfo, ExecutionPlan> createExecutionPlans(NodeEngine nodeEngine, MembersView membersView,
                                                                  DAG dag, JobConfig jobConfig, long lastSnapshotId) {
    final JetInstance instance = getJetInstance(nodeEngine);
    final int defaultParallelism = instance.getConfig().getInstanceConfig().getCooperativeThreadCount();
    final Collection<MemberInfo> members = new HashSet<>(membersView.size());
    final Address[] partitionOwners = new Address[nodeEngine.getPartitionService().getPartitionCount()];
    initPartitionOwnersAndMembers(nodeEngine, membersView, members, partitionOwners);
    final List<Address> addresses = members.stream().map(MemberInfo::getAddress).collect(toList());
    final int clusterSize = members.size();
    final boolean isJobDistributed = clusterSize > 1;
    final EdgeConfig defaultEdgeConfig = instance.getConfig().getDefaultEdgeConfig();
    final Map<MemberInfo, ExecutionPlan> plans = members.stream()
            .collect(toMap(m -> m, m -> new ExecutionPlan(partitionOwners, jobConfig, lastSnapshotId)));
    final Map<String, Integer> vertexIdMap = assignVertexIds(dag);
    for (Entry<String, Integer> entry : vertexIdMap.entrySet()) {
        final Vertex vertex = dag.getVertex(entry.getKey());
        final ProcessorMetaSupplier metaSupplier = vertex.getMetaSupplier();
        final int vertexId = entry.getValue();
        final int localParallelism = determineParallelism(vertex,
                metaSupplier.preferredLocalParallelism(), defaultParallelism);
        final int totalParallelism = localParallelism * clusterSize;
        final List<EdgeDef> inbound = toEdgeDefs(dag.getInboundEdges(vertex.getName()), defaultEdgeConfig,
                e -> vertexIdMap.get(e.getSourceName()), isJobDistributed);
        final List<EdgeDef> outbound = toEdgeDefs(dag.getOutboundEdges(vertex.getName()), defaultEdgeConfig,
                e -> vertexIdMap.get(e.getDestName()), isJobDistributed);
        final ILogger logger = nodeEngine.getLogger(String.format("%s.%s#ProcessorMetaSupplier",
                metaSupplier.getClass().getName(), vertex.getName()));
        metaSupplier.init(new MetaSupplierCtx(instance, logger, vertex.getName(), localParallelism, totalParallelism));
        Function<Address, ProcessorSupplier> procSupplierFn = metaSupplier.get(addresses);
        int procIdxOffset = 0;
        for (Entry<MemberInfo, ExecutionPlan> e : plans.entrySet()) {
            final ProcessorSupplier processorSupplier = procSupplierFn.apply(e.getKey().getAddress());
            checkSerializable(processorSupplier, "ProcessorSupplier in vertex '" + vertex.getName() + '\'');
            final VertexDef vertexDef = new VertexDef(vertexId, vertex.getName(), processorSupplier,
                    procIdxOffset, localParallelism, totalParallelism);
            vertexDef.addInboundEdges(inbound);
            vertexDef.addOutboundEdges(outbound);
            e.getValue().addVertex(vertexDef);
            procIdxOffset += localParallelism;
        }
    }
    return plans;
}
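The procIdxOffset bookkeeping hands each member a disjoint slice of global processor indexes: member i's processors get indexes [i * localParallelism, (i + 1) * localParallelism), which is why totalParallelism = localParallelism * clusterSize. A small arithmetic sketch with illustrative values:

public class ProcessorIndexing {
    public static void main(String[] args) {
        int localParallelism = 4;                              // processors per member (illustrative)
        int clusterSize = 3;                                   // members in the cluster (illustrative)
        int totalParallelism = localParallelism * clusterSize; // 12, as computed in createExecutionPlans
        int procIdxOffset = 0;
        for (int member = 0; member < clusterSize; member++) {
            for (int localIdx = 0; localIdx < localParallelism; localIdx++) {
                // matches globalProcessorIndex = vertex.getProcIdxOffset() + localProcessorIdx in initialize()
                int globalProcessorIndex = procIdxOffset + localIdx;
                System.out.printf("member %d, local %d -> global %d%n", member, localIdx, globalProcessorIndex);
            }
            procIdxOffset += localParallelism;                 // advance by localParallelism per member
        }
        System.out.println("totalParallelism = " + totalParallelism);
    }
}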