Use of com.hazelcast.internal.nio.Connection in project hazelcast by hazelcast.
From the class Networking, method createFlowControlPacket:
private Map<Address, byte[]> createFlowControlPacket() throws IOException {
    class MemberData {
        final BufferObjectDataOutput output = createObjectDataOutput(nodeEngine, lastFlowPacketSize);
        final Connection memberConnection;
        Long startedExecutionId;

        MemberData(Address address) {
            memberConnection = getMemberConnection(nodeEngine, address);
        }
    }

    Map<Address, MemberData> res = new HashMap<>();
    for (ExecutionContext execCtx : jobExecutionService.getExecutionContexts()) {
        Map<SenderReceiverKey, ReceiverTasklet> receiverMap = execCtx.receiverMap();
        if (receiverMap == null) {
            continue;
        }
        for (Entry<SenderReceiverKey, ReceiverTasklet> en : receiverMap.entrySet()) {
            assert !en.getKey().address.equals(nodeEngine.getThisAddress());
            MemberData md = res.computeIfAbsent(en.getKey().address, address -> new MemberData(address));
            if (md.startedExecutionId == null) {
                md.startedExecutionId = execCtx.executionId();
                md.output.writeLong(md.startedExecutionId);
            }
            assert en.getKey().vertexId != TERMINAL_VERTEX_ID;
            md.output.writeInt(en.getKey().vertexId);
            md.output.writeInt(en.getKey().ordinal);
            md.output.writeInt(en.getValue().updateAndGetSendSeqLimitCompressed(md.memberConnection));
        }
        for (MemberData md : res.values()) {
            if (md.startedExecutionId != null) {
                // write a mark to terminate the values for this execution
                md.output.writeInt(TERMINAL_VERTEX_ID);
                md.startedExecutionId = null;
            }
        }
    }
    for (MemberData md : res.values()) {
        assert md.output.position() > 0;
        // write a mark to terminate all executions. Execution IDs are generated by
        // the Flake ID generator and are normally > 0; we use MIN_VALUE as the terminator.
        md.output.writeLong(TERMINAL_EXECUTION_ID);
    }
    // finalize the packets
    int maxSize = 0;
    for (Entry<Address, MemberData> entry : res.entrySet()) {
        byte[] data = entry.getValue().output.toByteArray();
        // we break type safety to avoid creating a new map: we replace the values
        // with a different type in place
        @SuppressWarnings({"unchecked", "rawtypes"})
        Entry<Address, byte[]> entry1 = (Entry) entry;
        entry1.setValue(data);
        if (data.length > maxSize) {
            maxSize = data.length;
        }
    }
    lastFlowPacketSize = maxSize;
    return (Map) res;
}
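
Read back, each packet is a sequence of groups, one per execution: a long executionId, then (vertexId, ordinal, compressedSeqLimit) int triplets terminated by TERMINAL_VERTEX_ID, with the whole sequence closed by TERMINAL_EXECUTION_ID. A minimal decoding sketch of that layout (a hypothetical reader, assuming an ObjectDataInput over the packet bytes and the sender's terminator constants; the actual receive path in Networking may differ):

// Hypothetical decoder for the layout produced by createFlowControlPacket().
// Assumes `in` wraps one packet's bytes.
static void readFlowControlPacket(ObjectDataInput in) throws IOException {
    long executionId;
    while ((executionId = in.readLong()) != TERMINAL_EXECUTION_ID) {
        int vertexId;
        while ((vertexId = in.readInt()) != TERMINAL_VERTEX_ID) {
            int ordinal = in.readInt();
            int compressedSeqLimit = in.readInt();
            // look up the sender tasklet for (executionId, vertexId, ordinal)
            // and apply the decompressed send-sequence limit here
        }
    }
}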
Use of com.hazelcast.internal.nio.Connection in project hazelcast by hazelcast.
From the class SqlClientService, method fetchAsync:
public void fetchAsync(Connection connection, QueryId queryId, int cursorBufferSize, SqlClientResult res) {
    ClientMessage requestMessage = SqlFetchCodec.encodeRequest(queryId, cursorBufferSize);
    ClientInvocationFuture future = invokeAsync(requestMessage, connection);
    future.whenComplete(withTryCatch(logger,
            (message, error) -> handleFetchResponse(connection, res, message, error)));
}
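
The withTryCatch wrapper ensures that a throwable escaping the completion callback is logged rather than silently swallowed by the future. A minimal sketch of the idiom (assuming Hazelcast's ILogger; the real helper, ExceptionUtil.withTryCatch, may differ in detail):

// Sketch: wrap a completion callback so its failures are logged, not lost.
static <T, U> BiConsumer<T, U> withTryCatch(ILogger logger, BiConsumer<T, U> callback) {
    return (response, error) -> {
        try {
            callback.accept(response, error);
        } catch (Throwable t) {
            logger.severe("Exception in completion callback", t);
        }
    };
}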
Use of com.hazelcast.internal.nio.Connection in project hazelcast by hazelcast.
From the class TcpClientConnectionTest, method destroyConnection_whenDestroyedMultipleTimes_thenListenerRemoveCalledOnce:
@Test
public void destroyConnection_whenDestroyedMultipleTimes_thenListenerRemoveCalledOnce() {
    HazelcastInstance server = hazelcastFactory.newHazelcastInstance();
    HazelcastInstance client = hazelcastFactory.newHazelcastClient();
    HazelcastClientInstanceImpl clientImpl = ClientTestUtil.getHazelcastClientInstanceImpl(client);
    ClientConnectionManager connectionManager = clientImpl.getConnectionManager();

    final CountingConnectionListener listener = new CountingConnectionListener();
    connectionManager.addConnectionListener(listener);

    UUID serverUuid = server.getCluster().getLocalMember().getUuid();
    final Connection connectionToServer = connectionManager.getConnection(serverUuid);
    ReconnectListener reconnectListener = new ReconnectListener();
    clientImpl.getLifecycleService().addLifecycleListener(reconnectListener);

    // close the connection; the client is expected to reconnect
    connectionToServer.close(null, null);
    assertOpenEventually(reconnectListener.reconnectedLatch);

    // close the same, already destroyed, connection a second time
    connectionToServer.close(null, null);

    assertEqualsEventually(() -> listener.connectionRemovedCount.get(), 1);
    sleepMillis(100);
    assertEquals("connection removed should be called only once", 1, listener.connectionRemovedCount.get());
}
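
CountingConnectionListener is a test helper that is not part of the snippet; a plausible reconstruction (hypothetical, the actual helper in the test class may differ) looks like this:

// Hypothetical reconstruction of the counting listener used by the test above.
static class CountingConnectionListener implements ConnectionListener {
    final AtomicInteger connectionAddedCount = new AtomicInteger();
    final AtomicInteger connectionRemovedCount = new AtomicInteger();

    @Override
    public void connectionAdded(Connection connection) {
        connectionAddedCount.incrementAndGet();
    }

    @Override
    public void connectionRemoved(Connection connection) {
        connectionRemovedCount.incrementAndGet();
    }
}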
Use of com.hazelcast.internal.nio.Connection in project hazelcast by hazelcast.
From the class ExecutionPlan, method initialize:
/**
* A method called on the members as part of the InitExecutionOperation.
* Creates tasklets, inboxes/outboxes and connects these to make them ready
* for a later StartExecutionOperation.
*/
public void initialize(NodeEngineImpl nodeEngine,
                       long jobId,
                       long executionId,
                       @Nonnull SnapshotContext snapshotContext,
                       ConcurrentHashMap<String, File> tempDirectories,
                       InternalSerializationService jobSerializationService) {
    this.nodeEngine = nodeEngine;
    this.jobClassLoaderService =
            ((JetServiceBackend) nodeEngine.getService(JetServiceBackend.SERVICE_NAME)).getJobClassLoaderService();
    this.executionId = executionId;
    initProcSuppliers(jobId, tempDirectories, jobSerializationService);
    initDag(jobSerializationService);

    this.ptionArrgmt = new PartitionArrangement(partitionAssignment, nodeEngine.getThisAddress());
    Set<Integer> higherPriorityVertices = VertexDef.getHigherPriorityVertices(vertices);
    for (Address destAddr : remoteMembers.get()) {
        Connection conn = getMemberConnection(nodeEngine, destAddr);
        if (conn == null) {
            throw new TopologyChangedException("no connection to job participant: " + destAddr);
        }
        memberConnections.put(destAddr, conn);
    }
    for (VertexDef vertex : vertices) {
        ClassLoader processorClassLoader = isLightJob ? null
                : jobClassLoaderService.getProcessorClassLoader(jobId, vertex.name());
        Collection<? extends Processor> processors = doWithClassLoader(processorClassLoader,
                () -> createProcessors(vertex, vertex.localParallelism()));
        String jobPrefix = prefix(jobConfig.getName(), jobId, vertex.name());

        // create StoreSnapshotTasklet and the queues to it
        ConcurrentConveyor<Object> ssConveyor = null;
        if (!isLightJob) {
            // Note that we create the snapshot queues for all non-light jobs, even if they
            // don't have a processing guarantee enabled, because in EE one can request a
            // snapshot also for non-snapshotted jobs.
            @SuppressWarnings("unchecked")
            QueuedPipe<Object>[] snapshotQueues = new QueuedPipe[vertex.localParallelism()];
            Arrays.setAll(snapshotQueues, i -> new OneToOneConcurrentArrayQueue<>(SNAPSHOT_QUEUE_SIZE));
            ssConveyor = ConcurrentConveyor.concurrentConveyor(null, snapshotQueues);
            ILogger storeSnapshotLogger = prefixedLogger(nodeEngine.getLogger(StoreSnapshotTasklet.class), jobPrefix);
            StoreSnapshotTasklet ssTasklet = new StoreSnapshotTasklet(snapshotContext,
                    ConcurrentInboundEdgeStream.create(ssConveyor, 0, 0, true, jobPrefix + "/ssFrom", null),
                    new AsyncSnapshotWriterImpl(nodeEngine, snapshotContext, vertex.name(), memberIndex, memberCount,
                            jobSerializationService),
                    storeSnapshotLogger, vertex.name(), higherPriorityVertices.contains(vertex.vertexId()));
            tasklets.add(ssTasklet);
        }

        int localProcessorIdx = 0;
        for (Processor processor : processors) {
            int globalProcessorIndex = memberIndex * vertex.localParallelism() + localProcessorIdx;
            String processorPrefix = prefix(jobConfig.getName(), jobId, vertex.name(), globalProcessorIndex);
            ILogger logger = prefixedLogger(nodeEngine.getLogger(processor.getClass()), processorPrefix);
            ProcCtx context = new ProcCtx(nodeEngine, jobId, executionId, getJobConfig(), logger, vertex.name(),
                    localProcessorIdx, globalProcessorIndex, isLightJob, partitionAssignment,
                    vertex.localParallelism(), memberIndex, memberCount, tempDirectories,
                    jobSerializationService, subject, processorClassLoader);

            // createOutboundEdgeStreams() populates localConveyorMap and edgeSenderConveyorMap.
            // It also populates the instance fields: senderMap, receiverMap, tasklets.
            List<OutboundEdgeStream> outboundStreams =
                    createOutboundEdgeStreams(vertex, localProcessorIdx, jobPrefix, jobSerializationService);
            List<InboundEdgeStream> inboundStreams =
                    createInboundEdgeStreams(vertex, localProcessorIdx, jobPrefix, globalProcessorIndex);
            OutboundCollector snapshotCollector = ssConveyor == null ? null
                    : new ConveyorCollector(ssConveyor, localProcessorIdx, null);

            // vertices which are only used for snapshot restore are not marked as "source=true"
            // in metrics; snapshot-restore edges are also not considered for determining the source tag
            boolean isSource = vertex.inboundEdges().stream().allMatch(EdgeDef::isSnapshotRestoreEdge)
                    && !vertex.isSnapshotVertex();

            ProcessorTasklet processorTasklet = new ProcessorTasklet(context,
                    nodeEngine.getExecutionService().getExecutor(TASKLET_INIT_CLOSE_EXECUTOR_NAME),
                    jobSerializationService, processor, inboundStreams, outboundStreams,
                    snapshotContext, snapshotCollector, isSource);
            tasklets.add(processorTasklet);
            this.processors.add(processor);
            localProcessorIdx++;
        }
    }
    List<ReceiverTasklet> allReceivers = receiverMap.values().stream()
            .flatMap(o -> o.values().stream())
            .flatMap(a -> a.values().stream())
            .collect(toList());
    tasklets.addAll(allReceivers);
}
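
Note the index arithmetic: globalProcessorIndex = memberIndex * vertex.localParallelism() + localProcessorIdx gives each processor of a vertex a unique, contiguous index across the cluster. A toy illustration with made-up values (3 members, local parallelism 2; not from the source):

// Made-up values: 3 members, local parallelism 2 for one vertex.
int localParallelism = 2;
for (int memberIndex = 0; memberIndex < 3; memberIndex++) {
    for (int localProcessorIdx = 0; localProcessorIdx < localParallelism; localProcessorIdx++) {
        int globalProcessorIndex = memberIndex * localParallelism + localProcessorIdx;
        // member 0 -> globals 0, 1; member 1 -> globals 2, 3; member 2 -> globals 4, 5
        System.out.println("member " + memberIndex + " local " + localProcessorIdx
                + " -> global " + globalProcessorIndex);
    }
}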
Use of com.hazelcast.internal.nio.Connection in project hazelcast by hazelcast.
From the class SqlClientExecuteCloseRaceTest, method testExecuteClose:
@Test
public void testExecuteClose() {
    QueryId queryId = QueryId.create(UUID.randomUUID());

    // Send "execute"
    Connection connection = clientService.getQueryConnection();
    ClientMessage executeResponse = sendExecuteRequest(connection, queryId);
    checkExecuteResponse(executeResponse, true);
    assertEquals(1, memberService.getInternalService().getClientStateRegistry().getCursorCount());

    // Send "close"
    ClientMessage closeRequest = SqlCloseCodec.encodeRequest(queryId);
    clientService.invokeOnConnection(connection, closeRequest);
    assertEquals(0, memberService.getInternalService().getClientStateRegistry().getCursorCount());
}
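
Since the class name suggests a race test, the same execute/close pair could be probed repeatedly to widen the race window; a hypothetical stress variant reusing only the helpers already shown above (not part of the original test class):

@Test
public void testExecuteCloseRepeatedly() {
    // hypothetical harness: repeat the execute/close pair many times
    for (int i = 0; i < 100; i++) {
        QueryId queryId = QueryId.create(UUID.randomUUID());
        Connection connection = clientService.getQueryConnection();
        checkExecuteResponse(sendExecuteRequest(connection, queryId), true);
        clientService.invokeOnConnection(connection, SqlCloseCodec.encodeRequest(queryId));
        assertEquals(0, memberService.getInternalService().getClientStateRegistry().getCursorCount());
    }
}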