Use of com.hazelcast.spi.impl.NodeEngine in project hazelcast by hazelcast.
Class XAResourceImpl, method clearRemoteTransactions.
private void clearRemoteTransactions(Xid xid) {
    NodeEngine nodeEngine = getNodeEngine();
    IPartitionService partitionService = nodeEngine.getPartitionService();
    OperationService operationService = nodeEngine.getOperationService();
    SerializableXID serializableXID =
            new SerializableXID(xid.getFormatId(), xid.getGlobalTransactionId(), xid.getBranchQualifier());
    Data xidData = nodeEngine.toData(serializableXID);
    int partitionId = partitionService.getPartitionId(xidData);
    ClearRemoteTransactionOperation operation = new ClearRemoteTransactionOperation(xidData);
    operationService.invokeOnPartition(SERVICE_NAME, operation, partitionId);
}
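
The essence of this snippet is key-based routing: the XID is serialized and its bytes decide which partition (and therefore which cluster member) owns the transaction record. A minimal standalone sketch of the idea, with hypothetical names and a plain polynomial hash standing in for Hazelcast's internal partition hashing:

import java.nio.charset.StandardCharsets;

public final class PartitionRouting {

    // Hypothetical stand-in for IPartitionService.getPartitionId(Data):
    // hash the serialized key and map it onto [0, partitionCount).
    static int partitionIdFor(byte[] serializedKey, int partitionCount) {
        int hash = 0;
        for (byte b : serializedKey) {
            hash = 31 * hash + b;
        }
        // Mask the sign bit instead of Math.abs: abs(Integer.MIN_VALUE) is negative.
        return (hash & Integer.MAX_VALUE) % partitionCount;
    }

    public static void main(String[] args) {
        byte[] xidData = "format:42|gtrid:abc|bqual:xyz".getBytes(StandardCharsets.UTF_8);
        // 271 is Hazelcast's default partition count.
        System.out.println("partitionId = " + partitionIdFor(xidData, 271));
    }
}

Because the mapping is a pure function of the serialized key, every member routes the same XID to the same partition without coordination.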
Use of com.hazelcast.spi.impl.NodeEngine in project hazelcast by hazelcast.
Class XATransaction, method commitAsync.
public void commitAsync(BiConsumer callback) {
    if (state != PREPARED) {
        throw new IllegalStateException("Transaction is not prepared");
    }
    checkTimeout();
    state = COMMITTING;
    BiConsumer wrappedCallback = (input, throwable) -> {
        try {
            callback.accept(input, throwable);
        } finally {
            if (throwable != null) {
                transactionLog.onCommitFailure();
            } else {
                transactionLog.onCommitSuccess();
            }
        }
    };
    transactionLog.commitAsync(nodeEngine, wrappedCallback);
    // We should rethrow exception if transaction is not TWO_PHASE
    state = COMMITTED;
}
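
The interesting detail here is the callback wrapper: the transaction log's success/failure bookkeeping runs in a finally block, so it executes even if the user-supplied callback throws. A self-contained sketch of the same pattern, with illustrative logging in place of the transaction log:

import java.util.function.BiConsumer;

public final class CallbackWrapping {

    // Mirrors the pattern above: invoke the caller's callback first, then
    // record the outcome in a finally block so bookkeeping happens even if
    // the callback itself throws. Names are illustrative, not Hazelcast's.
    static <T> BiConsumer<T, Throwable> withOutcomeLogging(BiConsumer<T, Throwable> callback) {
        return (result, throwable) -> {
            try {
                callback.accept(result, throwable);
            } finally {
                if (throwable != null) {
                    System.out.println("commit failed: " + throwable);
                } else {
                    System.out.println("commit succeeded: " + result);
                }
            }
        };
    }

    public static void main(String[] args) {
        BiConsumer<String, Throwable> user = (r, t) -> System.out.println("user callback: " + r);
        withOutcomeLogging(user).accept("ok", null);
        withOutcomeLogging(user).accept(null, new RuntimeException("boom"));
    }
}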
Use of com.hazelcast.spi.impl.NodeEngine in project hazelcast by hazelcast.
Class XAResourceImpl, method recover.
@Override
public Xid[] recover(int flag) throws XAException {
    NodeEngine nodeEngine = getNodeEngine();
    XAService xaService = getService();
    OperationService operationService = nodeEngine.getOperationService();
    ClusterService clusterService = nodeEngine.getClusterService();
    Collection<Member> memberList = clusterService.getMembers();
    List<Future<SerializableList>> futureList = new ArrayList<Future<SerializableList>>();
    for (Member member : memberList) {
        if (member.localMember()) {
            continue;
        }
        CollectRemoteTransactionsOperation op = new CollectRemoteTransactionsOperation();
        Address address = member.getAddress();
        InternalCompletableFuture<SerializableList> future =
                operationService.invokeOnTarget(SERVICE_NAME, op, address);
        futureList.add(future);
    }
    Set<SerializableXID> xids = new HashSet<SerializableXID>(xaService.getPreparedXids());
    for (Future<SerializableList> future : futureList) {
        try {
            SerializableList xidSet = future.get();
            for (Data xidData : xidSet) {
                SerializableXID xid = nodeEngine.toObject(xidData);
                xids.add(xid);
            }
        } catch (InterruptedException e) {
            currentThread().interrupt();
            throw new XAException(XAException.XAER_RMERR);
        } catch (MemberLeftException e) {
            logger.warning("Member left while recovering", e);
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof HazelcastInstanceNotActiveException || cause instanceof TargetNotMemberException) {
                logger.warning("Member left while recovering", e);
            } else {
                throw new XAException(XAException.XAER_RMERR);
            }
        }
    }
    return xids.toArray(new SerializableXID[0]);
}
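
recover() fans out one operation per remote member, merges the collected XIDs into a set, and tolerates members that left while failing hard on anything else. A standalone sketch of that fan-out-and-merge pattern using plain java.util.concurrent, with simulated member results instead of cluster operations:

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public final class RecoveryFanOut {

    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(3);
        // One task per remote member; the third simulates a member departure.
        List<Callable<List<String>>> perMemberTasks = List.of(
                () -> List.of("xid-1", "xid-2"),
                () -> List.of("xid-2", "xid-3"),
                () -> { throw new IllegalStateException("member left"); });
        Set<String> xids = new HashSet<>();
        for (Future<List<String>> future : pool.invokeAll(perMemberTasks)) {
            try {
                xids.addAll(future.get());
            } catch (ExecutionException e) {
                // In the real code, HazelcastInstanceNotActiveException and
                // TargetNotMemberException are logged and skipped; any other
                // cause becomes XAException(XAER_RMERR).
                System.out.println("skipping departed member: " + e.getCause());
            }
        }
        pool.shutdown();
        // Set semantics de-duplicate XIDs reported by multiple members.
        System.out.println("recovered xids: " + xids);
    }
}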
Use of com.hazelcast.spi.impl.NodeEngine in project hazelcast by hazelcast.
Class XaReplicationOperation, method run.
@Override
public void run() throws Exception {
    XAService xaService = getService();
    NodeEngine nodeEngine = getNodeEngine();
    for (XATransactionDTO transactionDTO : migrationData) {
        XATransaction transaction = new XATransaction(nodeEngine, transactionDTO.getRecords(),
                transactionDTO.getTxnId(), transactionDTO.getXid(), transactionDTO.getOwnerUuid(),
                transactionDTO.getTimeoutMilis(), transactionDTO.getStartTime());
        xaService.putTransaction(transaction);
    }
}
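
The replication operation rehydrates serialized DTOs back into live XATransaction objects on the receiving member. A minimal sketch of that DTO-to-registry rehydration loop, with hypothetical record types standing in for XATransactionDTO and XATransaction:

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class ReplicationSketch {

    // Hypothetical minimal DTO and live-object types, mirroring the shape of
    // the loop above: each migrated DTO becomes a live object that is
    // re-registered on the receiving member.
    record TxnDto(String txnId, long startTime) { }
    record Txn(String txnId, long startTime) { }

    public static void main(String[] args) {
        List<TxnDto> migrationData = List.of(new TxnDto("t1", 100L), new TxnDto("t2", 200L));
        Map<String, Txn> registry = new ConcurrentHashMap<>();
        for (TxnDto dto : migrationData) {
            registry.put(dto.txnId(), new Txn(dto.txnId(), dto.startTime()));
        }
        System.out.println("replicated transactions: " + registry.keySet());
    }
}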
Use of com.hazelcast.spi.impl.NodeEngine in project hazelcast by hazelcast.
Class ExecutionPlanBuilder, method createExecutionPlans.
@SuppressWarnings("checkstyle:ParameterNumber")
public static Map<MemberInfo, ExecutionPlan> createExecutionPlans(
        NodeEngineImpl nodeEngine, List<MemberInfo> memberInfos, DAG dag, long jobId, long executionId,
        JobConfig jobConfig, long lastSnapshotId, boolean isLightJob, Subject subject) {
    final int defaultParallelism = nodeEngine.getConfig().getJetConfig().getCooperativeThreadCount();
    final Map<MemberInfo, int[]> partitionsByMember = getPartitionAssignment(nodeEngine, memberInfos);
    final Map<Address, int[]> partitionsByAddress = partitionsByMember.entrySet().stream()
            .collect(toMap(en -> en.getKey().getAddress(), Entry::getValue));
    final List<Address> addresses = toList(partitionsByMember.keySet(), MemberInfo::getAddress);
    final int clusterSize = partitionsByMember.size();
    final boolean isJobDistributed = clusterSize > 1;
    final EdgeConfig defaultEdgeConfig = nodeEngine.getConfig().getJetConfig().getDefaultEdgeConfig();
    final Map<MemberInfo, ExecutionPlan> plans = new HashMap<>();
    int memberIndex = 0;
    for (MemberInfo member : partitionsByMember.keySet()) {
        plans.put(member, new ExecutionPlan(partitionsByAddress, jobConfig, lastSnapshotId,
                memberIndex++, clusterSize, isLightJob, subject));
    }
    final Map<String, Integer> vertexIdMap = assignVertexIds(dag);
    for (Entry<String, Integer> entry : vertexIdMap.entrySet()) {
        final Vertex vertex = dag.getVertex(entry.getKey());
        assert vertex != null;
        final ProcessorMetaSupplier metaSupplier = vertex.getMetaSupplier();
        final int vertexId = entry.getValue();
        // The local parallelism determination here is effective only in jobs
        // submitted as a DAG. In jobs submitted as a pipeline, the determination
        // has already happened during the conversion to a DAG, so no vertex is
        // left with LP=-1.
        final int localParallelism = vertex.determineLocalParallelism(defaultParallelism);
        final int totalParallelism = localParallelism * clusterSize;
        final List<EdgeDef> inbound = toEdgeDefs(dag.getInboundEdges(vertex.getName()), defaultEdgeConfig,
                e -> vertexIdMap.get(e.getSourceName()), isJobDistributed);
        final List<EdgeDef> outbound = toEdgeDefs(dag.getOutboundEdges(vertex.getName()), defaultEdgeConfig,
                e -> vertexIdMap.get(e.getDestName()), isJobDistributed);
        String prefix = prefix(jobConfig.getName(), jobId, vertex.getName(), "#PMS");
        ILogger logger = prefixedLogger(nodeEngine.getLogger(metaSupplier.getClass()), prefix);
        JetServiceBackend jetBackend = nodeEngine.getService(JetServiceBackend.SERVICE_NAME);
        JobClassLoaderService jobClassLoaderService = jetBackend.getJobClassLoaderService();
        ClassLoader processorClassLoader = jobClassLoaderService.getClassLoader(jobId);
        try {
            doWithClassLoader(processorClassLoader, () -> metaSupplier.init(new MetaSupplierCtx(
                    nodeEngine, jobId, executionId, jobConfig, logger, vertex.getName(), localParallelism,
                    totalParallelism, clusterSize, isLightJob, partitionsByAddress, subject, processorClassLoader)));
        } catch (Exception e) {
            throw sneakyThrow(e);
        }
        Function<? super Address, ? extends ProcessorSupplier> procSupplierFn =
                doWithClassLoader(processorClassLoader, () -> metaSupplier.get(addresses));
        for (Entry<MemberInfo, ExecutionPlan> e : plans.entrySet()) {
            final ProcessorSupplier processorSupplier =
                    doWithClassLoader(processorClassLoader, () -> procSupplierFn.apply(e.getKey().getAddress()));
            if (!isLightJob) {
                // The serializability check is skipped for light jobs: the user would
                // get the error anyway, just with less information, and a normal job
                // can be recommended for the extra checks.
                checkSerializable(processorSupplier, "ProcessorSupplier in vertex '" + vertex.getName() + '\'');
            }
            final VertexDef vertexDef = new VertexDef(vertexId, vertex.getName(), processorSupplier, localParallelism);
            vertexDef.addInboundEdges(inbound);
            vertexDef.addOutboundEdges(outbound);
            e.getValue().addVertex(vertexDef);
        }
    }
    return plans;
}
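
One structural point worth isolating from this method: each member receives its own ExecutionPlan carrying a distinct memberIndex, while job-wide inputs (partition assignment, cluster size, snapshot id) are shared across all plans. A reduced sketch of that per-member assignment loop, with hypothetical types in place of MemberInfo and ExecutionPlan:

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public final class PlanAssignmentSketch {

    // Hypothetical stand-in for ExecutionPlan: per-member index plus shared
    // cluster-wide inputs.
    record Plan(int memberIndex, int clusterSize) { }

    public static void main(String[] args) {
        List<String> members = List.of("memberA", "memberB", "memberC");
        Map<String, Plan> plans = new LinkedHashMap<>();
        int memberIndex = 0;
        for (String member : members) {
            // Each member gets a unique index; clusterSize is the same for all.
            plans.put(member, new Plan(memberIndex++, members.size()));
        }
        plans.forEach((m, p) -> System.out.println(m + " -> index " + p.memberIndex()));
    }
}

The distinct memberIndex is what lets each member later derive its own share of the work (for example, which global processor indices it owns) from an otherwise identical plan.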