Use of org.opendaylight.mdsal.binding.util.TypedReadWriteTransaction in project netvirt by opendaylight.
The class VrfEntryListener, method handleAdjacencyAndVpnOpInterfaceDeletion:
/**
 * Check all the adjacencies in VpnInterfaceOpData and decide whether to delete the entire interface or only the
 * adjacency.
 * Remove the Adjacency from VpnInterfaceOpData if it is not the primary adjacency;
 * if it is the primary adjacency, mark it for deletion instead.
 * If the size of the adjacency list is <= 2 and all entries are marked for deletion,
 * delete the entire VpnInterfaceOpData entry.
 *
 * @param vrfEntry - VrfEntry removed
 * @param ifName - Interface name from the VrfEntry
 * @param vpnName - VPN name of the corresponding VRF
 * @param tx - ReadWrite Tx
 */
@SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
        justification = "https://github.com/spotbugs/spotbugs/issues/811")
private void handleAdjacencyAndVpnOpInterfaceDeletion(VrfEntry vrfEntry, String ifName, String vpnName,
        TypedReadWriteTransaction<Operational> tx) throws ExecutionException, InterruptedException {
    InstanceIdentifier<Adjacency> adjacencyIid =
            FibUtil.getAdjacencyIdentifierOp(ifName, vpnName, vrfEntry.getDestPrefix());
    Optional<Adjacency> adjacencyOptional = tx.read(adjacencyIid).get();
    if (adjacencyOptional.isPresent()) {
        if (adjacencyOptional.get().getAdjacencyType() != Adjacency.AdjacencyType.PrimaryAdjacency) {
            // Non-primary adjacency: remove it outright.
            tx.delete(adjacencyIid);
        } else {
            // Primary adjacency: only mark it for deletion here.
            tx.merge(adjacencyIid,
                    new AdjacencyBuilder(adjacencyOptional.get()).setMarkedForDeletion(true).build());
        }
    }
    Optional<AdjacenciesOp> optAdjacencies = tx.read(FibUtil.getAdjListPathOp(ifName, vpnName)).get();
    if (!optAdjacencies.isPresent() || optAdjacencies.get().getAdjacency() == null) {
        return;
    }
    @NonNull List<Adjacency> adjacencies = new ArrayList<>(optAdjacencies.get().nonnullAdjacency().values());
    // Delete the whole VpnInterfaceOpDataEntry once only marked-for-deletion primary adjacencies remain.
    if (adjacencies.size() <= 2 && adjacencies.stream().allMatch(
            adjacency -> adjacency.getAdjacencyType() == Adjacency.AdjacencyType.PrimaryAdjacency
                    && adjacency.isMarkedForDeletion() != null && adjacency.isMarkedForDeletion())) {
        LOG.info("Clean up vpn interface {} to vpn {} list.", ifName, vpnName);
        tx.delete(FibUtil.getVpnInterfaceOpDataEntryIdentifier(ifName, vpnName));
    }
}
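For context, such a method is normally driven through a ManagedNewTransactionRunner, which creates, passes in, and commits the TypedReadWriteTransaction<Operational> (the same pattern the ElanNodeListener example below uses for the config datastore). A minimal sketch, assuming a txRunner field on the class; the wrapper method itself is hypothetical, not netvirt code:

import static org.opendaylight.mdsal.binding.util.Datastore.OPERATIONAL;

import org.opendaylight.infrautils.utils.concurrent.LoggingFutures;

// Hypothetical call site: run the cleanup inside one managed operational
// transaction and log (rather than swallow) any commit failure.
private void cleanUpAdjacency(VrfEntry vrfEntry, String ifName, String vpnName) {
    LoggingFutures.addErrorLogging(
            txRunner.callWithNewReadWriteTransactionAndSubmit(OPERATIONAL,
                    tx -> handleAdjacencyAndVpnOpInterfaceDeletion(vrfEntry, ifName, vpnName, tx)),
            LOG, "Failed to clean up adjacency for interface {}", ifName);
}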
Use of org.opendaylight.mdsal.binding.util.TypedReadWriteTransaction in project netvirt by opendaylight.
The class HAOpNodeListener, method readAndCopyChildPsOpToParent:
private void readAndCopyChildPsOpToParent(Node childNode, TypedReadWriteTransaction<Operational> tx) {
    String childGlobalNodeId = childNode.getNodeId().getValue();
    List<InstanceIdentifier> childPsIids = new ArrayList<>();
    HwvtepGlobalAugmentation hwvtepGlobalAugmentation = childNode.augmentation(HwvtepGlobalAugmentation.class);
    if (hwvtepGlobalAugmentation == null || HwvtepHAUtil.isEmpty(hwvtepGlobalAugmentation.nonnullSwitches().values())) {
        // No switches listed on the child global node: fall back to the connected-node cache.
        haOpClusteredListener.getConnectedNodes().stream()
                .filter(connectedIid -> IS_PS_CHILD_TO_GLOBAL_NODE.test(childGlobalNodeId, connectedIid))
                .forEach(childPsIids::add);
    } else {
        hwvtepGlobalAugmentation.getSwitches().values()
                .forEach(switches -> childPsIids.add(switches.getSwitchRef().getValue()));
    }
    if (childPsIids.isEmpty()) {
        LOG.info("HAOpNodeListener No child ps found for global {}", childGlobalNodeId);
    }
    childPsIids.forEach(psIid -> {
        try {
            InstanceIdentifier<Node> childPsIid = psIid;
            Optional<Node> childPsNode = tx.read(childPsIid).get();
            if (childPsNode.isPresent()) {
                LOG.debug("Child oper PS node found");
                onPsNodeAdd(childPsIid, childPsNode.get(), tx);
            } else {
                LOG.error("HAOpNodeListener Child oper ps node not found {}", childPsIid);
            }
        } catch (ExecutionException | InterruptedException e) {
            LOG.error("HAOpNodeListener Failed to read child ps node {}", psIid, e);
        }
    });
}
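The IS_PS_CHILD_TO_GLOBAL_NODE predicate used above is defined elsewhere in netvirt; a plausible reconstruction is sketched below, assuming the common hwvtep convention that a physical-switch node id is derived from its global node id as "<globalNodeId>/physicalswitch/<switchName>". This is an illustration of the check, not the actual source:

import java.util.function.BiPredicate;

// Guessed shape of the predicate: does this connected iid name a PS node
// that hangs off the given global node?
static final BiPredicate<String, InstanceIdentifier<Node>> IS_PS_CHILD_TO_GLOBAL_NODE =
        (globalNodeId, psIid) -> {
            String psNodeId = psIid.firstKeyOf(Node.class).getNodeId().getValue();
            return psNodeId.startsWith(globalNodeId + "/physicalswitch");
        };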
Use of org.opendaylight.mdsal.binding.util.TypedReadWriteTransaction in project netvirt by opendaylight.
The class ElanNodeListener, method createArpRequestMatchFlows:
private void createArpRequestMatchFlows(Uint64 dpId, TypedReadWriteTransaction<Configuration> tx)
        throws ExecutionException, InterruptedException {
    long arpRequestGroupId = ArpResponderUtil.retrieveStandardArpResponderGroupId(idManagerService);
    List<BucketInfo> buckets = ArpResponderUtil.getDefaultBucketInfos(NwConstants.ARP_RESPONDER_TABLE);
    LOG.trace("Installing group flow on dpn {}", dpId);
    GroupEntity groupEntity = MDSALUtil.buildGroupEntity(dpId, arpRequestGroupId,
            ArpResponderConstant.GROUP_FLOW_NAME.value(), GroupTypes.GroupAll, buckets);
    mdsalManager.addGroup(tx, groupEntity);
    InstanceIdentifier<Group> groupIid = ElanUtils.getGroupInstanceid(dpId, arpRequestGroupId);
    if (tx.read(groupIid).get().isPresent()) {
        // Group is already in the config datastore: install the match flows in the same transaction.
        LOG.info("group {} is present in the config hence adding the flow", arpRequestGroupId);
        createArpRequestMatchFlowsForGroup(dpId, arpRequestGroupId, tx);
        return;
    }
    // Otherwise register a one-shot callback that installs the flows in a fresh transaction
    // once the group appears in the config datastore, with a 5 second timeout.
    eventCallbacks.onAddOrUpdate(LogicalDatastoreType.CONFIGURATION, groupIid, (unused, newGroupId) -> {
        LOG.info("group {} added in the config", arpRequestGroupId);
        LoggingFutures.addErrorLogging(txRunner.callWithNewReadWriteTransactionAndSubmit(CONFIGURATION,
                innerConfTx -> createArpRequestMatchFlowsForGroup(dpId, arpRequestGroupId, innerConfTx)),
                LOG, "Error adding flow for the group {}", arpRequestGroupId);
        return DataTreeEventCallbackRegistrar.NextAction.UNREGISTER;
    }, Duration.ofSeconds(5), iid -> LOG.error("arpRequestGroupId {} not found in Config datastore", arpRequestGroupId));
}
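Since only the group's presence matters in the check above, the same test could use TypedReadWriteTransaction.exists(), which the NodeCopier example below also relies on; it avoids deserializing the Group object. A minimal sketch of the equivalent check:

// Equivalent presence check without materializing the Group:
if (tx.exists(groupIid).get()) {
    LOG.info("group {} is present in the config hence adding the flow", arpRequestGroupId);
    createArpRequestMatchFlowsForGroup(dpId, arpRequestGroupId, tx);
    return;
}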
Use of org.opendaylight.mdsal.binding.util.TypedReadWriteTransaction in project netvirt by opendaylight.
The class NodeCopier, method copyPSNode:
public <D extends Datastore> void copyPSNode(Optional<Node> srcPsNodeOptional, InstanceIdentifier<Node> srcPsPath,
        InstanceIdentifier<Node> dstPsPath, InstanceIdentifier<Node> dstGlobalPath,
        Class<D> logicalDatastoreType, TypedReadWriteTransaction<D> tx) {
    if (!srcPsNodeOptional.isPresent() && Configuration.class.equals(logicalDatastoreType)) {
        // Source PS node not supplied: read it asynchronously and retry in a fresh batched transaction.
        Futures.addCallback(tx.read(srcPsPath), new FutureCallback<Optional<Node>>() {

            @Override
            public void onSuccess(Optional<Node> nodeOptional) {
                HAJobScheduler.getInstance().submitJob(() -> {
                    TypedReadWriteTransaction<D> tx1 = new BatchedTransaction(logicalDatastoreType);
                    if (nodeOptional.isPresent()) {
                        copyPSNode(nodeOptional, srcPsPath, dstPsPath, dstGlobalPath, logicalDatastoreType, tx1);
                    } else {
                        // Source still absent: create a bare destination node carrying just its node-id.
                        tx1.put(dstPsPath,
                                new NodeBuilder().setNodeId(dstPsPath.firstKeyOf(Node.class).getNodeId()).build());
                    }
                });
            }

            @Override
            public void onFailure(Throwable throwable) {
                LOG.error("NodeCopier Failed to read src ps node {}", srcPsPath, throwable);
            }
        }, MoreExecutors.directExecutor());
        return;
    }
    NodeBuilder dstPsNodeBuilder = HwvtepHAUtil.getNodeBuilderForPath(dstPsPath);
    PhysicalSwitchAugmentationBuilder dstPsAugmentationBuilder = new PhysicalSwitchAugmentationBuilder();
    PhysicalSwitchAugmentation srcPsAugmentation = srcPsNodeOptional.get().augmentation(PhysicalSwitchAugmentation.class);
    Node existingDstPsNode = null;
    try {
        existingDstPsNode = HwvtepHAUtil.readNode(tx, dstPsPath);
    } catch (ExecutionException | InterruptedException e) {
        LOG.error("NodeCopier Read Failed for Node:{}", dstPsPath, e);
    }
    PhysicalSwitchAugmentation existingDstPsAugmentation =
            HwvtepHAUtil.getPhysicalSwitchAugmentationOfNode(existingDstPsNode);
    mergeOpManagedByAttributes(srcPsAugmentation, dstPsAugmentationBuilder, dstGlobalPath);
    if (Operational.class.equals(logicalDatastoreType)) {
        psAugmentationMerger.mergeOperationalData(dstPsAugmentationBuilder, existingDstPsAugmentation,
                srcPsAugmentation, dstPsPath);
        psNodeMerger.mergeOperationalData(dstPsNodeBuilder, existingDstPsNode, srcPsNodeOptional.get(), dstPsPath);
        dstPsNodeBuilder.addAugmentation(dstPsAugmentationBuilder.build());
        Node dstPsNode = dstPsNodeBuilder.build();
        tx.mergeParentStructureMerge(dstPsPath, dstPsNode);
    } else {
        /* The change below reduces the size of the tx.put() generated here:
           1. Check whether the child node already exists in config-topo.
           2. If not present, construct the child ps-node with augmentation data only and do tx.put(node),
              followed by tx.put(termination-point) for each termination point present in the parent ps-node.
           3. If present, construct the augmentation data and do tx.put(augmentation), again followed by
              tx.put(termination-point) for each termination point present in the parent ps-node.
         */
        String dstNodeName = dstPsNodeBuilder.getNodeId().getValue();
        psAugmentationMerger.mergeConfigData(dstPsAugmentationBuilder, srcPsAugmentation, dstPsPath);
        try {
            boolean isEntryExists = tx.exists(dstPsPath).get();
            if (isEntryExists) {
                LOG.info("Destination PS Node: {} already exists in config-topo.", dstNodeName);
                InstanceIdentifier<PhysicalSwitchAugmentation> dstPsAugPath =
                        dstPsPath.augmentation(PhysicalSwitchAugmentation.class);
                tx.put(dstPsAugPath, dstPsAugmentationBuilder.build());
            } else {
                LOG.info("Destination PS Node: {} does not exist yet in config-topo.", dstNodeName);
                dstPsNodeBuilder.addAugmentation(dstPsAugmentationBuilder.build());
                Node dstPsNode = dstPsNodeBuilder.build();
                tx.put(dstPsPath, dstPsNode);
            }
        } catch (InterruptedException | ExecutionException e) {
            LOG.error("Error while checking existence of node {} in config-topo", dstPsPath, e);
        }
        psNodeMerger.mergeConfigData(dstPsNodeBuilder, srcPsNodeOptional.get(), dstPsPath);
        if (dstPsNodeBuilder.getTerminationPoint() != null) {
            dstPsNodeBuilder.getTerminationPoint().values().forEach(terminationPoint -> {
                InstanceIdentifier<TerminationPoint> terminationPointPath =
                        dstPsPath.child(TerminationPoint.class, terminationPoint.key());
                tx.put(terminationPointPath, terminationPoint);
                LOG.trace("Destination PS Node: {} updated with termination-point : {}",
                        dstNodeName, terminationPoint.key());
            });
        }
    }
    LOG.debug("Copied {} physical switch node from {} to {}", logicalDatastoreType, srcPsPath, dstPsPath);
}
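A hypothetical call site for the config-side copy, assuming a nodeCopier field and illustrative src/dst paths; it follows the same managed-transaction pattern shown earlier and is not actual netvirt code:

// Hypothetical invocation: copy a child's physical-switch node to the parent
// path inside one managed config transaction.
txRunner.callWithNewReadWriteTransactionAndSubmit(CONFIGURATION, tx -> {
    Optional<Node> srcPsNode = tx.read(srcPsPath).get();
    nodeCopier.copyPSNode(srcPsNode, srcPsPath, dstPsPath, dstGlobalPath, Configuration.class, tx);
});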
Use of org.opendaylight.mdsal.binding.util.TypedReadWriteTransaction in project netvirt by opendaylight.
The class HwvtepNodeBaseListener, method onDataTreeChanged:
@Override
public void onDataTreeChanged(final Collection<DataTreeModification<Node>> changes) {
    // Batched transaction: writes are submitted internally through the ResourceBatchingManager.
    HAJobScheduler.getInstance().submitJob(() -> {
        TypedReadWriteTransaction tx = getTx();
        try {
            processConnectedNodes(changes, tx);
            processUpdatedNodes(changes, tx);
            processDisconnectedNodes(changes, tx);
            // tx.submit().get();
        } catch (InterruptedException | ExecutionException | ReadFailedException e) {
            LOG.error("Error processing data-tree changes", e);
        }
    });
}
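getTx() is supplied by the listener itself; given the BatchedTransaction usage in the NodeCopier example above, a plausible sketch is the following (an assumption, not the actual implementation):

// Guessed helper: hand out a fresh batching transaction against the operational
// datastore, mirroring new BatchedTransaction(logicalDatastoreType) in NodeCopier.
TypedReadWriteTransaction<Operational> getTx() {
    return new BatchedTransaction(Operational.class);
}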