Use of org.opendaylight.yang.gen.v1.http.openconfig.net.yang.bgp.multiprotocol.rev151009.bgp.common.afi.safi.list.afi.safi.graceful.restart.Config in project netvirt by opendaylight:
the class ElanL2GatewayUtils, method getL2GwDeviceLocalMacsAndRunCallback.
/**
 * Collects the local ucast MACs known for the given L2 gateway device — both from the
 * device's in-memory cache and from the CONFIGURATION datastore topology node — and
 * invokes the supplied callback with the merged set.
 *
 * @param elanName   name of the ELAN whose logical switch the datastore MACs must belong to
 * @param l2gwDevice the L2 gateway device; if {@code null} the method is a no-op
 * @param function   callback invoked (possibly asynchronously) with the collected MACs
 */
public void getL2GwDeviceLocalMacsAndRunCallback(String elanName, L2GatewayDevice l2gwDevice, Function<Collection<MacAddress>, Void> function) {
    if (l2gwDevice == null) {
        return;
    }
    Set<MacAddress> macs = new HashSet<>();
    Collection<LocalUcastMacs> lstUcastLocalMacs = l2gwDevice.getUcastLocalMacs();
    // Guard against a null collection from the device cache (fixes potential NPE on isEmpty()).
    if (lstUcastLocalMacs != null && !lstUcastLocalMacs.isEmpty()) {
        macs.addAll(lstUcastLocalMacs.stream()
                .filter(Objects::nonNull)
                .map(mac -> new MacAddress(mac.getMacEntryKey().getValue().toLowerCase(Locale.getDefault())))
                .collect(Collectors.toList()));
    }
    InstanceIdentifier<Node> nodeIid = HwvtepSouthboundUtils.createInstanceIdentifier(new NodeId(l2gwDevice.getHwvtepNodeId()));
    Futures.addCallback(broker.newReadOnlyTransaction().read(LogicalDatastoreType.CONFIGURATION, nodeIid), new FutureCallback<Optional<Node>>() {
        @Override
        public void onSuccess(Optional<Node> configNode) {
            if (configNode != null && configNode.isPresent()) {
                HwvtepGlobalAugmentation augmentation = configNode.get().augmentation(HwvtepGlobalAugmentation.class);
                if (augmentation != null && augmentation.nonnullLocalUcastMacs() != null) {
                    // Only keep MACs belonging to this ELAN's logical switch.
                    macs.addAll(augmentation.nonnullLocalUcastMacs().values().stream()
                            .filter(mac -> getLogicalSwitchName(mac).equals(elanName))
                            .map(mac -> mac.getMacEntryKey())
                            .collect(Collectors.toSet()));
                }
            }
            // Always run the callback, even when the config node is absent, so the MACs
            // gathered from the device cache are not silently dropped (previously the
            // callback never fired when the node read came back empty).
            function.apply(macs);
        }
        @Override
        public void onFailure(Throwable throwable) {
            // Pass the throwable to the logger so the failure cause is not lost.
            LOG.error("Failed to read config topology node {}", nodeIid, throwable);
        }
    }, MoreExecutors.directExecutor());
}
Use of org.opendaylight.yang.gen.v1.http.openconfig.net.yang.bgp.multiprotocol.rev151009.bgp.common.afi.safi.list.afi.safi.graceful.restart.Config in project netvirt by opendaylight:
the class L2GatewayConnectionListener, method loadL2GwDeviceCache.
/**
 * Loads the L2 gateway device cache from all HWVTEP config topology nodes.
 * HA devices are handled by {@code createHANodes}; non-HA devices (exactly one
 * physical-switch node per device name) are added to the cache directly, paired
 * with their corresponding global node.
 *
 * @param tx read transaction used to fetch all config topology nodes
 */
private void loadL2GwDeviceCache(TypedReadTransaction tx) {
    allNodes = (Map<InstanceIdentifier<Node>, Node>) readAllConfigNodes(tx).stream().collect(toMap(TO_NODE_PATH, Function.identity()));
    LOG.trace("Loading all config nodes");
    Set<InstanceIdentifier<Node>> nodeIids = allNodes.keySet();
    Map<String, List<InstanceIdentifier<Node>>> psIidsByDevice = nodeIids.stream().filter(IS_PS_NODE).collect(groupingBy(GET_DEVICE_NAME, toList()));
    // Process HA nodes first.
    createHANodes(nodeIids);
    // Process non-HA nodes: a non-HA device has exactly one ps node iid.
    for (List<InstanceIdentifier<Node>> psIids : psIidsByDevice.values()) {
        if (psIids.size() != 1) {
            continue;
        }
        InstanceIdentifier<Node> psIid = psIids.get(0);
        Node psNode = allNodes.get(psIid);
        Node globalNode = allNodes.get(TO_GLOBAL_PATH.apply(psNode));
        if (globalNode != null) {
            addL2DeviceToCache(psIid, globalNode, psNode);
        }
    }
}
Use of org.opendaylight.yang.gen.v1.http.openconfig.net.yang.bgp.multiprotocol.rev151009.bgp.common.afi.safi.list.afi.safi.graceful.restart.Config in project netvirt by opendaylight:
the class ElanL2GatewayMulticastUtils, method putRemoteMcastMac.
/**
 * Put remote mcast mac in config DS.
 *
 * @param nodeId
 *            the node id
 * @param logicalSwitchName
 *            the logical switch name
 * @param tepIps
 *            the tep ips
 * @param addCase
 *            {@code true} when handling an add; {@code false} for delete, in which
 *            case the config mcast cache is updated eagerly before the batched write
 * @return future that completes when the write is queued to the batching manager, or
 *         an immediate future when the write is skipped as a no-op
 */
private ListenableFuture<Void> putRemoteMcastMac(NodeId nodeId, String logicalSwitchName, ArrayList<IpAddress> tepIps, boolean addCase) {
    List<LocatorSet> locators = new ArrayList<>();
    for (IpAddress tepIp : tepIps) {
        locators.add(buildLocatorSet(nodeId, tepIp));
    }
    HwvtepLogicalSwitchRef lsRef = new HwvtepLogicalSwitchRef(HwvtepSouthboundUtils.createLogicalSwitchesInstanceIdentifier(nodeId, new HwvtepNodeName(logicalSwitchName)));
    RemoteMcastMacs newMac = new RemoteMcastMacsBuilder().setMacEntryKey(new MacAddress(ElanConstants.UNKNOWN_DMAC)).setLogicalSwitchRef(lsRef).setLocatorSet(locators).build();
    InstanceIdentifier<RemoteMcastMacs> iid = HwvtepSouthboundUtils.createRemoteMcastMacsInstanceIdentifier(nodeId, newMac.key());
    RemoteMcastMacs existingMac = configMcastCache.getMac(newMac.getLogicalSwitchRef().getValue());
    if (!addCase) {
        // proactively update the cache for delete cases; do not wait for the batch
        // manager to delete from the cache. While the delete is in progress from the
        // batch manager the skip below may trigger — updating the cache upfront
        // prevents that.
        configMcastCache.added(iid, newMac);
    }
    if (addCase && existingMac != null && existingMac.getLocatorSet() != null) {
        // Parameterized types instead of raw Set/List (fixes unchecked warnings and
        // restores compile-time type safety for the containsAll comparison).
        Set<LocatorSet> existingLocators = new HashSet<>(existingMac.getLocatorSet());
        List<LocatorSet> newLocators = newMac.getLocatorSet();
        if (existingLocators.containsAll(newLocators)) {
            // Everything to be written is already present; skip the redundant update.
            return Futures.immediateFuture(null);
        }
    }
    return ResourceBatchingManager.getInstance().put(ResourceBatchingManager.ShardResource.CONFIG_TOPOLOGY, iid, newMac);
}
Use of org.opendaylight.yang.gen.v1.http.openconfig.net.yang.bgp.multiprotocol.rev151009.bgp.common.afi.safi.list.afi.safi.graceful.restart.Config in project netvirt by opendaylight:
the class HAOpNodeListener, method onGlobalNodeAdd.
@Override
public void onGlobalNodeAdd(InstanceIdentifier<Node> childGlobalPath, Node childNode, TypedReadWriteTransaction<Operational> tx) {
// copy child global node to ha global node
// create ha global config node if not present
// copy ha global config node to child global config node
LOG.info("HAOpNodeListener Node connected {} - Checking if Ha or Non-Ha enabled {}", childNode.getNodeId().getValue(), getManagers(childNode));
haOpClusteredListener.onGlobalNodeAdd(childGlobalPath, childNode, tx);
txRunner.callWithNewReadWriteTransactionAndSubmit(CONFIGURATION, configTx -> {
if (IS_NOT_HA_CHILD.test(childGlobalPath)) {
LOG.info("HAOpNodeListener The connected node is not a HA child {}", childNode.getNodeId().getValue());
if (hwvtepHACache.isHAParentNode(childGlobalPath)) {
LOG.info("HAOpNodeListener this is Parent Node {}", childNode.getNodeId().getValue());
HwvtepGlobalAugmentation globalAugmentation = childNode.augmentation(HwvtepGlobalAugmentation.class);
String operDbVersion = globalAugmentation.getDbVersion();
try {
Optional<Node> globalConfigNodeOptional = configTx.read(childGlobalPath).get();
if (globalConfigNodeOptional.isPresent()) {
HwvtepGlobalAugmentation globalConfigAugmentation = globalConfigNodeOptional.get().augmentation(HwvtepGlobalAugmentation.class);
String configDbVersion = globalConfigAugmentation.getDbVersion();
if (operDbVersion != null && !operDbVersion.equals(configDbVersion)) {
LOG.info("Change in Db version from {} to {} for Node {}", configDbVersion, operDbVersion, childGlobalPath);
HwvtepGlobalAugmentationBuilder haBuilder = new HwvtepGlobalAugmentationBuilder(globalConfigAugmentation);
haBuilder.setDbVersion(operDbVersion);
NodeBuilder nodeBuilder = new NodeBuilder(childNode);
nodeBuilder.addAugmentation(haBuilder.build());
configTx.merge(childGlobalPath, nodeBuilder.build());
} else {
LOG.debug("No Change in Db version from {} to {} for Node {}", configDbVersion, operDbVersion, childGlobalPath);
}
}
} catch (ExecutionException | InterruptedException ex) {
LOG.error("HAOpNodeListener Failed to read node {} from Config DS", childGlobalPath);
}
}
return;
}
InstanceIdentifier<Node> haNodePath = hwvtepHACache.getParent(childGlobalPath);
LOG.info("HAOpNodeListener Ha enabled child node connected {} create parent oper node", childNode.getNodeId().getValue());
try {
nodeCopier.copyGlobalNode(Optional.ofNullable(childNode), childGlobalPath, haNodePath, OPERATIONAL, tx);
Optional<Node> existingDstGlobalNodeOptional = tx.read(haNodePath).get();
List<Managers> managers = HwvtepHAUtil.buildManagersForHANode(Optional.ofNullable(childNode).get(), existingDstGlobalNodeOptional);
Optional<Node> globalNodeOptional = configTx.read(haNodePath).get();
if (globalNodeOptional.isPresent()) {
// Also update the manager section in config which helps in cluster reboot scenarios
managers.stream().forEach(manager -> {
InstanceIdentifier<Managers> managerIid = haNodePath.augmentation(HwvtepGlobalAugmentation.class).child(Managers.class, manager.key());
configTx.put(managerIid, manager);
});
nodeCopier.copyGlobalNode(globalNodeOptional, haNodePath, childGlobalPath, CONFIGURATION, tx);
} else {
NodeBuilder nodeBuilder = new NodeBuilder().setNodeId(haNodePath.firstKeyOf(Node.class).getNodeId());
HwvtepGlobalAugmentationBuilder augBuilder = new HwvtepGlobalAugmentationBuilder();
augBuilder.setManagers(managers);
if (existingDstGlobalNodeOptional.isPresent()) {
HwvtepGlobalAugmentation srcGlobalAugmentation = existingDstGlobalNodeOptional.get().augmentation(HwvtepGlobalAugmentation.class);
if (srcGlobalAugmentation != null) {
augBuilder.setDbVersion(srcGlobalAugmentation.getDbVersion());
}
}
nodeBuilder.addAugmentation(augBuilder.build());
configTx.put(haNodePath, nodeBuilder.build());
}
} catch (ExecutionException | InterruptedException e) {
LOG.error("HAOpNodeListener Failed to read nodes {} , {} ", childGlobalPath, haNodePath);
}
});
readAndCopyChildPsOpToParent(childNode, tx);
}
Use of org.opendaylight.yang.gen.v1.http.openconfig.net.yang.bgp.multiprotocol.rev151009.bgp.common.afi.safi.list.afi.safi.graceful.restart.Config in project netvirt by opendaylight:
the class HAOpNodeListener, method onPsNodeAdd.
/**
 * Handles the connection of a physical-switch (ps) node belonging to an HA child.
 * Copies the child ps oper node to the HA parent ps oper node, then copies the HA ps
 * config node down to the child ps config — creating the HA ps config node (with
 * tunnel IPs and termination points taken from the child) when it does not exist yet.
 */
@Override
public void onPsNodeAdd(InstanceIdentifier<Node> childPsPath, Node childPsNode, TypedReadWriteTransaction<Operational> tx) {
    // copy child ps oper node to ha ps oper node
    // copy ha ps config node to child ps config
    haOpClusteredListener.onPsNodeAdd(childPsPath, childPsNode, tx);
    InstanceIdentifier<Node> childGlobalPath = HwvtepHAUtil.getGlobalNodePathFromPSNode(childPsNode);
    if (!haOpClusteredListener.getConnectedNodes().contains(childGlobalPath)) {
        LOG.error("HAOpNodeListener Ignoring ps node add as global node not found {}", childPsNode.getNodeId().getValue());
        return;
    }
    if (IS_NOT_HA_CHILD.test(childGlobalPath)) {
        if (!hwvtepHACache.isHAParentNode(childGlobalPath)) {
            LOG.error("HAOpNodeListener Ignoring ps node add as the node is not ha child {}", childPsNode.getNodeId().getValue());
        }
        return;
    }
    LOG.info("HAOpNodeListener Ha ps child connected {} ", getNodeId(childPsPath));
    InstanceIdentifier<Node> haGlobalPath = hwvtepHACache.getParent(childGlobalPath);
    InstanceIdentifier<Node> haPsPath = HwvtepHAUtil.convertPsPath(childPsNode, haGlobalPath);
    txRunner.callWithNewReadWriteTransactionAndSubmit(CONFIGURATION, configTx -> {
        try {
            nodeCopier.copyPSNode(Optional.ofNullable(childPsNode), childPsPath, haPsPath, haGlobalPath, OPERATIONAL, tx);
            Optional<Node> haPsNodeOptional = configTx.read(haPsPath).get();
            if (haPsNodeOptional.isPresent()) {
                nodeCopier.copyPSNode(haPsNodeOptional, haPsPath, childPsPath, childGlobalPath, CONFIGURATION, tx);
            } else {
                PhysicalSwitchAugmentationBuilder psBuilder = new PhysicalSwitchAugmentationBuilder();
                PhysicalSwitchAugmentation srcPsAugmentation = childPsNode.augmentation(PhysicalSwitchAugmentation.class);
                if (srcPsAugmentation != null) {
                    psBuilder.setTunnelIps(srcPsAugmentation.getTunnelIps());
                } else {
                    LOG.error("Physical Switch Augmentation is null for the child ps node: {}", childPsNode);
                }
                // setting tunnel ip and termination points in the parent node
                List<TerminationPoint> terminationPoints = getTerminationPointForConfig(childPsNode);
                NodeBuilder nodeBuilder = new NodeBuilder().setNodeId(haPsPath.firstKeyOf(Node.class).getNodeId());
                nodeBuilder.addAugmentation(psBuilder.build());
                LOG.info("HAOpNodeListener creating the HAParent PhysicalSwitch {}", haPsPath);
                configTx.put(haPsPath, nodeBuilder.setTerminationPoint(terminationPoints).build());
            }
        } catch (ExecutionException | InterruptedException e) {
            // Log the exception itself so the stack trace is not lost.
            LOG.error("Failed to read nodes {} , {} ", childPsPath, haGlobalPath, e);
        }
    });
}
Aggregations