Use of org.opendaylight.controller.md.sal.common.api.data.ReadFailedException in project netvirt by opendaylight.
The class EvpnMacVrfUtils, method getElanNameByMacvrfiid.
public String getElanNameByMacvrfiid(InstanceIdentifier<MacVrfEntry> instanceIdentifier) {
    try (ReadOnlyTransaction tx = dataBroker.newReadOnlyTransaction()) {
        String rd = instanceIdentifier.firstKeyOf(VrfTables.class).getRouteDistinguisher();
        String elanName = null;
        InstanceIdentifier<EvpnRdToNetwork> iidEvpnRdToNet =
            InstanceIdentifier.builder(EvpnRdToNetworks.class)
                .child(EvpnRdToNetwork.class, new EvpnRdToNetworkKey(rd)).build();
        try {
            Optional<EvpnRdToNetwork> evpnRdToNetwork =
                tx.read(LogicalDatastoreType.CONFIGURATION, iidEvpnRdToNet).checkedGet();
            if (evpnRdToNetwork.isPresent()) {
                elanName = evpnRdToNetwork.get().getNetworkId();
            }
        } catch (ReadFailedException e) {
            LOG.error("getElanNameByMacvrfiid: failed to read EvpnRdToNetwork for rd {}", rd, e);
        }
        return elanName;
    }
}
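The snippet above shows the common synchronous read idiom of this MD-SAL generation: read(), then checkedGet(), catching ReadFailedException. A minimal sketch of that idiom factored into a reusable helper; MdsalReadUtil and readOptional are illustrative names, not netvirt APIs:

import com.google.common.base.Optional;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

public final class MdsalReadUtil {

    private MdsalReadUtil() {
    }

    /**
     * Reads a single object from the given datastore, returning absent on
     * read failure. The try-with-resources block closes the transaction.
     */
    public static <T extends DataObject> Optional<T> readOptional(
            DataBroker broker, LogicalDatastoreType store, InstanceIdentifier<T> path) {
        try (ReadOnlyTransaction tx = broker.newReadOnlyTransaction()) {
            // checkedGet() blocks and rethrows failures as ReadFailedException.
            return tx.read(store, path).checkedGet();
        } catch (ReadFailedException e) {
            // Production code would log here, as the quoted snippet does.
            return Optional.absent();
        }
    }
}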
Use of org.opendaylight.controller.md.sal.common.api.data.ReadFailedException in project netvirt by opendaylight.
The class EvpnUtils, method getDcGatewayIpList.
public Optional<DcGatewayIpList> getDcGatewayIpList() {
    InstanceIdentifier<DcGatewayIpList> dcGatewayIpListInstanceIdentifier = getDcGatewayIpListIdentifier();
    DcGatewayIpList dcGatewayIpListConfig = null;
    try {
        dcGatewayIpListConfig = elanUtils
            .read2(LogicalDatastoreType.CONFIGURATION, dcGatewayIpListInstanceIdentifier).orNull();
    } catch (ReadFailedException e) {
        LOG.error("getDcGatewayIpList: unable to read DcGatewayIpList, exception ", e);
    }
    return Optional.fromNullable(dcGatewayIpListConfig);
}
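Note that the Optional here is Guava's (com.google.common.base.Optional), which is why the snippet uses orNull() and fromNullable(). A hedged usage sketch follows; the evpnUtils field and the getDcGatewayIp()/getIpAddress() getters are assumed from the usual yang-binding naming conventions, not quoted from netvirt:

// Illustrative caller of getDcGatewayIpList(); names are assumptions.
private void logDcGatewayIps() {
    Optional<DcGatewayIpList> dcGatewayIpListOptional = evpnUtils.getDcGatewayIpList();
    if (!dcGatewayIpListOptional.isPresent()) {
        LOG.debug("No DC gateway ip list configured");
        return;
    }
    for (DcGatewayIp dcGatewayIp : dcGatewayIpListOptional.get().getDcGatewayIp()) {
        LOG.info("DC gateway ip: {}", dcGatewayIp.getIpAddress());
    }
}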
Use of org.opendaylight.controller.md.sal.common.api.data.ReadFailedException in project netvirt by opendaylight.
The class NodeCopier, method copyPSNode.
@Override
public void copyPSNode(Optional<Node> srcPsNodeOptional,
                       InstanceIdentifier<Node> srcPsPath,
                       InstanceIdentifier<Node> dstPsPath,
                       InstanceIdentifier<Node> dstGlobalPath,
                       LogicalDatastoreType logicalDatastoreType,
                       ReadWriteTransaction tx) throws ReadFailedException {
    if (!srcPsNodeOptional.isPresent() && logicalDatastoreType == CONFIGURATION) {
        Futures.addCallback(tx.read(logicalDatastoreType, srcPsPath), new FutureCallback<Optional<Node>>() {
            @Override
            public void onSuccess(Optional<Node> nodeOptional) {
                HAJobScheduler.getInstance().submitJob(() -> {
                    try {
                        ReadWriteTransaction tx1 = new BatchedTransaction();
                        if (nodeOptional.isPresent()) {
                            copyPSNode(nodeOptional, srcPsPath, dstPsPath, dstGlobalPath,
                                logicalDatastoreType, tx1);
                        } else {
                            // Delete the destination node; see copyGlobalNode for the explanation.
                            HwvtepHAUtil.deleteNodeIfPresent(tx1, logicalDatastoreType, dstPsPath);
                        }
                    } catch (ReadFailedException e) {
                        // Log the path rather than srcPsNodeOptional.get(): the optional
                        // is known to be absent on this branch.
                        LOG.error("Failed to copy src node {}", srcPsPath, e);
                    }
                });
            }

            @Override
            public void onFailure(Throwable throwable) {
                LOG.error("Failed to read src node {}", srcPsPath, throwable);
            }
        });
        return;
    }
    NodeBuilder dstPsNodeBuilder = HwvtepHAUtil.getNodeBuilderForPath(dstPsPath);
    PhysicalSwitchAugmentationBuilder dstPsAugmentationBuilder = new PhysicalSwitchAugmentationBuilder();
    PhysicalSwitchAugmentation srcPsAugmentation =
        srcPsNodeOptional.get().getAugmentation(PhysicalSwitchAugmentation.class);
    Node existingDstPsNode = HwvtepHAUtil.readNode(tx, logicalDatastoreType, dstPsPath);
    PhysicalSwitchAugmentation existingDstPsAugmentation =
        HwvtepHAUtil.getPhysicalSwitchAugmentationOfNode(existingDstPsNode);
    if (OPERATIONAL == logicalDatastoreType) {
        psAugmentationMerger.mergeOperationalData(dstPsAugmentationBuilder, existingDstPsAugmentation,
            srcPsAugmentation, dstPsPath);
        psNodeMerger.mergeOperationalData(dstPsNodeBuilder, existingDstPsNode, srcPsNodeOptional.get(), dstPsPath);
    } else {
        psAugmentationMerger.mergeConfigData(dstPsAugmentationBuilder, srcPsAugmentation, dstPsPath);
        psNodeMerger.mergeConfigData(dstPsNodeBuilder, srcPsNodeOptional.get(), dstPsPath);
    }
    mergeOpManagedByAttributes(srcPsAugmentation, dstPsAugmentationBuilder, dstGlobalPath);
    dstPsNodeBuilder.addAugmentation(PhysicalSwitchAugmentation.class, dstPsAugmentationBuilder.build());
    Node dstPsNode = dstPsNodeBuilder.build();
    tx.merge(logicalDatastoreType, dstPsPath, dstPsNode, true);
    LOG.debug("Copied {} physical switch node from {} to {}", logicalDatastoreType, srcPsPath, dstPsPath);
}
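copyPSNode fetches the existing destination node through HwvtepHAUtil.readNode, whose body is not quoted on this page. A plausible minimal sketch using the same synchronous checkedGet() pattern; the orNull() return convention is an assumption, not the actual netvirt implementation:

// Hypothetical equivalent of HwvtepHAUtil.readNode.
static Node readNode(ReadWriteTransaction tx, LogicalDatastoreType store,
        InstanceIdentifier<Node> path) throws ReadFailedException {
    // checkedGet() blocks on the read and rethrows failures as
    // ReadFailedException, which this helper propagates to its caller.
    return tx.read(store, path).checkedGet().orNull();
}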
Use of org.opendaylight.controller.md.sal.common.api.data.ReadFailedException in project netvirt by opendaylight.
The class NodeCopier, method copyGlobalNode.
@Override
public void copyGlobalNode(Optional<Node> srcGlobalNodeOptional,
                           InstanceIdentifier<Node> srcPath,
                           InstanceIdentifier<Node> dstPath,
                           LogicalDatastoreType logicalDatastoreType,
                           ReadWriteTransaction tx) throws ReadFailedException {
    if (!srcGlobalNodeOptional.isPresent() && logicalDatastoreType == CONFIGURATION) {
        Futures.addCallback(tx.read(logicalDatastoreType, srcPath), new FutureCallback<Optional<Node>>() {
            @Override
            public void onSuccess(Optional<Node> nodeOptional) {
                HAJobScheduler.getInstance().submitJob(() -> {
                    try {
                        ReadWriteTransaction tx1 = new BatchedTransaction();
                        if (nodeOptional.isPresent()) {
                            copyGlobalNode(nodeOptional, srcPath, dstPath, logicalDatastoreType, tx1);
                        } else {
                            // The parent HA global node is absent while a child HA node is
                            // present: both children disconnected or were removed, so the
                            // parent was deleted (see the global node delete handling in
                            // org.opendaylight.netvirt.elan.l2gw.ha.listeners.HAOpNodeListener).
                            // Delete the existing config child node as cleanup.
                            HwvtepHAUtil.deleteNodeIfPresent(tx1, logicalDatastoreType, dstPath);
                        }
                    } catch (ReadFailedException e) {
                        LOG.error("Failed to read source node {}", srcPath, e);
                    }
                });
            }

            @Override
            public void onFailure(Throwable throwable) {
                LOG.error("Failed to read source node {}", srcPath, throwable);
            }
        });
        return;
    }
    HwvtepGlobalAugmentation srcGlobalAugmentation =
        srcGlobalNodeOptional.get().getAugmentation(HwvtepGlobalAugmentation.class);
    if (srcGlobalAugmentation == null) {
        // The source HA global augmentation is absent: both children disconnected or
        // were removed, so the parent was deleted (see the global node delete handling
        // in HAOpNodeListener). Delete the existing config child node as cleanup.
        HwvtepHAUtil.deleteNodeIfPresent(tx, logicalDatastoreType, dstPath);
        return;
    }
    NodeBuilder haNodeBuilder = HwvtepHAUtil.getNodeBuilderForPath(dstPath);
    HwvtepGlobalAugmentationBuilder haBuilder = new HwvtepGlobalAugmentationBuilder();
    Optional<Node> existingDstGlobalNodeOptional = tx.read(logicalDatastoreType, dstPath).checkedGet();
    Node existingDstGlobalNode = existingDstGlobalNodeOptional.orNull();
    HwvtepGlobalAugmentation existingHAGlobalData = HwvtepHAUtil.getGlobalAugmentationOfNode(existingDstGlobalNode);
    globalAugmentationMerger.mergeOperationalData(haBuilder, existingHAGlobalData, srcGlobalAugmentation, dstPath);
    globalNodeMerger.mergeOperationalData(haNodeBuilder, existingDstGlobalNode, srcGlobalNodeOptional.get(), dstPath);
    if (OPERATIONAL == logicalDatastoreType) {
        haBuilder.setManagers(HwvtepHAUtil.buildManagersForHANode(srcGlobalNodeOptional.get(),
            existingDstGlobalNodeOptional));
        // Also update the manager section in config, which helps in cluster reboot scenarios.
        haBuilder.getManagers().forEach(manager -> {
            InstanceIdentifier<Managers> managerIid = dstPath.augmentation(HwvtepGlobalAugmentation.class)
                .child(Managers.class, manager.getKey());
            tx.put(CONFIGURATION, managerIid, manager, true);
        });
    }
    haBuilder.setDbVersion(srcGlobalAugmentation.getDbVersion());
    haNodeBuilder.addAugmentation(HwvtepGlobalAugmentation.class, haBuilder.build());
    Node haNode = haNodeBuilder.build();
    if (OPERATIONAL == logicalDatastoreType) {
        // merge so operational data contributed by other writers survives;
        // config uses put so the copy fully replaces stale data.
        tx.merge(logicalDatastoreType, dstPath, haNode, true);
    } else {
        tx.put(logicalDatastoreType, dstPath, haNode, true);
    }
}
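Both copy methods clean up via HwvtepHAUtil.deleteNodeIfPresent, whose body is also not quoted here. A plausible minimal version under the same MD-SAL API, shown as an assumption rather than the actual netvirt implementation:

// Hypothetical equivalent of HwvtepHAUtil.deleteNodeIfPresent.
static void deleteNodeIfPresent(ReadWriteTransaction tx, LogicalDatastoreType store,
        InstanceIdentifier<Node> path) throws ReadFailedException {
    // Read first so a delete is only issued for nodes that actually exist.
    if (tx.read(store, path).checkedGet().isPresent()) {
        tx.delete(store, path);
    }
}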
Use of org.opendaylight.controller.md.sal.common.api.data.ReadFailedException in project netvirt by opendaylight.
The class MergeCommandsAggregator, method mergeUpdate.
public void mergeUpdate(InstanceIdentifier<Node> dstPath, DataObjectModification mod,
                        LogicalDatastoreType datastoreType, ReadWriteTransaction tx) {
    if (mod == null) {
        return;
    }
    Collection<DataObjectModification> modifications = mod.getModifiedChildren();
    modifications.stream()
        .filter(modification -> skipCopy.negate().test(datastoreType, modification.getDataType()))
        .filter(modification -> commands.get(modification.getDataType()) != null)
        .peek(modification -> LOG.debug("Received {} modification {} copy/delete to {}",
            datastoreType, modification, dstPath))
        .forEach(modification -> {
            MergeCommand mergeCommand = commands.get(modification.getDataType());
            boolean create = modification.getDataAfter() != null;
            DataObject data = create ? modification.getDataAfter() : modification.getDataBefore();
            InstanceIdentifier<DataObject> transformedId = mergeCommand.generateId(dstPath, data);
            DataObject transformedItem = mergeCommand.transform(dstPath, data);
            Optional<DataObject> existingDataOptional;
            try {
                existingDataOptional = tx.read(datastoreType, transformedId).checkedGet();
            } catch (ReadFailedException ex) {
                LOG.error("Failed to read data {} from {}", transformedId, datastoreType, ex);
                return;
            }
            String destination = datastoreType == CONFIGURATION ? "child" : "parent";
            if (create) {
                if (isDataUpdated(existingDataOptional, transformedItem)) {
                    LOG.debug("Copy to {} {} {}", destination, datastoreType, transformedId);
                    tx.put(datastoreType, transformedId, transformedItem, true);
                } else {
                    LOG.debug("Data not updated, skipping copy to {}", transformedId);
                }
            } else {
                if (existingDataOptional.isPresent()) {
                    LOG.debug("Delete from {} {} {}", destination, datastoreType, transformedId);
                    tx.delete(datastoreType, transformedId);
                } else {
                    LOG.debug("Delete skipped for {}", transformedId);
                }
            }
        });
}
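mergeUpdate guards writes with isDataUpdated so unchanged data is not rewritten, but that helper is not shown on this page. A plausible minimal version, an assumption rather than the quoted source:

// Hypothetical isDataUpdated: copy when nothing exists yet, or when the
// transformed item differs from what the datastore already holds.
private boolean isDataUpdated(Optional<DataObject> existingDataOptional, DataObject newData) {
    return !existingDataOptional.isPresent() || !existingDataOptional.get().equals(newData);
}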