use of com.emc.storageos.api.service.impl.resource.blockingestorchestration.context.impl.RpVplexVolumeIngestionContext in project coprhd-controller by CoprHD.
the class BlockRecoverPointIngestOrchestrator method performRPExportIngestion.
/**
* RecoverPoint volumes are expected to have export masks where the volume is exported to
* a RecoverPoint site. Therefore every RP volume (sources, targets, journals) will need to
* go through this code and have their export mask ingested. Even if the mask has already been
* ingested by a previous volume ingestion, this method still needs to update the ExportGroup and
* ExportMask objects to reflect the newly ingested volume as part of its management.
*
* @param parentRequestContext the IngestionRequestContext for the overall ingestion request
* @param volumeContext the RecoverPointVolumeIngestionContext for the volume currently being ingested
* @param unManagedVolume the unmanaged volume being ingested
* @param volume the managed volume
*/
private void performRPExportIngestion(IngestionRequestContext parentRequestContext, RecoverPointVolumeIngestionContext volumeContext, UnManagedVolume unManagedVolume, Volume volume) {
_logger.info("starting RecoverPoint export ingestion for volume {}", volume.forDisplay());
Project project = volumeContext.getProject();
ProtectionSystem protectionSystem = _dbClient.queryObject(ProtectionSystem.class, volume.getProtectionController());
StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, volume.getStorageController());
List<UnManagedExportMask> unManagedRPExportMasks = findUnManagedRPExportMask(protectionSystem, unManagedVolume);
if (unManagedRPExportMasks.isEmpty()) {
_logger.error("Could not find any unmanaged export masks associated with volume: " + unManagedVolume.getLabel());
throw IngestionException.exceptions.noUnManagedExportMaskFound(unManagedVolume.getNativeGuid());
}
// Keep a map of internal site name to varray
Map<String, VirtualArray> internalSiteToVarrayMap = new HashMap<String, VirtualArray>();
internalSiteToVarrayMap.put(volume.getInternalSiteName(), volumeContext.getVarray(unManagedVolume));
// If this is a MetroPoint volume we're going to have multiple ExportMasks/ExportGroups to deal with.
// We'll need to query the backend volumes for extra info to populate internalSiteToVarrayMap so
// we can properly line up the ExportMasks/ExportGroups.
boolean metropoint = RPHelper.isMetroPointVolume(_dbClient, volume);
if (metropoint) {
// We need the VPLEX ingest context to get the backend volume info
VplexVolumeIngestionContext vplexVolumeContext = ((RpVplexVolumeIngestionContext) volumeContext.getVolumeContext()).getVplexVolumeIngestionContext();
for (String associatedVolumeIdStr : vplexVolumeContext.getAssociatedVolumeIds(volume)) {
// Find the associated volumes using the context maps or the db if they are already there
Volume associatedVolume = VolumeIngestionUtil.findVolume(_dbClient, vplexVolumeContext.getBlockObjectsToBeCreatedMap(), vplexVolumeContext.getDataObjectsToBeUpdatedMap(), associatedVolumeIdStr);
String internalSiteName = associatedVolume.getInternalSiteName();
// If we don't already have an entry for this internal site name, let's add it now.
if (!internalSiteToVarrayMap.containsKey(internalSiteName)) {
internalSiteToVarrayMap.put(internalSiteName, _dbClient.queryObject(VirtualArray.class, associatedVolume.getVirtualArray()));
}
}
}
// this will more than likely only loop once.
for (Entry<String, VirtualArray> entry : internalSiteToVarrayMap.entrySet()) {
String internalSiteName = entry.getKey();
VirtualArray virtualArray = entry.getValue();
UnManagedExportMask em = null;
if (metropoint) {
// Since we're flagged for MetroPoint we need to determine which ExportMask to use.
// We need the MetroPoint volume to be added to BOTH ExportGroups that represent the
// two Storage Views on VPLEX for cluster-1 and cluster-2.
// So let's use the varray to find the cluster we're looking for on this pass and match
// it to the maskingViewPath of the UnManagedExportMask.
// This should line things up roughly as:
// VPLEX Storage View 1 -> VPLEX Cluster1 + RPA1
// VPLEX Storage View 2 -> VPLEX Cluster2 + RPA2
String vplexCluster = ConnectivityUtil.getVplexClusterForVarray(virtualArray.getId(), storageSystem.getId(), _dbClient);
// First try and match based on UnManagedExportMask ports
for (UnManagedExportMask exportMask : unManagedRPExportMasks) {
for (String portUri : exportMask.getKnownStoragePortUris()) {
StoragePort port = _dbClient.queryObject(StoragePort.class, URI.create(portUri));
if (port != null && !port.getInactive()) {
String vplexClusterForMask = ConnectivityUtil.getVplexClusterOfPort(port);
if (vplexCluster.equals(vplexClusterForMask)) {
em = exportMask;
break;
}
}
}
if (em != null) {
break;
}
}
if (em == null) {
// It really shouldn't come to this, but leaving this code just in case.
for (UnManagedExportMask exportMask : unManagedRPExportMasks) {
if (exportMask.getMaskingViewPath().contains("cluster-" + vplexCluster)) {
em = exportMask;
break;
}
}
}
} else {
em = unManagedRPExportMasks.get(0);
}
// If the ingested volume's mask name contains the JOURNAL keyword, make sure the ExportGroup created carries
// the corresponding internal flag.
boolean isJournalExport = false;
if (em.getMaskName().toLowerCase().contains(VolumeIngestionUtil.RP_JOURNAL)) {
isJournalExport = true;
}
String exportGroupGeneratedName = RPHelper.generateExportGroupName(protectionSystem, storageSystem, internalSiteName, virtualArray, isJournalExport);
ExportGroup exportGroup = VolumeIngestionUtil.verifyExportGroupExists(parentRequestContext, exportGroupGeneratedName, project.getId(), em.getKnownInitiatorUris(), virtualArray.getId(), _dbClient);
boolean exportGroupCreated = false;
if (null == exportGroup) {
exportGroupCreated = true;
Integer numPaths = em.getZoningMap().size();
_logger.info("Creating Export Group with label {}", em.getMaskName());
exportGroup = RPHelper.createRPExportGroup(exportGroupGeneratedName, virtualArray, project, numPaths, isJournalExport);
}
if (null != exportGroup) {
// check if the ExportGroup has already been fetched
ExportGroup loadedExportGroup = parentRequestContext.findExportGroup(exportGroup.getLabel(), project.getId(), virtualArray.getId(), null, null);
if (null != loadedExportGroup) {
exportGroup = loadedExportGroup;
}
}
volumeContext.setExportGroup(exportGroup);
volumeContext.setExportGroupCreated(exportGroupCreated);
volumeContext.getRpExportGroupMap().put(exportGroup, exportGroupCreated);
// set RP device initiators to be used as the "host" for export mask ingestion
List<Initiator> initiators = new ArrayList<Initiator>();
Iterator<Initiator> initiatorItr = _dbClient.queryIterativeObjects(Initiator.class, URIUtil.toURIList(em.getKnownInitiatorUris()));
while (initiatorItr.hasNext()) {
initiators.add(initiatorItr.next());
}
volumeContext.setDeviceInitiators(initiators);
// find the ingest export strategy and call into it for this unmanaged export mask
IngestExportStrategy ingestStrategy = ingestStrategyFactory.buildIngestExportStrategy(unManagedVolume);
volume = ingestStrategy.ingestExportMasks(unManagedVolume, volume, volumeContext);
if (null == volume) {
// ingestion did not succeed; an exception should already have been thrown by the strategy, but in case it wasn't, throw one here
throw IngestionException.exceptions.generalVolumeException(unManagedVolume.getLabel(), "check the logs for more details");
}
}
}
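The MetroPoint branch above relies on unwrapping the nested ingestion contexts to reach the VPLEX backend volume information. A minimal sketch of that relationship, using only accessors that appear in this listing; the helper method itself (and the assumption that getVolumeContext() is typed as VolumeIngestionContext) is hypothetical:
// Hypothetical helper illustrating the unwrap pattern used in the MetroPoint branch:
// a RecoverPointVolumeIngestionContext wraps an inner volume context, and for RP+VPLEX
// volumes that inner context is an RpVplexVolumeIngestionContext, which in turn exposes
// the VplexVolumeIngestionContext holding the backend (associated) volume information.
private VplexVolumeIngestionContext unwrapVplexContext(RecoverPointVolumeIngestionContext rpContext) {
    VolumeIngestionContext inner = rpContext.getVolumeContext();
    if (inner instanceof RpVplexVolumeIngestionContext) {
        return ((RpVplexVolumeIngestionContext) inner).getVplexVolumeIngestionContext();
    }
    // plain RP volume with no VPLEX front end behind it
    return null;
}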
use of com.emc.storageos.api.service.impl.resource.blockingestorchestration.context.impl.RpVplexVolumeIngestionContext in project coprhd-controller by CoprHD.
the class BlockVplexVolumeIngestOrchestrator method ingestBlockObjects.
@Override
public <T extends BlockObject> T ingestBlockObjects(IngestionRequestContext requestContext, Class<T> clazz) throws IngestionException {
refreshCaches(requestContext.getStorageSystem());
UnManagedVolume unManagedVolume = requestContext.getCurrentUnmanagedVolume();
VolumeIngestionUtil.checkValidVarrayForUnmanagedVolume(unManagedVolume, requestContext.getVarray(unManagedVolume).getId(), getClusterIdToNameMap(requestContext.getStorageSystem()), getVarrayToClusterIdMap(requestContext.getStorageSystem()), _dbClient);
String vplexIngestionMethod = requestContext.getVplexIngestionMethod();
_logger.info("VPLEX ingestion method is " + vplexIngestionMethod);
boolean ingestBackend = (null == vplexIngestionMethod) || vplexIngestionMethod.isEmpty() || (!vplexIngestionMethod.equals(VplexBackendIngestionContext.INGESTION_METHOD_VVOL_ONLY));
VplexVolumeIngestionContext volumeContext = null;
boolean isRpVplexContext = requestContext.getVolumeContext() instanceof RpVplexVolumeIngestionContext;
if (isRpVplexContext) {
// if this volume is RP/VPLEX, we need to get the volume context
// from the RpVplexVolumeIngestionContext
volumeContext = ((RpVplexVolumeIngestionContext) requestContext.getVolumeContext()).getVplexVolumeIngestionContext();
} else {
// this is just a plain VPLEX volume backend ingestion
volumeContext = (VplexVolumeIngestionContext) requestContext.getVolumeContext();
}
// set the name of the cluster to which this virtual volume ingestion request's varray is connected
String clusterName = getClusterNameForVarray(requestContext.getVarray(unManagedVolume), requestContext.getStorageSystem());
volumeContext.setVirtualVolumeVplexClusterName(clusterName);
// determine if the backend has already been ingested. this could be the case if the volume is
// exported via multiple varrays or hosts and needs to be ingested for export multiple times
String volumeNativeGuid = unManagedVolume.getNativeGuid().replace(VolumeIngestionUtil.UNMANAGEDVOLUME, VolumeIngestionUtil.VOLUME);
Volume volume = VolumeIngestionUtil.checkIfVolumeExistsInDB(volumeNativeGuid, _dbClient);
boolean backendAlreadyIngested = volume != null && volume.getAssociatedVolumes() != null && !volume.getAssociatedVolumes().isEmpty();
if (backendAlreadyIngested) {
_logger.info("backend volumes have already been ingested for UnManagedVolume {}", unManagedVolume.forDisplay());
} else if (ingestBackend) {
volumeContext.setIngestionInProgress(true);
//
// If the "Only During Discovery" system setting is set, no new data will
// be fetched during ingestion. This assumes that all data has been collected
// during discovery and ingestion will fail if it can't find all the required data.
//
// If "Only During Ingestion" or "During Discovery and Ingestion" mode is set,
// then an attempt will be made to query the VPLEX api now to find any incomplete data,
// but the database will be checked first.
//
// The default mode is "Only During Discovery", so the user needs to remember
// to run discovery first on all backend storage arrays before running on the VPLEX.
//
_discoveryMode = ControllerUtils.getPropertyValueFromCoordinator(_coordinator, VplexBackendIngestionContext.DISCOVERY_MODE);
if (VplexBackendIngestionContext.DISCOVERY_MODE_DISCOVERY_ONLY.equals(_discoveryMode) || VplexBackendIngestionContext.DISCOVERY_MODE_DB_ONLY.equals(_discoveryMode)) {
volumeContext.setInDiscoveryOnlyMode(true);
}
// the backend volumes and export masks will be part of the VPLEX project
// rather than the front-end virtual volume project, so we need to set that in the context
Project vplexProject = VPlexBlockServiceApiImpl.getVplexProject(requestContext.getStorageSystem(), _dbClient, _tenantsService);
volumeContext.setBackendProject(vplexProject);
volumeContext.setFrontendProject(requestContext.getProject());
try {
_logger.info("Ingesting backend structure of VPLEX virtual volume {}", unManagedVolume.getLabel());
validateContext(requestContext.getVpool(unManagedVolume), requestContext.getTenant(), volumeContext);
ingestBackendVolumes(requestContext, volumeContext);
ingestBackendExportMasks(requestContext, volumeContext);
_logger.info("Backend ingestion ended:" + volumeContext.toStringDebug());
} catch (Exception ex) {
_logger.error("error during VPLEX backend ingestion: ", ex);
throw IngestionException.exceptions.failedToIngestVplexBackend(ex.getLocalizedMessage());
}
}
_logger.info("Ingesting VPLEX virtual volume {}", unManagedVolume.getLabel());
T virtualVolume = super.ingestBlockObjects(requestContext, clazz);
return virtualVolume;
}
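As a usage note, the override above is generic in the block object type, so a caller asks for the concrete type it expects back for the virtual volume. The orchestrator variable name here is hypothetical:
// Hypothetical caller of the override above: ingest the VPLEX virtual volume and receive it as a Volume
Volume virtualVolume = vplexIngestOrchestrator.ingestBlockObjects(requestContext, Volume.class);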
use of com.emc.storageos.api.service.impl.resource.blockingestorchestration.context.impl.RpVplexVolumeIngestionContext in project coprhd-controller by CoprHD.
the class VolumeIngestionUtil method validateExportMaskMatchesVplexCluster.
/**
* Validates that the given UnManagedExportMask exists on the same VPLEX Cluster
* as the VirtualArray in the ingestion request. The cluster name is actually
* set by the BlockVplexVolumeIngestOrchestrator in order to re-use the cluster-id-to-name
* cache, avoiding an expensive call to get cluster name info from the VPLEX API.
*
* @param requestContext the current IngestionRequestContext
* @param unManagedVolume the current UnManagedVolume being processed for exports
* @param unManagedExportMask the current UnManagedExportMask being processed
*
* @return true if the mask exists on the same VPLEX cluster as the ingestion request VirtualArray
*/
public static boolean validateExportMaskMatchesVplexCluster(IngestionRequestContext requestContext, UnManagedVolume unManagedVolume, UnManagedExportMask unManagedExportMask) {
VolumeIngestionContext volumeContext = requestContext.getRootIngestionRequestContext().getProcessedVolumeContext(unManagedVolume.getNativeGuid());
if (volumeContext == null) {
// just get the current one
volumeContext = requestContext.getVolumeContext();
}
if (volumeContext != null && volumeContext instanceof RpVplexVolumeIngestionContext) {
volumeContext = ((RpVplexVolumeIngestionContext) volumeContext).getVplexVolumeIngestionContext();
}
if (volumeContext != null && volumeContext instanceof VplexVolumeIngestionContext) {
String clusterName = ((VplexVolumeIngestionContext) volumeContext).getVirtualVolumeVplexClusterName();
String maskingViewPath = unManagedExportMask.getMaskingViewPath();
_logger.info("cluster name is {} and masking view path is {}", clusterName, maskingViewPath);
if (clusterName != null && maskingViewPath != null) {
String startOfPath = VPlexApiConstants.URI_CLUSTERS_RELATIVE + clusterName;
// if the cluster path prefix (built from the cluster name set for this ingestion request) matches the start of the masking view path, then we are on the right VPLEX cluster
if (maskingViewPath.startsWith(startOfPath)) {
_logger.info("\tUnManagedExportMask {} is on VPLEX cluster {} and will be processed now", unManagedExportMask.getMaskName(), clusterName);
return true;
}
}
}
_logger.warn("\tUnManagedExportMask {} is not on the right VPLEX cluster for this ingestion request", unManagedExportMask.getMaskName());
return false;
}
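A short usage sketch showing how a caller might narrow a set of UnManagedExportMasks to the ones on the correct VPLEX cluster; the surrounding filter loop and variable names are hypothetical, only the validator call is taken from the listing:
// Hypothetical filter loop: keep only the unmanaged masks on the VPLEX cluster matching this request's varray
List<UnManagedExportMask> matchingMasks = new ArrayList<UnManagedExportMask>();
for (UnManagedExportMask candidate : candidateMasks) {
    if (VolumeIngestionUtil.validateExportMaskMatchesVplexCluster(requestContext, unManagedVolume, candidate)) {
        matchingMasks.add(candidate);
    }
}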
use of com.emc.storageos.api.service.impl.resource.blockingestorchestration.context.impl.RpVplexVolumeIngestionContext in project coprhd-controller by CoprHD.
the class BlockRecoverPointIngestOrchestrator method clearReplicaFlagsInIngestionContext.
/**
* Clear the flags of replicas which have been updated during the ingestion process
*
* @param volumeContext the RecoverPointVolumeIngestionContext for the volume currently being ingested
* @param volumes the ingested RP volumes whose replica flags should be cleared
*/
private void clearReplicaFlagsInIngestionContext(RecoverPointVolumeIngestionContext volumeContext, List<Volume> volumes) {
for (Set<DataObject> updatedObjects : volumeContext.getDataObjectsToBeUpdatedMap().values()) {
for (DataObject updatedObject : updatedObjects) {
if (updatedObject instanceof BlockMirror || updatedObject instanceof BlockSnapshot || updatedObject instanceof BlockSnapshotSession || (updatedObject instanceof Volume && !NullColumnValueGetter.isNullURI(((Volume) updatedObject).getAssociatedSourceVolume()))) {
_logger.info("Clearing internal volume flag of replica {} of RP volume ", updatedObject.getLabel());
updatedObject.clearInternalFlags(INTERNAL_VOLUME_FLAGS);
}
}
}
// We need to look for all snapshots and snapshot sessions in the contexts related to the RP volumes and their backend volumes and
// clear their flags.
List<String> rpVolumes = new ArrayList<String>();
for (Volume volume : volumes) {
rpVolumes.add(volume.getId().toString());
if (RPHelper.isVPlexVolume(volume, _dbClient) && volumeContext.getVolumeContext() instanceof RpVplexVolumeIngestionContext) {
VplexVolumeIngestionContext vplexVolumeContext = ((RpVplexVolumeIngestionContext) volumeContext.getVolumeContext()).getVplexVolumeIngestionContext();
StringSet associatedVolumes = vplexVolumeContext.getAssociatedVolumeIds(volume);
rpVolumes.addAll(associatedVolumes);
}
}
for (VolumeIngestionContext volumeIngestionContext : volumeContext.getRootIngestionRequestContext().getProcessedUnManagedVolumeMap().values()) {
if (volumeIngestionContext instanceof IngestionRequestContext) {
for (Set<DataObject> objectsToBeUpdated : ((IngestionRequestContext) volumeIngestionContext).getDataObjectsToBeUpdatedMap().values()) {
for (DataObject o : objectsToBeUpdated) {
if (o instanceof BlockSnapshot && rpVolumes.contains(((BlockSnapshot) o).getParent().getURI().toString())) {
_logger.info("Clearing internal volume flag of BlockSnapshot {} of RP volume ", o.getLabel());
o.clearInternalFlags(INTERNAL_VOLUME_FLAGS);
} else if (o instanceof BlockSnapshotSession && rpVolumes.contains(((BlockSnapshotSession) o).getParent().getURI().toString())) {
_logger.info("Clearing internal volume flag of BlockSnapshotSession {} of RP volume ", o.getLabel());
o.clearInternalFlags(INTERNAL_VOLUME_FLAGS);
}
}
}
}
}
}
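The first loop above treats several object types as replicas; the test reads more easily as a single predicate. This is a restatement of the condition already used in the loop, extracted as a hypothetical helper:
// Hypothetical predicate mirroring the replica check above: mirrors, snapshots, snapshot
// sessions, and volumes that have an associated source volume are all replicas whose
// internal flags should be cleared after ingestion.
private boolean isReplica(DataObject updatedObject) {
    return updatedObject instanceof BlockMirror
            || updatedObject instanceof BlockSnapshot
            || updatedObject instanceof BlockSnapshotSession
            || (updatedObject instanceof Volume
                    && !NullColumnValueGetter.isNullURI(((Volume) updatedObject).getAssociatedSourceVolume()));
}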
use of com.emc.storageos.api.service.impl.resource.blockingestorchestration.context.impl.RpVplexVolumeIngestionContext in project coprhd-controller by CoprHD.
the class BlockRecoverPointIngestOrchestrator method decorateUpdatesForRPSource.
/**
* Perform updates of the managed volume and associated unmanaged volumes and protection sets
* given an RP source volume getting ingested.
*
* @param volumeContext the RecoverPointVolumeIngestionContext for the volume currently being ingested
* @param volume managed volume
* @param unManagedVolume unmanaged volume
*/
private void decorateUpdatesForRPSource(RecoverPointVolumeIngestionContext volumeContext, Volume volume, UnManagedVolume unManagedVolume) {
StringSetMap unManagedVolumeInformation = unManagedVolume.getVolumeInformation();
volume.setPersonality(PersonalityTypes.SOURCE.toString());
volume.setAccessState(Volume.VolumeAccessState.READWRITE.toString());
volume.setLinkStatus(Volume.LinkStatus.IN_SYNC.toString());
// For RP+VPLEX Distributed and MetroPoint volumes, we want to set the
// internal site and copy names on the backing volumes. This helps when identifying
// which Export Groups the volume belongs to on the VPLEX.
//
// For MetroPoint, the same VPLEX Distributed/Metro volume will be exported to
// two VPLEX Export Groups (aka Storage Views). One for each RPA Cluster in the
// MetroPoint configuration.
boolean isVPlexDistributedVolume = false;
if (volumeContext.getVolumeContext() instanceof RpVplexVolumeIngestionContext) {
VplexVolumeIngestionContext vplexVolumeContext = ((RpVplexVolumeIngestionContext) volumeContext.getVolumeContext()).getVplexVolumeIngestionContext();
isVPlexDistributedVolume = vplexVolumeContext.getAssociatedVolumeIds(volume).size() > 1;
}
if (isVPlexDistributedVolume) {
// Get the internal site and copy names
String rpInternalSiteName = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.RP_INTERNAL_SITENAME.toString(), unManagedVolumeInformation);
String rpCopyName = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.RP_COPY_NAME.toString(), unManagedVolumeInformation);
String rpStandbyInternalSiteName = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.RP_STANDBY_INTERNAL_SITENAME.toString(), unManagedVolumeInformation);
String rpStandbyCopyName = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.RP_STANDBY_COPY_NAME.toString(), unManagedVolumeInformation);
// We need the VPLEX ingest context to get the backend volume info
VplexVolumeIngestionContext vplexVolumeContext = ((RpVplexVolumeIngestionContext) volumeContext.getVolumeContext()).getVplexVolumeIngestionContext();
// Non-matching varrays will be the VPLEX HA side.
for (String associatedVolumeIdStr : vplexVolumeContext.getAssociatedVolumeIds(volume)) {
// Find the associated volumes using the context maps or the db if they are already there
Volume associatedVolume = VolumeIngestionUtil.findVolume(_dbClient, vplexVolumeContext.getBlockObjectsToBeCreatedMap(), vplexVolumeContext.getDataObjectsToBeUpdatedMap(), associatedVolumeIdStr);
// If we can't get a handle on the associated volume we'll have to throw an exception
if (associatedVolume == null) {
_logger.error("Could not find associated volume: " + associatedVolumeIdStr + " in DB. Ingestion failed.");
throw IngestionException.exceptions.generalVolumeException(unManagedVolume.getNativeGuid(), "Could not find associated volume: " + associatedVolumeIdStr + ", for VPLEX volume: " + volume.getLabel());
}
// Compare the varrays for the associated volume and its VPLEX virtual volume
if (associatedVolume.getVirtualArray().equals(volume.getVirtualArray())) {
associatedVolume.setInternalSiteName(rpInternalSiteName);
associatedVolume.setRpCopyName(rpCopyName);
} else {
// If this is a RP+VPLEX Distributed volume (not MP) there is the potential that
// rpStandbyInternalSiteName and rpStandbyCopyName could be null, which is fine.
associatedVolume.setInternalSiteName(rpStandbyInternalSiteName);
associatedVolume.setRpCopyName(rpStandbyCopyName);
}
}
}
// When we ingest a source volume, we need to properly create the RP Target list for that source,
// however it is possible that not all (or any) of the RP targets have been ingested yet. Therefore
// we need to do as much as we can:
//
// 1. Process each managed target volume ID in the unmanaged source volume, add to the managed source volume's RP target list.
// 2. Go through each unmanaged RP target volume in the unmanaged source volume (before it goes away), add the managed source volume
// ID.
// 3. Go through each unmanaged RP target volume in the unmanaged source volume, remove the unmanaged source volume ID.
// 1. Process each managed target volume ID in the unmanaged source volume, add to the managed source volume's RP target list.
StringSet rpManagedTargetVolumeIdStrs = PropertySetterUtil.extractValuesFromStringSet(SupportedVolumeInformation.RP_MANAGED_TARGET_VOLUMES.toString(), unManagedVolumeInformation);
_logger.info("adding managed RecoverPoint targets volumes: " + rpManagedTargetVolumeIdStrs);
for (String rpManagedTargetVolumeIdStr : rpManagedTargetVolumeIdStrs) {
// Check to make sure the target volume is legit.
Volume managedTargetVolume = null;
BlockObject bo = volumeContext.getRootIngestionRequestContext().findCreatedBlockObject(URI.create(rpManagedTargetVolumeIdStr));
if (bo != null && bo instanceof Volume) {
managedTargetVolume = (Volume) bo;
}
if (managedTargetVolume == null) {
_logger.error("Could not find managed target volume: " + rpManagedTargetVolumeIdStr + " in DB. Ingestion failed.");
throw IngestionException.exceptions.noManagedTargetVolumeFound(unManagedVolume.getNativeGuid(), rpManagedTargetVolumeIdStr);
}
_logger.info("\tadding RecoverPoint target volume {}", managedTargetVolume.forDisplay());
if (volume.getRpTargets() == null) {
volume.setRpTargets(new StringSet());
}
volume.getRpTargets().add(managedTargetVolume.getId().toString());
}
// 2. Go through each unmanaged RP target volume in the unmanaged source volume (before it goes away), add the managed source volume
// ID.
// 3. Go through each unmanaged RP target volume in the unmanaged source volume, remove the unmanaged source volume ID.
StringSet rpUnManagedTargetVolumeIdStrs = PropertySetterUtil.extractValuesFromStringSet(SupportedVolumeInformation.RP_UNMANAGED_TARGET_VOLUMES.toString(), unManagedVolumeInformation);
_logger.info("updating unmanaged RecoverPoint targets volumes: " + rpUnManagedTargetVolumeIdStrs);
for (String rpUnManagedTargetVolumeIdStr : rpUnManagedTargetVolumeIdStrs) {
UnManagedVolume unManagedTargetVolume = _dbClient.queryObject(UnManagedVolume.class, URI.create(rpUnManagedTargetVolumeIdStr));
if (unManagedTargetVolume == null) {
_logger.error("Could not find unmanaged target volume: " + rpUnManagedTargetVolumeIdStr + " in DB. Ingestion failed.");
throw IngestionException.exceptions.noUnManagedTargetVolumeFound(unManagedVolume.getNativeGuid(), rpUnManagedTargetVolumeIdStr);
}
// (2) Add the managed source volume ID to this target that hasn't been ingested yet, so when it IS ingested, we know
// what RP source it belongs to.
StringSet rpManagedSourceVolumeId = new StringSet();
rpManagedSourceVolumeId.add(volume.getId().toString());
unManagedTargetVolume.putVolumeInfo(SupportedVolumeInformation.RP_MANAGED_SOURCE_VOLUME.toString(), rpManagedSourceVolumeId);
// (3) Remove the unmanaged source volume ID from this target, since the unmanaged source is going away as a result of ingestion.
// This is for completeness. The ID is going away in the DB, so we don't want any references to it anywhere.
StringSet rpUnManagedSourceVolumeId = new StringSet();
unManagedTargetVolume.putVolumeInfo(SupportedVolumeInformation.RP_UNMANAGED_SOURCE_VOLUME.toString(), rpUnManagedSourceVolumeId);
volumeContext.addUnmanagedTargetVolumeToUpdate(unManagedTargetVolume);
}
}