use of com.emc.storageos.db.client.model.UnManagedDiscoveredObjects.UnManagedVolume in project coprhd-controller by CoprHD.
the class BlockSnapIngestOrchestrator method createSnapshot.
private BlockSnapshot createSnapshot(IngestionRequestContext requestContext, String nativeGuid) throws IngestionException {
UnManagedVolume unManagedVolume = requestContext.getCurrentUnmanagedVolume();
BlockSnapshot snapShot = new BlockSnapshot();
snapShot.setId(URIUtil.createId(BlockSnapshot.class));
snapShot.setNativeGuid(nativeGuid);
updateBlockObjectNativeIds(snapShot, unManagedVolume);
StringSetMap unManagedVolumeInformation = unManagedVolume.getVolumeInformation();
String deviceLabel = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.DEVICE_LABEL.toString(), unManagedVolumeInformation);
if (null == deviceLabel || deviceLabel.trim().isEmpty()) {
deviceLabel = nativeGuid;
}
// In case of XIO snaps, the snapshots belong to a snapset which represents the snapshot CG. This will be
// populated in SNAPSHOT_CONSISTENCY_GROUP_NAME
// The same is applicable to external device snapshots
String snapsetName = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.SNAPSHOT_CONSISTENCY_GROUP_NAME.toString(), unManagedVolumeInformation);
if (null == snapsetName || snapsetName.trim().isEmpty()) {
snapsetName = deviceLabel;
}
snapShot.setSnapsetLabel(snapsetName);
snapShot.setStorageController(requestContext.getStorageSystem().getId());
String systemType = requestContext.getStorageSystem().checkIfVmax3() ? DiscoveredDataObject.Type.vmax3.name() : requestContext.getStorageSystem().getSystemType();
snapShot.setSystemType(systemType);
snapShot.setVirtualArray(requestContext.getVarray(unManagedVolume).getId());
snapShot.setProject(new NamedURI(requestContext.getProject().getId(), snapShot.getLabel()));
snapShot.setWWN(unManagedVolume.getWwn());
String allocatedCapacity = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.ALLOCATED_CAPACITY.toString(), unManagedVolume.getVolumeInformation());
String provisionedCapacity = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.PROVISIONED_CAPACITY.toString(), unManagedVolume.getVolumeInformation());
snapShot.setAllocatedCapacity(Long.parseLong(allocatedCapacity));
snapShot.setProvisionedCapacity(Long.parseLong(provisionedCapacity));
String syncActive = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.IS_SYNC_ACTIVE.toString(), unManagedVolume.getVolumeInformation());
Boolean isSyncActive = (null != syncActive) ? Boolean.parseBoolean(syncActive) : false;
snapShot.setIsSyncActive(isSyncActive);
String readOnly = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.IS_READ_ONLY.toString(), unManagedVolume.getVolumeInformation());
Boolean isReadOnly = (null != readOnly) ? Boolean.parseBoolean(readOnly) : false;
snapShot.setIsReadOnly(isReadOnly);
String settingsInstance = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.SETTINGS_INSTANCE.toString(), unManagedVolume.getVolumeInformation());
snapShot.setSettingsInstance(settingsInstance);
String needsCopyToTarget = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.NEEDS_COPY_TO_TARGET.toString(), unManagedVolumeInformation);
Boolean isNeedsCopyToTarget = (null != needsCopyToTarget) ? Boolean.parseBoolean(needsCopyToTarget) : false;
snapShot.setNeedsCopyToTarget(isNeedsCopyToTarget);
String techType = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.TECHNOLOGY_TYPE.toString(), unManagedVolumeInformation);
snapShot.setTechnologyType(techType);
BlockConsistencyGroup cg = getConsistencyGroup(unManagedVolume, snapShot, requestContext, _dbClient);
if (null != cg) {
requestContext.getVolumeContext().getCGObjectsToCreateMap().put(cg.getLabel(), cg);
decorateCGInfoInVolumes(cg, snapShot, requestContext, unManagedVolume);
}
return snapShot;
}
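As a side note, the label fallback used above (snapset consistency group name, then device label, then native GUID) can be read in isolation. The following is only a minimal sketch; the helper name deriveSnapsetLabel is hypothetical, while UnManagedVolume, PropertySetterUtil, and SupportedVolumeInformation are the same classes used in the method.
// Hypothetical helper isolating the fallback chain used in createSnapshot:
// SNAPSHOT_CONSISTENCY_GROUP_NAME -> DEVICE_LABEL -> native GUID.
private String deriveSnapsetLabel(UnManagedVolume unManagedVolume, String nativeGuid) {
    StringSetMap info = unManagedVolume.getVolumeInformation();
    String deviceLabel = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.DEVICE_LABEL.toString(), info);
    if (null == deviceLabel || deviceLabel.trim().isEmpty()) {
        deviceLabel = nativeGuid;
    }
    String snapsetName = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.SNAPSHOT_CONSISTENCY_GROUP_NAME.toString(), info);
    if (null == snapsetName || snapsetName.trim().isEmpty()) {
        snapsetName = deviceLabel;
    }
    return snapsetName;
}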
use of com.emc.storageos.db.client.model.UnManagedDiscoveredObjects.UnManagedVolume in project coprhd-controller by CoprHD.
the class BlockVolumeIngestOrchestrator method ingestBlockObjects.
@Override
protected <T extends BlockObject> T ingestBlockObjects(IngestionRequestContext requestContext, Class<T> clazz) throws IngestionException {
UnManagedVolume unManagedVolume = requestContext.getCurrentUnmanagedVolume();
boolean unManagedVolumeExported = requestContext.getVolumeContext().isVolumeExported();
Volume volume = null;
List<BlockSnapshotSession> snapSessions = new ArrayList<BlockSnapshotSession>();
URI unManagedVolumeUri = unManagedVolume.getId();
String volumeNativeGuid = unManagedVolume.getNativeGuid().replace(VolumeIngestionUtil.UNMANAGEDVOLUME, VolumeIngestionUtil.VOLUME);
volume = VolumeIngestionUtil.checkIfVolumeExistsInDB(volumeNativeGuid, _dbClient);
// Check if ingested volume has export masks pending for ingestion.
if (isExportIngestionPending(volume, unManagedVolumeUri, unManagedVolumeExported)) {
return clazz.cast(volume);
}
if (null == volume) {
validateUnManagedVolume(unManagedVolume, requestContext.getVpool(unManagedVolume));
// @TODO Need to revisit this. In the 8.x Provider, a ReplicationGroup is automatically created when a volume is associated with a
// StorageGroup.
// checkUnManagedVolumeAddedToCG(unManagedVolume, virtualArray, tenant, project, vPool);
checkVolumeExportState(unManagedVolume, unManagedVolumeExported);
checkVPoolValidForExportInitiatorProtocols(requestContext.getVpool(unManagedVolume), unManagedVolume);
checkHostIOLimits(requestContext.getVpool(unManagedVolume), unManagedVolume, unManagedVolumeExported);
StoragePool pool = validateAndReturnStoragePoolInVAarray(unManagedVolume, requestContext.getVarray(unManagedVolume));
// validate that quotas are not exceeded for storage systems and pools
checkSystemResourceLimitsExceeded(requestContext.getStorageSystem(), unManagedVolume, requestContext.getExhaustedStorageSystems());
checkPoolResourceLimitsExceeded(requestContext.getStorageSystem(), pool, unManagedVolume, requestContext.getExhaustedPools());
String autoTierPolicyId = getAutoTierPolicy(unManagedVolume, requestContext.getStorageSystem(), requestContext.getVpool(unManagedVolume));
validateAutoTierPolicy(autoTierPolicyId, unManagedVolume, requestContext.getVpool(unManagedVolume));
volume = createVolume(requestContext, volumeNativeGuid, pool, unManagedVolume, autoTierPolicyId);
}
if (volume != null) {
String syncActive = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.IS_SYNC_ACTIVE.toString(), unManagedVolume.getVolumeInformation());
boolean isSyncActive = (null != syncActive) ? Boolean.parseBoolean(syncActive) : false;
volume.setSyncActive(isSyncActive);
if (VolumeIngestionUtil.isFullCopy(unManagedVolume)) {
_logger.info("Setting clone related properties {}", unManagedVolume.getId());
String replicaState = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.REPLICA_STATE.toString(), unManagedVolume.getVolumeInformation());
volume.setReplicaState(replicaState);
String replicationGroupName = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.FULL_COPY_CONSISTENCY_GROUP_NAME.toString(), unManagedVolume.getVolumeInformation());
if (replicationGroupName != null && !replicationGroupName.isEmpty()) {
volume.setReplicationGroupInstance(replicationGroupName);
}
}
// Create snapshot sessions for each synchronization aspect for the volume.
StringSet syncAspectInfoForVolume = PropertySetterUtil.extractValuesFromStringSet(SupportedVolumeInformation.SNAPSHOT_SESSIONS.toString(), unManagedVolume.getVolumeInformation());
if ((syncAspectInfoForVolume != null) && (!syncAspectInfoForVolume.isEmpty())) {
Project project = requestContext.getProject();
// If this is a vplex backend volume, then the front end project should be set as snapshot session's project
if (requestContext instanceof VplexVolumeIngestionContext && VolumeIngestionUtil.isVplexBackendVolume(unManagedVolume)) {
project = ((VplexVolumeIngestionContext) requestContext).getFrontendProject();
}
for (String syncAspectInfo : syncAspectInfoForVolume) {
String[] syncAspectInfoComponents = syncAspectInfo.split(":");
String syncAspectName = syncAspectInfoComponents[0];
String syncAspectObjPath = syncAspectInfoComponents[1];
// Make sure it is not already created.
URIQueryResultList queryResults = new URIQueryResultList();
_dbClient.queryByConstraint(AlternateIdConstraint.Factory.getBlockSnapshotSessionBySessionInstance(syncAspectObjPath), queryResults);
Iterator<URI> queryResultsIter = queryResults.iterator();
if (!queryResultsIter.hasNext()) {
BlockSnapshotSession session = new BlockSnapshotSession();
session.setId(URIUtil.createId(BlockSnapshotSession.class));
session.setLabel(syncAspectName);
session.setSessionLabel(syncAspectName);
session.setParent(new NamedURI(volume.getId(), volume.getLabel()));
session.setProject(new NamedURI(project.getId(), project.getLabel()));
session.setStorageController(volume.getStorageController());
session.setSessionInstance(syncAspectObjPath);
StringSet linkedTargetURIs = new StringSet();
URIQueryResultList snapshotQueryResults = new URIQueryResultList();
_dbClient.queryByConstraint(AlternateIdConstraint.Factory.getBlockSnapshotBySettingsInstance(syncAspectObjPath), snapshotQueryResults);
Iterator<URI> snapshotQueryResultsIter = snapshotQueryResults.iterator();
while (snapshotQueryResultsIter.hasNext()) {
linkedTargetURIs.add(snapshotQueryResultsIter.next().toString());
}
session.setLinkedTargets(linkedTargetURIs);
session.setOpStatus(new OpStatusMap());
snapSessions.add(session);
}
}
if (!snapSessions.isEmpty()) {
_dbClient.createObject(snapSessions);
}
}
}
// Note that a VPLEX backend volume can also be a snapshot target volume.
// When the VPLEX ingest orchestrator is executed, it gets the ingestion
// strategy for the backend volume and executes it. If the backend volume
// is both a snapshot and a VPLEX backend volume, this local volume ingest
// strategy is invoked and a Volume instance will result. That is fine because
// we need to represent that VPLEX backend volume. However, we also need a
// BlockSnapshot instance to represent the snapshot target volume. Therefore,
// if the unmanaged volume is also a snapshot target volume, we get and
// execute the local snapshot ingest strategy to create this BlockSnapshot
// instance and we add it to the created object list. Note that since the
// BlockSnapshot is added to the created objects list and the Volume and
// BlockSnapshot instance will have the same native GUID, we must be careful
// about adding the Volume to the created object list in the VPLEX ingestion
// strategy.
BlockObject snapshot = null;
if (VolumeIngestionUtil.isSnapshot(unManagedVolume)) {
String strategyKey = ReplicationStrategy.LOCAL.name() + "_" + VolumeType.SNAPSHOT.name();
IngestStrategy ingestStrategy = ingestStrategyFactory.getIngestStrategy(IngestStrategyEnum.getIngestStrategy(strategyKey));
snapshot = ingestStrategy.ingestBlockObjects(requestContext, BlockSnapshot.class);
requestContext.getBlockObjectsToBeCreatedMap().put(snapshot.getNativeGuid(), snapshot);
}
// Always run this, even when the volume is marked NO_PUBLIC_ACCESS
if (markUnManagedVolumeInactive(requestContext, volume)) {
_logger.info("All the related replicas and parent has been ingested ", unManagedVolume.getNativeGuid());
// RP masks.
if (!unManagedVolumeExported && !VolumeIngestionUtil.checkUnManagedResourceIsRecoverPointEnabled(unManagedVolume)) {
unManagedVolume.setInactive(true);
requestContext.getUnManagedVolumesToBeDeleted().add(unManagedVolume);
}
} else if (volume != null) {
_logger.info("Not all the parent/replicas of unManagedVolume {} have been ingested , hence marking as internal", unManagedVolume.getNativeGuid());
volume.addInternalFlags(INTERNAL_VOLUME_FLAGS);
for (BlockSnapshotSession snapSession : snapSessions) {
snapSession.addInternalFlags(INTERNAL_VOLUME_FLAGS);
}
_dbClient.updateObject(snapSessions);
}
return clazz.cast(volume);
}
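The local snapshot strategy lookup used above follows a simple key pattern. The fragment below is a minimal sketch of that lookup, reusing only names that appear in the method (it assumes the same ingestStrategyFactory field and requestContext parameter); the variable names are illustrative.
// The strategy key pattern used above is "<ReplicationStrategy>_<VolumeType>", e.g. "LOCAL_SNAPSHOT".
String strategyKey = ReplicationStrategy.LOCAL.name() + "_" + VolumeType.SNAPSHOT.name();
IngestStrategy snapshotStrategy = ingestStrategyFactory.getIngestStrategy(IngestStrategyEnum.getIngestStrategy(strategyKey));
// The resulting BlockSnapshot shares its native GUID with the Volume, which is why it is
// tracked in the block-objects-to-be-created map keyed by that GUID.
BlockObject ingestedSnapshot = snapshotStrategy.ingestBlockObjects(requestContext, BlockSnapshot.class);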
use of com.emc.storageos.db.client.model.UnManagedDiscoveredObjects.UnManagedVolume in project coprhd-controller by CoprHD.
the class BlockVplexVolumeIngestOrchestrator method ingestBlockObjects.
@Override
public <T extends BlockObject> T ingestBlockObjects(IngestionRequestContext requestContext, Class<T> clazz) throws IngestionException {
refreshCaches(requestContext.getStorageSystem());
UnManagedVolume unManagedVolume = requestContext.getCurrentUnmanagedVolume();
VolumeIngestionUtil.checkValidVarrayForUnmanagedVolume(unManagedVolume, requestContext.getVarray(unManagedVolume).getId(), getClusterIdToNameMap(requestContext.getStorageSystem()), getVarrayToClusterIdMap(requestContext.getStorageSystem()), _dbClient);
String vplexIngestionMethod = requestContext.getVplexIngestionMethod();
_logger.info("VPLEX ingestion method is " + vplexIngestionMethod);
boolean ingestBackend = (null == vplexIngestionMethod) || vplexIngestionMethod.isEmpty() || (!vplexIngestionMethod.equals(VplexBackendIngestionContext.INGESTION_METHOD_VVOL_ONLY));
VplexVolumeIngestionContext volumeContext = null;
boolean isRpVplexContext = requestContext.getVolumeContext() instanceof RpVplexVolumeIngestionContext;
if (isRpVplexContext) {
// if this volume is RP/VPLEX, we need to get the volume context
// from the RpVplexVolumeIngestionContext
volumeContext = ((RpVplexVolumeIngestionContext) requestContext.getVolumeContext()).getVplexVolumeIngestionContext();
} else {
// this is just a plain VPLEX volume backend ingestion
volumeContext = (VplexVolumeIngestionContext) requestContext.getVolumeContext();
}
// set the name of the cluster to which this virtual volume ingestion request's varray is connected
String clusterName = getClusterNameForVarray(requestContext.getVarray(unManagedVolume), requestContext.getStorageSystem());
volumeContext.setVirtualVolumeVplexClusterName(clusterName);
// determine if the backend has already been ingested. This could be the case if the volume is
// exported via multiple varrays or hosts and needs to be ingested for export multiple times
String volumeNativeGuid = unManagedVolume.getNativeGuid().replace(VolumeIngestionUtil.UNMANAGEDVOLUME, VolumeIngestionUtil.VOLUME);
Volume volume = VolumeIngestionUtil.checkIfVolumeExistsInDB(volumeNativeGuid, _dbClient);
boolean backendAlreadyIngested = volume != null && volume.getAssociatedVolumes() != null && !volume.getAssociatedVolumes().isEmpty();
if (backendAlreadyIngested) {
_logger.info("backend volumes have already been ingested for UnManagedVolume {}", unManagedVolume.forDisplay());
} else if (ingestBackend) {
volumeContext.setIngestionInProgress(true);
//
// If the "Only During Discovery" system setting is set, no new data will
// be fetched during ingestion. This assumes that all data has been collected
// during discovery and ingestion will fail if it can't find all the required data.
//
// If "Only During Ingestion" or "During Discovery and Ingestion" mode is set,
// then an attempt will be made to query the VPLEX API now to find any incomplete data,
// but the database will be checked first.
//
// The default mode is "Only During Discovery", so the user needs to remember
// to run discovery first on all backend storage arrays before running on the VPLEX.
//
_discoveryMode = ControllerUtils.getPropertyValueFromCoordinator(_coordinator, VplexBackendIngestionContext.DISCOVERY_MODE);
if (VplexBackendIngestionContext.DISCOVERY_MODE_DISCOVERY_ONLY.equals(_discoveryMode) || VplexBackendIngestionContext.DISCOVERY_MODE_DB_ONLY.equals(_discoveryMode)) {
volumeContext.setInDiscoveryOnlyMode(true);
}
// the backend volumes and export masks will be part of the VPLEX project
// rather than the front-end virtual volume project, so we need to set that in the context
Project vplexProject = VPlexBlockServiceApiImpl.getVplexProject(requestContext.getStorageSystem(), _dbClient, _tenantsService);
volumeContext.setBackendProject(vplexProject);
volumeContext.setFrontendProject(requestContext.getProject());
try {
_logger.info("Ingesting backend structure of VPLEX virtual volume {}", unManagedVolume.getLabel());
validateContext(requestContext.getVpool(unManagedVolume), requestContext.getTenant(), volumeContext);
ingestBackendVolumes(requestContext, volumeContext);
ingestBackendExportMasks(requestContext, volumeContext);
_logger.info("Backend ingestion ended:" + volumeContext.toStringDebug());
} catch (Exception ex) {
_logger.error("error during VPLEX backend ingestion: ", ex);
throw IngestionException.exceptions.failedToIngestVplexBackend(ex.getLocalizedMessage());
}
}
_logger.info("Ingesting VPLEX virtual volume {}", unManagedVolume.getLabel());
T virtualVolume = super.ingestBlockObjects(requestContext, clazz);
return virtualVolume;
}
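The ingestBackend decision above depends only on the requested VPLEX ingestion method. Below is a minimal sketch of that check as a standalone helper; the name shouldIngestBackend is hypothetical, while the constant comes from VplexBackendIngestionContext as used above.
// Hypothetical helper restating the decision above: backend ingestion is skipped only when the
// caller explicitly requested "virtual volume only" ingestion; a null or empty method means
// the backend volumes are ingested as well.
private boolean shouldIngestBackend(String vplexIngestionMethod) {
    return (null == vplexIngestionMethod)
            || vplexIngestionMethod.isEmpty()
            || !vplexIngestionMethod.equals(VplexBackendIngestionContext.INGESTION_METHOD_VVOL_ONLY);
}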
use of com.emc.storageos.db.client.model.UnManagedDiscoveredObjects.UnManagedVolume in project coprhd-controller by CoprHD.
the class BlockVplexVolumeIngestOrchestrator method ingestBackendVolumes.
/**
* Ingests the backend volumes and any related replicas.
*
* Calls ingestBlockObjects by getting a nested IngestStrategy
* for each backend volume or replica from the IngestStrategyFactory.
*
* @param backendRequestContext the VplexBackendIngestionContext for the parent virtual volume
*
* @throws IngestionException
*/
private void ingestBackendVolumes(IngestionRequestContext requestContext, VplexVolumeIngestionContext backendRequestContext) throws IngestionException {
while (backendRequestContext.hasNext()) {
UnManagedVolume associatedVolume = backendRequestContext.next();
String sourceClusterId = getClusterNameForVarray(backendRequestContext.getVarray(associatedVolume), requestContext.getStorageSystem());
String haClusterId = getClusterNameForVarray(backendRequestContext.getHaVarray(associatedVolume), requestContext.getStorageSystem());
_logger.info("the source cluster id is {} and the high availability cluster id is {}", sourceClusterId, haClusterId);
backendRequestContext.setHaClusterId(haClusterId);
_logger.info("Ingestion started for vplex backend volume {}", associatedVolume.getNativeGuid());
try {
validateBackendVolumeVpool(associatedVolume, backendRequestContext.getVpool(associatedVolume));
IngestStrategy ingestStrategy = ingestStrategyFactory.buildIngestStrategy(associatedVolume, IngestStrategyFactory.DISREGARD_PROTECTION);
@SuppressWarnings("unchecked") BlockObject blockObject = ingestStrategy.ingestBlockObjects(backendRequestContext, VolumeIngestionUtil.getBlockObjectClass(associatedVolume));
if (null == blockObject) {
// ingestion did not succeed; an exception should already have been thrown, but in case it wasn't, throw one here
throw IngestionException.exceptions.generalVolumeException(associatedVolume.getLabel(), "check the logs for more details");
}
// Note that a VPLEX backend volume could also be a snapshot target volume.
// When this is the case, the local volume ingest strategy is what will be
// retrieved and executed. As a result, the object returned will be a
// Volume instance not a BlockSnapshot instance. However, the local volume
// ingest strategy realizes that the volume may also be a snapshot target
// volume and creates the BlockSnapshot instance to represent the snapshot
// target volume and adds this BlockSnapshot instance to the created objects
// list. Because the BlockSnapshot and Volume instances will have the same
// native GUID, as they represent the same physical volume, we can't
// add the Volume to the created objects list as it would just replace
// the BlockSnapshot instance and only the Volume would get created. So,
// we first move the snapshot to the created snapshots list before adding
// the volume to the created objects list.
Map<String, BlockObject> createdObjectMap = backendRequestContext.getBlockObjectsToBeCreatedMap();
String blockObjectNativeGuid = blockObject.getNativeGuid();
if (createdObjectMap.containsKey(blockObjectNativeGuid)) {
BlockObject createdBlockObject = createdObjectMap.get(blockObjectNativeGuid);
if (createdBlockObject instanceof BlockSnapshot) {
_logger.info("Backend ingestion created block snapshot {}", blockObjectNativeGuid);
// The snapshot will be created with the backend volume project, so we
// need to update that to the frontend project.
((BlockSnapshot) createdBlockObject).setProject(new NamedURI(backendRequestContext.getFrontendProject().getId(), createdBlockObject.getLabel()));
backendRequestContext.getCreatedSnapshotMap().put(blockObjectNativeGuid, (BlockSnapshot) createdBlockObject);
createdObjectMap.put(blockObjectNativeGuid, blockObject);
} else {
// This should not happen. If there is an instance in the created
// objects list with the same guid as the ingested block object
// it must be that the backend volume is also a snapshot target
// volume and the strategy created the BlockSnapshot instance and
// added it to the created objects list.
_logger.warn("Unexpected object in created objects list during backend ingestion {}:{}", blockObjectNativeGuid, createdBlockObject.getLabel());
}
} else {
createdObjectMap.put(blockObjectNativeGuid, blockObject);
}
backendRequestContext.getProcessedUnManagedVolumeMap().put(associatedVolume.getNativeGuid(), backendRequestContext.getVolumeContext());
_logger.info("Ingestion ended for backend volume {}", associatedVolume.getNativeGuid());
} catch (Exception ex) {
_logger.error(ex.getLocalizedMessage());
backendRequestContext.rollback();
throw ex;
}
}
}
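The native-GUID collision handling above can be summarized on its own. The sketch below is a simplified, hypothetical helper that keeps only the map bookkeeping; the project fix-up and logging from the real method are omitted.
// Hypothetical helper: if a BlockSnapshot with the same native GUID was already created,
// move it aside to the created-snapshots map before registering the Volume, so that both
// objects are eventually persisted under their shared GUID.
private void registerCreatedBlockObject(VplexVolumeIngestionContext backendRequestContext, BlockObject blockObject) {
    Map<String, BlockObject> createdObjectMap = backendRequestContext.getBlockObjectsToBeCreatedMap();
    BlockObject existing = createdObjectMap.get(blockObject.getNativeGuid());
    if (existing instanceof BlockSnapshot) {
        backendRequestContext.getCreatedSnapshotMap().put(existing.getNativeGuid(), (BlockSnapshot) existing);
    }
    createdObjectMap.put(blockObject.getNativeGuid(), blockObject);
}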
use of com.emc.storageos.db.client.model.UnManagedDiscoveredObjects.UnManagedVolume in project coprhd-controller by CoprHD.
the class BlockVplexVolumeIngestOrchestrator method validateContext.
/**
* Validate the VplexBackendIngestionContext against the
* VirtualPool and Tenant.
*
* @param vpool the target VirtualPool for ingestion
* @param tenant the Tenant in use
* @param context the VplexBackendIngestionContext
* @throws IngestionException
*/
private void validateContext(VirtualPool vpool, TenantOrg tenant, VplexBackendIngestionContext context) throws IngestionException {
UnManagedVolume unManagedVirtualVolume = context.getUnmanagedVirtualVolume();
List<UnManagedVolume> unManagedBackendVolumes = context.getUnmanagedBackendVolumes();
_logger.info("validating the ingestion context for these backend volumes: " + unManagedBackendVolumes);
_logger.info("checking if we have found enough backend volumes for ingestion");
if ((context.isLocal() && (unManagedBackendVolumes.isEmpty())) || context.isDistributed() && (unManagedBackendVolumes.size() < 2)) {
String supportingDevice = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.VPLEX_SUPPORTING_DEVICE_NAME.toString(), unManagedVirtualVolume.getVolumeInformation());
if (unManagedBackendVolumes.isEmpty()) {
String reason = "failed to find any VPLEX backend volume for UnManagedVolume " + unManagedVirtualVolume.getLabel() + " with supporting device " + supportingDevice + ". Has the backend array been discovered for unmanaged volumes?";
_logger.error(reason);
throw IngestionException.exceptions.validationException(reason);
} else {
String reason = "failed to find all VPLEX backend volume for UnManagedVolume " + unManagedVirtualVolume.getLabel() + " with supporting device " + supportingDevice + ". Did find these backend volumes, though: " + Joiner.on(", ").join(unManagedBackendVolumes) + ". Have all backend arrays been discovered for unmanaged volumes?";
;
_logger.error(reason);
throw IngestionException.exceptions.validationException(reason);
}
}
if (!VplexBackendIngestionContext.DISCOVERY_MODE_DB_ONLY.equals(_discoveryMode)) {
// validate that the supporting device structure is compatible with ViPR;
// will contact the VPLEX API to check the current device structure
context.validateSupportingDeviceStructure();
}
// validate that replicas are not present on both legs of a distributed volume, as we can only support ingesting snaps or clones on one leg
if (context.isDistributed()) {
_logger.info("checking for presence of replicas on both legs of this distributed volume");
// each entry in these collections is a concatenated list of replica names
// on each backend volume, so if their size is more than one, that means there
// are replicas present on both legs.
// for example: all the snapshot names found on the leg 1 backend volume are concatenated and
// added to snapshotsList at position 0. All the snapshot names on the leg 2 backend volume (if
// present) are concatenated and added to snapshotsList at position 1. So,
// if the size of snapshotsList is greater than 1, we've got snaps on both legs.
List<String> snapshotsList = new ArrayList<String>();
List<String> clonesList = new ArrayList<String>();
for (UnManagedVolume vol : unManagedBackendVolumes) {
StringSet snapshots = VplexBackendIngestionContext.extractValuesFromStringSet(SupportedVolumeInformation.SNAPSHOTS.name(), vol.getVolumeInformation());
if (snapshots != null && !snapshots.isEmpty()) {
snapshotsList.add(Joiner.on(", ").join(snapshots));
}
StringSet clones = VplexBackendIngestionContext.extractValuesFromStringSet(SupportedVolumeInformation.FULL_COPIES.name(), vol.getVolumeInformation());
if (clones != null && !clones.isEmpty()) {
clonesList.add(Joiner.on(", ").join(clones));
}
}
// build up an error message
int counter = 0;
StringBuilder message = new StringBuilder("");
if (snapshotsList.size() > 1) {
for (String snapshots : snapshotsList) {
if (counter > 0) {
message.append(" and");
}
message.append(" one distributed volume component has snapshots ").append(snapshots);
counter++;
}
counter = 0;
}
if (clonesList.size() > 1) {
for (String clones : clonesList) {
if (counter > 0) {
message.append(" and");
}
message.append(" one distributed volume component has full copies ").append(clones);
counter++;
}
}
if (message.length() > 0) {
String reason = message.toString();
_logger.error(reason);
throw IngestionException.exceptions.vplexVolumeCannotHaveReplicasOnBothLegs(reason);
}
}
for (UnManagedVolume vol : unManagedBackendVolumes) {
_logger.info("checking for non native mirrors on backend volume " + vol.getNativeGuid());
StringSet mirrors = PropertySetterUtil.extractValuesFromStringSet(SupportedVolumeInformation.MIRRORS.toString(), vol.getVolumeInformation());
Iterator<String> mirrorator = mirrors.iterator();
while (mirrorator.hasNext()) {
String mirrorGuid = mirrorator.next();
_logger.info("\tvolume has mirror " + mirrorGuid);
for (Entry<UnManagedVolume, String> entry : context.getUnmanagedVplexMirrors().entrySet()) {
if (mirrorGuid.equals(entry.getKey().getNativeGuid())) {
_logger.info("\t\tbut it's native, so it's okay...");
mirrorator.remove();
}
}
}
if (!mirrors.isEmpty()) {
String reason = "cannot ingest a mirror on the backend array, " + "only VPLEX device mirrors are supported. Mirrors found: " + Joiner.on(", ").join(mirrors);
_logger.error(reason);
throw IngestionException.exceptions.validationException(reason);
}
}
// a clone on the backend array must have a virtual volume in front of it, otherwise, we can't ingest it.
if (context.getUnmanagedBackendOnlyClones().size() > 0) {
List<String> cloneInfo = new ArrayList<String>();
for (Entry<UnManagedVolume, Set<UnManagedVolume>> cloneEntry : context.getUnmanagedBackendOnlyClones().entrySet()) {
String message = cloneEntry.getKey().getLabel() + " has ";
List<String> clones = new ArrayList<String>();
for (UnManagedVolume clone : cloneEntry.getValue()) {
clones.add(clone.getLabel());
}
message += Joiner.on(", ").join(clones) + ". ";
cloneInfo.add(message);
}
String reason = "cannot currently ingest a clone on the backend array " + "that doesn't have a virtual volume in front of it. " + "Backend-only clones found: " + Joiner.on(", ").join(cloneInfo);
_logger.error(reason);
throw IngestionException.exceptions.validationException(reason);
}
int mirrorCount = context.getUnmanagedVplexMirrors().size();
if (mirrorCount > 0) {
_logger.info("{} native mirror(s) are present, validating vpool", mirrorCount);
if (VirtualPool.vPoolSpecifiesMirrors(vpool, _dbClient)) {
if (mirrorCount > vpool.getMaxNativeContinuousCopies()) {
if (context.isDistributed() && mirrorCount == 2) {
// there are two mirrors
// we need to check that they are on different clusters
List<UnManagedVolume> mirrors = new ArrayList<UnManagedVolume>();
for (UnManagedVolume mirror : context.getUnmanagedVplexMirrors().keySet()) {
mirrors.add(mirror);
}
if (mirrors.size() == 2) {
String backendClusterId0 = VplexBackendIngestionContext.extractValueFromStringSet(SupportedVolumeInformation.VPLEX_BACKEND_CLUSTER_ID.toString(), mirrors.get(0).getVolumeInformation());
String backendClusterId1 = VplexBackendIngestionContext.extractValueFromStringSet(SupportedVolumeInformation.VPLEX_BACKEND_CLUSTER_ID.toString(), mirrors.get(1).getVolumeInformation());
if (backendClusterId0.equals(backendClusterId1)) {
// the different clusters check failed
StringBuilder reason = new StringBuilder("the volume's mirrors must be on separate ");
reason.append(" vplex clusters. mirrors found: ");
reason.append(backendClusterId0).append(": ").append(mirrors.get(0).getLabel()).append("; ").append(backendClusterId1).append(": ").append(mirrors.get(1).getLabel()).append(".");
String message = reason.toString();
_logger.error(message);
throw IngestionException.exceptions.validationException(message);
} else {
// a high availability vpool is required
VirtualPool haVpool = VirtualPool.getHAVPool(vpool, _dbClient);
if (haVpool == null) {
String reason = "no high availability virtual pool is " + "set on source virtual pool " + vpool.getLabel();
_logger.error(reason);
throw IngestionException.exceptions.validationException(reason);
}
// max continuous copies needs to be set to one on both source and ha vpools
if (vpool.getMaxNativeContinuousCopies() == 1 && haVpool.getMaxNativeContinuousCopies() == 1) {
_logger.info("volume is distributed, has a mirror on each leg, both source and " + "high availaiblity vpools have continuous copies value of 1, " + "volume is ok for ingestion");
} else {
StringBuilder reason = new StringBuilder("the virtual pools' continuous copy ");
reason.append("settings are incorrect for ingesting a dual distributed mirror. ");
reason.append("Source virtual pool is set to ").append(vpool.getMaxNativeContinuousCopies()).append(" and target virtual pool is set to ").append(haVpool.getMaxNativeContinuousCopies()).append(". ");
reason.append("Mirrors found - ").append(backendClusterId0).append(": ").append(mirrors.get(0).getLabel()).append("; ").append(backendClusterId1).append(": ").append(mirrors.get(1).getLabel()).append(".");
String message = reason.toString();
_logger.error(message);
throw IngestionException.exceptions.validationException(message);
}
}
}
} else {
StringBuilder reason = new StringBuilder("volume has more continuous copies (");
reason.append(mirrorCount).append(" than vpool allows. Mirrors found: ");
reason.append(Joiner.on(", ").join(context.getUnmanagedVplexMirrors().keySet()));
String message = reason.toString();
_logger.error(message);
throw IngestionException.exceptions.validationException(message);
}
}
} else {
String reason = "virtual pool does not allow continuous copies, but volume has " + mirrorCount + " mirror(s)";
_logger.error(reason);
throw IngestionException.exceptions.validationException(reason);
}
}
int snapshotCount = context.getUnmanagedSnapshots().size();
if (snapshotCount > 0) {
_logger.info("{} snapshot(s) are present, validating vpool", snapshotCount);
if (VirtualPool.vPoolSpecifiesSnapshots(vpool)) {
if (snapshotCount > vpool.getMaxNativeSnapshots()) {
String reason = "volume has more snapshots (" + snapshotCount + ") than vpool allows";
_logger.error(reason);
throw IngestionException.exceptions.validationException(reason);
}
} else {
String reason = "vpool does not allow snapshots, but volume has " + snapshotCount + " snapshot(s)";
_logger.error(reason);
throw IngestionException.exceptions.validationException(reason);
}
}
long unManagedVolumesCapacity = VolumeIngestionUtil.getTotalUnManagedVolumeCapacity(_dbClient, context.getUnmanagedBackendVolumeUris());
_logger.info("validating total backend volume capacity {} against the vpool", unManagedVolumesCapacity);
CapacityUtils.validateQuotasForProvisioning(_dbClient, vpool, context.getBackendProject(), tenant, unManagedVolumesCapacity, "volume");
_logger.info("validating backend volumes against the vpool");
VolumeIngestionUtil.checkIngestionRequestValidForUnManagedVolumes(context.getUnmanagedBackendVolumeUris(), vpool, _dbClient);
}
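The both-legs rule enforced above reduces to a size check on the two lists, since each list holds at most one concatenated entry per distributed-volume leg. Below is a minimal sketch of that rule as a hypothetical helper; the detailed error-message assembly from the real method is omitted.
// Hypothetical helper: more than one entry in either list means replicas exist on both
// legs of the distributed volume, which cannot be ingested.
private void checkReplicasOnOneLegOnly(List<String> snapshotsList, List<String> clonesList) throws IngestionException {
    if (snapshotsList.size() > 1 || clonesList.size() > 1) {
        throw IngestionException.exceptions.vplexVolumeCannotHaveReplicasOnBothLegs(
                "replicas were found on both legs of the distributed volume");
    }
}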