Search in sources:

Example 36 with VolumeDescriptor

use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.

The class BlockDeviceController, method addStepsForPostDeleteVolumes.

/**
 * {@inheritDoc}
 * <p>
 * Adds a workflow step to delete each array replication group that becomes
 * empty as a result of deleting the given source volumes. SRDF-protected
 * volumes are skipped because their replication group is removed by the
 * SRDF delete steps themselves.
 */
@Override
public String addStepsForPostDeleteVolumes(Workflow workflow, String waitFor, List<VolumeDescriptor> volumes, String taskId, VolumeWorkflowCompleter completer) {
    // Get the list of descriptors which represent source volumes to be deleted.
    List<VolumeDescriptor> volumeDescriptors = VolumeDescriptor.filterByType(volumes, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.BLOCK_DATA }, null);
    // If no source volumes, just return.
    if (volumeDescriptors.isEmpty()) {
        _log.info("No post deletion step required");
        return waitFor;
    }
    // Group the deleted volumes by (storage system, replication group) so each
    // group only needs to be checked once for emptiness.
    Map<String, Set<URI>> rgVolsMap = new HashMap<String, Set<URI>>();
    for (VolumeDescriptor volumeDescriptor : volumeDescriptors) {
        URI volumeURI = volumeDescriptor.getVolumeURI();
        Volume volume = _dbClient.queryObject(Volume.class, volumeURI);
        if (volume != null) {
            // No need to remove the replication group for SRDF volumes: their
            // rep group is removed as part of the SRDF volume delete steps.
            if (!Volume.isSRDFProtectedVolume(volume)) {
                String replicationGroup = volume.getReplicationGroupInstance();
                if (NullColumnValueGetter.isNotNullValue(replicationGroup)) {
                    URI storage = volume.getStorageController();
                    // Key combines the storage system URI and the group name, so
                    // same-named groups on different arrays stay distinct.
                    String key = storage.toString() + replicationGroup;
                    Set<URI> rgVolumeList = rgVolsMap.get(key);
                    if (rgVolumeList == null) {
                        rgVolumeList = new HashSet<URI>();
                        rgVolsMap.put(key, rgVolumeList);
                    }
                    rgVolumeList.add(volumeURI);
                }
            } else {
                _log.info("post delete not required for SRDF Volume :{}", volume.getId());
            }
        }
    }
    if (rgVolsMap.isEmpty()) {
        return waitFor;
    }
    for (Set<URI> volumeURIs : rgVolsMap.values()) {
        // Collect the still-active member volumes of this group.
        List<Volume> volumeList = new ArrayList<Volume>();
        Iterator<Volume> volumeIterator = _dbClient.queryIterativeObjects(Volume.class, volumeURIs);
        while (volumeIterator.hasNext()) {
            Volume volume = volumeIterator.next();
            if (volume != null && !volume.getInactive()) {
                volumeList.add(volume);
            }
        }
        // FIX: the original called volumeList.get(0) unconditionally; if every
        // member was null or inactive this threw IndexOutOfBoundsException.
        // With no active member there is nothing to key the group lookup on.
        if (volumeList.isEmpty()) {
            continue;
        }
        Volume firstVol = volumeList.get(0);
        String rgName = firstVol.getReplicationGroupInstance();
        URI storage = firstVol.getStorageController();
        URI cgURI = firstVol.getConsistencyGroup();
        // Delete the replication group from the array only when no volume
        // outside this deletion set still belongs to it.
        if (ControllerUtils.replicationGroupHasNoOtherVolume(_dbClient, rgName, volumeURIs, storage)) {
            _log.info(String.format("Adding step to delete the replication group %s", rgName));
            StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, storage);
            waitFor = workflow.createStep(UPDATE_CONSISTENCY_GROUP_STEP_GROUP, String.format("Deleting replication group  %s", rgName), waitFor, storage, storageSystem.getSystemType(), this.getClass(), deleteConsistencyGroupMethod(storage, cgURI, rgName, false, false, false), rollbackMethodNullMethod(), null);
        }
    }
    return waitFor;
}
Also used : VolumeDescriptor(com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor) Set(java.util.Set) HashSet(java.util.HashSet) StringSet(com.emc.storageos.db.client.model.StringSet) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) NamedURI(com.emc.storageos.db.client.model.NamedURI) FCTN_MIRROR_TO_URI(com.emc.storageos.db.client.util.CommonTransformerFunctions.FCTN_MIRROR_TO_URI) URI(java.net.URI) Volume(com.emc.storageos.db.client.model.Volume) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)

Example 37 with VolumeDescriptor

use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.

The class BlockDeviceController, method addStepsForCreateVolumes.

/**
 * {@inheritDoc}
 * <p>
 * Adds workflow steps to create the volumes described by the BLOCK_DATA,
 * RP_SOURCE/JOURNAL/TARGET and SRDF_SOURCE/TARGET descriptors. Descriptors
 * are bucketed by (pool, size); each bucket becomes one or more creation
 * steps. Meta volumes get dedicated steps (batched into one SMI-S request
 * for VMAX thin), and HDS thin volumes may get an extra modify step to
 * apply a tiering policy.
 */
@Override
public String addStepsForCreateVolumes(Workflow workflow, String waitFor, List<VolumeDescriptor> origVolumes, String taskId) throws ControllerException {
    // Get the list of descriptors the BlockDeviceController needs to create volumes for.
    List<VolumeDescriptor> volumeDescriptors = VolumeDescriptor.filterByType(origVolumes, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.BLOCK_DATA, VolumeDescriptor.Type.RP_SOURCE, VolumeDescriptor.Type.RP_JOURNAL, VolumeDescriptor.Type.RP_TARGET, VolumeDescriptor.Type.SRDF_SOURCE, VolumeDescriptor.Type.SRDF_TARGET }, null);
    // If no volumes to create, just return
    if (volumeDescriptors.isEmpty()) {
        return waitFor;
    }
    // Segregate by pool: outer key is pool URI, inner key is volume size.
    Map<URI, Map<Long, List<VolumeDescriptor>>> poolMap = VolumeDescriptor.getPoolSizeMap(volumeDescriptors);
    // Add a Step to create the consistency group if needed
    waitFor = addStepsForCreateConsistencyGroup(workflow, waitFor, volumeDescriptors, CREATE_CONSISTENCY_GROUP_STEP_GROUP);
    waitFor = addStepsForReplicaRollbackCleanup(workflow, waitFor, volumeDescriptors);
    // For meta volumes add Step for each meta volume, except vmax thin meta volumes.
    for (URI poolURI : poolMap.keySet()) {
        for (Long volumeSize : poolMap.get(poolURI).keySet()) {
            List<VolumeDescriptor> descriptors = poolMap.get(poolURI).get(volumeSize);
            List<URI> volumeURIs = VolumeDescriptor.getVolumeURIs(descriptors);
            // All descriptors in this bucket share pool and size; the first one
            // supplies the device and capabilities for the whole batch.
            VolumeDescriptor first = descriptors.get(0);
            URI deviceURI = first.getDeviceURI();
            VirtualPoolCapabilityValuesWrapper capabilities = first.getCapabilitiesValues();
            // Check if volumes have to be created as meta volumes
            _log.debug(String.format("Capabilities : isMeta: %s, Meta Type: %s, Member size: %s, Count: %s", capabilities.getIsMetaVolume(), capabilities.getMetaVolumeType(), capabilities.getMetaVolumeMemberSize(), capabilities.getMetaVolumeMemberCount()));
            Volume volume = _dbClient.queryObject(Volume.class, first.getVolumeURI());
            StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, volume.getStorageController());
            boolean createAsMetaVolume = capabilities.getIsMetaVolume() || MetaVolumeUtils.createAsMetaVolume(first.getVolumeURI(), _dbClient, capabilities);
            if (storageSystem.checkIfVmax3()) {
                // VMAX3 does not support META volumes, but we can still get here
                // in a change-VPool scenario, so force regular volume creation.
                createAsMetaVolume = false;
            }
            if (createAsMetaVolume) {
                // For vmax thin meta volumes we can create multiple meta volumes in one smis request
                if (volume.getThinlyProvisioned() && storageSystem.getSystemType().equals(StorageSystem.Type.vmax.toString())) {
                    workflow.createStep(CREATE_VOLUMES_STEP_GROUP, String.format("Creating meta volumes:%n%s", getVolumesMsg(_dbClient, volumeURIs)), waitFor, deviceURI, getDeviceType(deviceURI), this.getClass(), createMetaVolumesMethod(deviceURI, poolURI, volumeURIs, capabilities), rollbackCreateMetaVolumesMethod(deviceURI, volumeURIs), null);
                } else {
                    // Add workflow step for each meta volume, with a per-volume rollback
                    // bound to its own step id.
                    for (URI metaVolumeURI : volumeURIs) {
                        List<URI> metaVolumeURIs = new ArrayList<URI>();
                        metaVolumeURIs.add(metaVolumeURI);
                        String stepId = workflow.createStepId();
                        workflow.createStep(CREATE_VOLUMES_STEP_GROUP, String.format("Creating meta volume:%n%s", getVolumesMsg(_dbClient, metaVolumeURIs)), waitFor, deviceURI, getDeviceType(deviceURI), this.getClass(), createMetaVolumeMethod(deviceURI, poolURI, metaVolumeURI, capabilities), rollbackCreateMetaVolumeMethod(deviceURI, metaVolumeURI, stepId), stepId);
                    }
                }
            } else {
                // Regular (non-meta) volumes: one step covers the whole bucket.
                workflow.createStep(CREATE_VOLUMES_STEP_GROUP, String.format("Creating volumes:%n%s", getVolumesMsg(_dbClient, volumeURIs)), waitFor, deviceURI, getDeviceType(deviceURI), this.getClass(), createVolumesMethod(deviceURI, poolURI, volumeURIs, capabilities), rollbackCreateVolumesMethod(deviceURI, volumeURIs), null);
            }
            // Following workflow step is only applicable to HDS Thin Volume modification:
            // applies a tiering policy after creation, waiting on the create step group.
            if (getDeviceType(deviceURI).equalsIgnoreCase(Type.hds.name())) {
                boolean modifyHitachiVolumeToApplyTieringPolicy = HDSUtils.isVolumeModifyApplicable(first.getVolumeURI(), _dbClient);
                if (modifyHitachiVolumeToApplyTieringPolicy) {
                    workflow.createStep(MODIFY_VOLUMES_STEP_GROUP, String.format("Modifying volumes:%n%s", getVolumesMsg(_dbClient, volumeURIs)), CREATE_VOLUMES_STEP_GROUP, deviceURI, getDeviceType(deviceURI), this.getClass(), moidfyVolumesMethod(deviceURI, poolURI, volumeURIs), rollbackCreateVolumesMethod(deviceURI, volumeURIs), null);
                }
            }
        }
    }
    // Subsequent orchestration steps wait on the volume-creation step group.
    waitFor = CREATE_VOLUMES_STEP_GROUP;
    return waitFor;
}
Also used : VirtualPoolCapabilityValuesWrapper(com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper) VolumeDescriptor(com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor) ArrayList(java.util.ArrayList) NamedURI(com.emc.storageos.db.client.model.NamedURI) FCTN_MIRROR_TO_URI(com.emc.storageos.db.client.util.CommonTransformerFunctions.FCTN_MIRROR_TO_URI) URI(java.net.URI) Volume(com.emc.storageos.db.client.model.Volume) Map(java.util.Map) OpStatusMap(com.emc.storageos.db.client.model.OpStatusMap) HashMap(java.util.HashMap) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)

Example 38 with VolumeDescriptor

use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.

The class BlockDeviceController, method addStepsForCreateFullCopy.

/**
 * Adds a step to create native block full copies (clones) for any descriptor
 * whose volume has an associated source volume.
 *
 * @see com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationInterface#addStepsForCreateFullCopy(com.emc.storageos.workflow.Workflow, java.lang.String, java.util.List, java.lang.String)
 */
@Override
public String addStepsForCreateFullCopy(Workflow workflow, String waitFor, List<VolumeDescriptor> volumeDescriptors, String taskId) throws InternalException {
    // Only BLOCK_DATA and VPLEX_IMPORT_VOLUME descriptors are candidates.
    List<VolumeDescriptor> blockVolumeDescriptors = VolumeDescriptor.filterByType(volumeDescriptors, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.BLOCK_DATA, VolumeDescriptor.Type.VPLEX_IMPORT_VOLUME }, new VolumeDescriptor.Type[] {});
    // If no volumes to create, just return.
    if (blockVolumeDescriptors.isEmpty()) {
        return waitFor;
    }
    URI storageURI = null;
    boolean createInactive = false;
    List<URI> fullCopyList = new ArrayList<URI>();
    for (VolumeDescriptor descriptor : blockVolumeDescriptors) {
        Volume volume = _dbClient.queryObject(Volume.class, descriptor.getVolumeURI());
        if (volume != null && !volume.getInactive()) {
            URI parentId = volume.getAssociatedSourceVolume();
            // A non-null associated source marks this volume as a full-copy target.
            if (!NullColumnValueGetter.isNullURI(parentId)) {
                fullCopyList.add(volume.getId());
                // NOTE(review): storageURI and createInactive keep the values from
                // the LAST matching descriptor; this assumes all full copies in one
                // request share a storage system and create-inactive setting — confirm.
                storageURI = volume.getStorageController();
                // FIX: the original used Boolean.getBoolean(...), which looks up a
                // JVM system property named by the argument string and therefore
                // always returned false here. parseBoolean parses the value itself.
                createInactive = Boolean.parseBoolean(descriptor.getCapabilitiesValues().getReplicaCreateInactive());
            }
        }
    }
    if (!fullCopyList.isEmpty()) {
        String stepId = workflow.createStepId();
        // Now add the step to create the block full copy on the storage system.
        StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, storageURI);
        Workflow.Method createFullCopyMethod = new Workflow.Method(METHOD_CREATE_FULL_COPY_STEP, storageURI, fullCopyList, createInactive);
        Workflow.Method createFullCopyOrchestrationExecutionRollbackMethod = new Workflow.Method(METHOD_CREATE_FULLCOPY_ORCHESTRATE_ROLLBACK_STEP, workflow.getWorkflowURI(), stepId);
        waitFor = workflow.createStep(FULL_COPY_CREATE_ORCHESTRATION_STEP, "Create Block Full Copy", waitFor, storageSystem.getId(), storageSystem.getSystemType(), this.getClass(), createFullCopyMethod, createFullCopyOrchestrationExecutionRollbackMethod, stepId);
        _log.info(String.format("Added %s step [%s] in workflow", FULL_COPY_CREATE_STEP_GROUP, stepId));
    }
    return waitFor;
}
Also used : VolumeDescriptor(com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor) Volume(com.emc.storageos.db.client.model.Volume) ArrayList(java.util.ArrayList) Workflow(com.emc.storageos.workflow.Workflow) NamedURI(com.emc.storageos.db.client.model.NamedURI) FCTN_MIRROR_TO_URI(com.emc.storageos.db.client.util.CommonTransformerFunctions.FCTN_MIRROR_TO_URI) URI(java.net.URI) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)

Example 39 with VolumeDescriptor

use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.

The class BlockDeviceController, method addStepsForDeleteVolumes.

/**
 * {@inheritDoc}
 * <p>
 * Adds one volume-deletion step per storage device for the descriptor types
 * this controller owns, after removing any volumes flagged "do not delete".
 */
@Override
public String addStepsForDeleteVolumes(Workflow workflow, String waitFor, List<VolumeDescriptor> volumes, String taskId) throws ControllerException {
    // Keep only the descriptor types the BlockDeviceController deletes directly.
    List<VolumeDescriptor> deletable = VolumeDescriptor.filterByType(volumes,
            new VolumeDescriptor.Type[] { VolumeDescriptor.Type.BLOCK_DATA, VolumeDescriptor.Type.RP_JOURNAL,
                    VolumeDescriptor.Type.RP_TARGET, VolumeDescriptor.Type.RP_VPLEX_VIRT_JOURNAL,
                    VolumeDescriptor.Type.RP_VPLEX_VIRT_TARGET }, null);
    // Volumes flagged to not be fully deleted are dropped from the work list.
    List<VolumeDescriptor> doNotDelete = VolumeDescriptor.getDoNotDeleteDescriptors(deletable);
    if (doNotDelete != null && !doNotDelete.isEmpty()) {
        deletable.removeAll(doNotDelete);
    }
    // Nothing left to delete: keep the incoming wait point unchanged.
    if (deletable.isEmpty()) {
        return waitFor;
    }
    // One deletion step per storage device.
    Map<URI, List<VolumeDescriptor>> deviceMap = VolumeDescriptor.getDeviceMap(deletable);
    for (Map.Entry<URI, List<VolumeDescriptor>> deviceEntry : deviceMap.entrySet()) {
        URI deviceURI = deviceEntry.getKey();
        List<URI> volumeURIs = VolumeDescriptor.getVolumeURIs(deviceEntry.getValue());
        workflow.createStep(DELETE_VOLUMES_STEP_GROUP,
                String.format("Deleting volumes:%n%s", getVolumesMsg(_dbClient, volumeURIs)),
                waitFor, deviceURI, getDeviceType(deviceURI), this.getClass(),
                deleteVolumesMethod(deviceURI, volumeURIs), null, null);
    }
    // Subsequent steps wait on the deletion step group.
    return DELETE_VOLUMES_STEP_GROUP;
}
Also used : Type(com.emc.storageos.db.client.model.DiscoveredDataObject.Type) LockType(com.emc.storageos.locking.LockType) InterfaceType(com.emc.storageos.db.client.model.StorageProvider.InterfaceType) TechnologyType(com.emc.storageos.db.client.model.BlockSnapshot.TechnologyType) RecordType(com.emc.storageos.volumecontroller.impl.monitoring.cim.enums.RecordType) VolumeDescriptor(com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor) Arrays.asList(java.util.Arrays.asList) ApplicationAddVolumeList(com.emc.storageos.volumecontroller.ApplicationAddVolumeList) ArrayList(java.util.ArrayList) URIQueryResultList(com.emc.storageos.db.client.constraint.URIQueryResultList) List(java.util.List) NamedURI(com.emc.storageos.db.client.model.NamedURI) FCTN_MIRROR_TO_URI(com.emc.storageos.db.client.util.CommonTransformerFunctions.FCTN_MIRROR_TO_URI) URI(java.net.URI)

Example 40 with VolumeDescriptor

use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.

The class BlockDeviceController, method addStepsForExpandVolume.

/**
 * Adds workflow steps to expand the given volumes. For VPLEX virtual volumes
 * the backend (associated) volumes are expanded instead, skipping any backend
 * volume covered by a VPLEX migration. A step is added only when the requested
 * size exceeds the current provisioned capacity.
 */
@Override
public String addStepsForExpandVolume(Workflow workflow, String waitFor, List<VolumeDescriptor> volumeDescriptors, String taskId) throws InternalException {
    // Keep only the descriptor types the BlockDeviceController expands directly.
    volumeDescriptors = VolumeDescriptor.filterByType(volumeDescriptors, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.BLOCK_DATA, VolumeDescriptor.Type.RP_SOURCE, VolumeDescriptor.Type.RP_TARGET, VolumeDescriptor.Type.RP_EXISTING_SOURCE, VolumeDescriptor.Type.RP_VPLEX_VIRT_SOURCE, VolumeDescriptor.Type.RP_VPLEX_VIRT_TARGET }, null);
    if (volumeDescriptors == null || volumeDescriptors.isEmpty()) {
        return waitFor;
    }
    // Volume URI -> requested new size, for volumes that genuinely need expansion.
    Map<URI, Long> volumesToExpand = new HashMap<URI, Long>();
    // Load any VPLEX migrations referenced by the request; backend volumes with a
    // migration are ignored below. (The original re-checked volumeDescriptors !=
    // null here, but the guard above already returned, so the check was removed.)
    List<Migration> migrations = null;
    List<VolumeDescriptor> migrateDescriptors = VolumeDescriptor.filterByType(volumeDescriptors, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.VPLEX_MIGRATE_VOLUME }, null);
    if (migrateDescriptors != null && !migrateDescriptors.isEmpty()) {
        // Load the migration objects for use later.
        migrations = new ArrayList<Migration>();
        for (VolumeDescriptor migrateDescriptor : migrateDescriptors) {
            migrations.add(_dbClient.queryObject(Migration.class, migrateDescriptor.getMigrationId()));
        }
    }
    for (VolumeDescriptor descriptor : volumeDescriptors) {
        // Grab the volume; let's see if an expand is really needed.
        Volume volume = _dbClient.queryObject(Volume.class, descriptor.getVolumeURI());
        // FIX: guard against a descriptor whose volume no longer exists in the
        // database; the original would NPE on volume.getAssociatedVolumes().
        if (volume == null) {
            _log.warn("Volume {} not found in database; skipping expand", descriptor.getVolumeURI());
            continue;
        }
        // If this volume is a VPLEX volume, check whether its backend volumes need expanding.
        if (volume.getAssociatedVolumes() != null && !volume.getAssociatedVolumes().isEmpty()) {
            for (String volStr : volume.getAssociatedVolumes()) {
                URI volStrURI = URI.create(volStr);
                Volume associatedVolume = _dbClient.queryObject(Volume.class, volStrURI);
                boolean migrationExists = false;
                // If there are any volumes that are tagged for migration, ignore them.
                if (migrations != null && !migrations.isEmpty()) {
                    for (Migration migration : migrations) {
                        // NOTE(review): this compares the migration target with the
                        // VPLEX virtual volume id, not the backend volume id
                        // (volStrURI) — confirm that is the intended match.
                        if (migration.getTarget().equals(volume.getId())) {
                            _log.info("Volume [{}] has a migration, ignore this volume for expand.", volume.getLabel());
                            migrationExists = true;
                            break;
                        }
                    }
                }
                // Expand only when the new size exceeds the backend volume's
                // provisioned capacity; otherwise we can ignore it. FIX: also guard
                // against a missing backend volume (original would NPE).
                if (!migrationExists && associatedVolume != null && associatedVolume.getProvisionedCapacity() != null && descriptor.getVolumeSize() > associatedVolume.getProvisionedCapacity().longValue()) {
                    volumesToExpand.put(volStrURI, descriptor.getVolumeSize());
                }
            }
        } else {
            // Expand only when the new size exceeds the existing volume's provisioned capacity.
            if (volume.getProvisionedCapacity() != null && volume.getProvisionedCapacity().longValue() != 0 && descriptor.getVolumeSize() > volume.getProvisionedCapacity().longValue()) {
                volumesToExpand.put(volume.getId(), descriptor.getVolumeSize());
            }
        }
    }
    // If any expand step is added, later steps must wait on the expand group.
    String nextStep = (volumesToExpand.size() > 0) ? BLOCK_VOLUME_EXPAND_GROUP : waitFor;
    for (Map.Entry<URI, Long> entry : volumesToExpand.entrySet()) {
        _log.info("Creating WF step for Expand Volume for  {}", entry.getKey().toString());
        Volume volumeToExpand = _dbClient.queryObject(Volume.class, entry.getKey());
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, volumeToExpand.getStorageController());
        String stepId = workflow.createStepId();
        // FIX: the step description formatted the Volume object itself (its
        // toString); use the volume label so the description is human-readable.
        workflow.createStep(BLOCK_VOLUME_EXPAND_GROUP, String.format("Expand Block volume %s", volumeToExpand.getLabel()), waitFor, storage.getId(), getDeviceType(storage.getId()), BlockDeviceController.class, expandVolumesMethod(volumeToExpand.getStorageController(), volumeToExpand.getPool(), volumeToExpand.getId(), entry.getValue()), rollbackExpandVolumeMethod(volumeToExpand.getStorageController(), volumeToExpand.getId(), stepId), stepId);
        _log.info("Creating workflow step {}", BLOCK_VOLUME_EXPAND_GROUP);
    }
    return nextStep;
}
Also used : VolumeDescriptor(com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor) HashMap(java.util.HashMap) Migration(com.emc.storageos.db.client.model.Migration) NamedURI(com.emc.storageos.db.client.model.NamedURI) FCTN_MIRROR_TO_URI(com.emc.storageos.db.client.util.CommonTransformerFunctions.FCTN_MIRROR_TO_URI) URI(java.net.URI) Type(com.emc.storageos.db.client.model.DiscoveredDataObject.Type) LockType(com.emc.storageos.locking.LockType) InterfaceType(com.emc.storageos.db.client.model.StorageProvider.InterfaceType) TechnologyType(com.emc.storageos.db.client.model.BlockSnapshot.TechnologyType) RecordType(com.emc.storageos.volumecontroller.impl.monitoring.cim.enums.RecordType) Volume(com.emc.storageos.db.client.model.Volume) Map(java.util.Map) OpStatusMap(com.emc.storageos.db.client.model.OpStatusMap) HashMap(java.util.HashMap) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)

Aggregations

VolumeDescriptor (com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor)117 Volume (com.emc.storageos.db.client.model.Volume)98 URI (java.net.URI)86 NamedURI (com.emc.storageos.db.client.model.NamedURI)77 ArrayList (java.util.ArrayList)77 StorageSystem (com.emc.storageos.db.client.model.StorageSystem)38 HashMap (java.util.HashMap)38 StringSet (com.emc.storageos.db.client.model.StringSet)29 InternalException (com.emc.storageos.svcs.errorhandling.resources.InternalException)27 URIQueryResultList (com.emc.storageos.db.client.constraint.URIQueryResultList)19 FCTN_STRING_TO_URI (com.emc.storageos.db.client.util.CommonTransformerFunctions.FCTN_STRING_TO_URI)18 Workflow (com.emc.storageos.workflow.Workflow)18 ApplicationAddVolumeList (com.emc.storageos.volumecontroller.ApplicationAddVolumeList)17 ControllerException (com.emc.storageos.volumecontroller.ControllerException)17 List (java.util.List)17 BlockOrchestrationController (com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController)16 VirtualPool (com.emc.storageos.db.client.model.VirtualPool)16 InternalServerErrorException (com.emc.storageos.svcs.errorhandling.resources.InternalServerErrorException)16 URISyntaxException (java.net.URISyntaxException)16 BlockConsistencyGroup (com.emc.storageos.db.client.model.BlockConsistencyGroup)15