Example 11 with BlockOrchestrationController

Use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in project coprhd-controller by CoprHD.

From the class VPlexBlockServiceApiImpl, method createVolumes.

/**
 * {@inheritDoc}
 *
 * @throws InternalException
 */
@Override
public TaskList createVolumes(VolumeCreate param, Project project, VirtualArray vArray, VirtualPool vPool, Map<VpoolUse, List<Recommendation>> recommendationMap, TaskList taskList, String task, VirtualPoolCapabilityValuesWrapper vPoolCapabilities) throws InternalException {
    List<Recommendation> volRecommendations = recommendationMap.get(VpoolUse.ROOT);
    List<Recommendation> srdfCopyRecommendations = recommendationMap.get(VpoolUse.SRDF_COPY);
    if (taskList == null) {
        taskList = new TaskList();
    }
    List<URI> allVolumes = new ArrayList<URI>();
    List<VolumeDescriptor> descriptors = createVPlexVolumeDescriptors(param, project, vArray, vPool, volRecommendations, task, vPoolCapabilities, vPoolCapabilities.getBlockConsistencyGroup(), taskList, allVolumes, true);
    for (VolumeDescriptor desc : descriptors) {
        s_logger.info("Vplex Root Descriptors: " + desc.toString());
    }
    if (srdfCopyRecommendations != null) {
        // This may be a Vplex volume or not
        for (Recommendation srdfCopyRecommendation : srdfCopyRecommendations) {
            vArray = _dbClient.queryObject(VirtualArray.class, srdfCopyRecommendation.getVirtualArray());
            vPool = srdfCopyRecommendation.getVirtualPool();
            List<VolumeDescriptor> srdfCopyDescriptors = new ArrayList<VolumeDescriptor>();
            List<Recommendation> copyRecommendations = new ArrayList<Recommendation>();
            copyRecommendations.add(srdfCopyRecommendation);
            if (srdfCopyRecommendation instanceof VPlexRecommendation) {
                String name = param.getName();
                // Do not pass in the consistency group for vplex volumes fronting targets
                // as we will eventually put them in the target CG.
                srdfCopyDescriptors = createVPlexVolumeDescriptors(param, project, vArray, vPool, copyRecommendations, task, vPoolCapabilities, null, taskList, allVolumes, true);
                param.setName(name);
            } else {
                srdfCopyDescriptors = super.createVolumesAndDescriptors(srdfCopyDescriptors, param.getName() + "_srdf_copy", vPoolCapabilities.getSize(), project, vArray, vPool, copyRecommendations, taskList, task, vPoolCapabilities);
            }
            for (VolumeDescriptor desc : srdfCopyDescriptors) {
                s_logger.info("SRDF Copy: " + desc.toString());
            }
            descriptors.addAll(srdfCopyDescriptors);
        }
    }
    // Log volume descriptor information
    logVolumeDescriptorPrecreateInfo(descriptors, task);
    // Now we get the Orchestration controller and use it to create the volumes of all types.
    try {
        BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
        controller.createVolumes(descriptors, task);
    } catch (InternalException e) {
        if (s_logger.isErrorEnabled()) {
            s_logger.error("Controller error", e);
        }
        String errMsg = String.format("Controller error: %s", e.getMessage());
        Operation statusUpdate = new Operation(Operation.Status.error.name(), errMsg);
        for (URI volumeURI : allVolumes) {
            _dbClient.updateTaskOpStatus(Volume.class, volumeURI, task, statusUpdate);
        }
        for (TaskResourceRep volumeTask : taskList.getTaskList()) {
            volumeTask.setState(Operation.Status.error.name());
            volumeTask.setMessage(errMsg);
        }
        throw e;
    }
    return taskList;
}
Also used : VolumeDescriptor(com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor) VirtualArray(com.emc.storageos.db.client.model.VirtualArray) BlockOrchestrationController(com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController) VPlexRecommendation(com.emc.storageos.volumecontroller.VPlexRecommendation) TaskList(com.emc.storageos.model.TaskList) ArrayList(java.util.ArrayList) TaskResourceRep(com.emc.storageos.model.TaskResourceRep) Operation(com.emc.storageos.db.client.model.Operation) FCTN_STRING_TO_URI(com.emc.storageos.db.client.util.CommonTransformerFunctions.FCTN_STRING_TO_URI) NamedURI(com.emc.storageos.db.client.model.NamedURI) URI(java.net.URI) FCTN_VPLEX_MIRROR_TO_URI(com.emc.storageos.db.client.util.CommonTransformerFunctions.FCTN_VPLEX_MIRROR_TO_URI) SRDFCopyRecommendation(com.emc.storageos.volumecontroller.SRDFCopyRecommendation) VolumeRecommendation(com.emc.storageos.api.service.impl.placement.VolumeRecommendation) VPlexRecommendation(com.emc.storageos.volumecontroller.VPlexRecommendation) Recommendation(com.emc.storageos.volumecontroller.Recommendation) SRDFRecommendation(com.emc.storageos.volumecontroller.SRDFRecommendation) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) Volume(com.emc.storageos.db.client.model.Volume)
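
The examples on this page share a common dispatch shape: build a list of VolumeDescriptor objects, obtain the BlockOrchestrationController via getController, hand the descriptors to it, and on failure push the error into every affected task before rethrowing. Below is a minimal sketch of that shared pattern, not code from the project: the helper name submitCreateDescriptors is hypothetical, while getController, _dbClient, and the error handling mirror the example above.

import java.net.URI;
import java.util.List;
import com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController;
import com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor;
import com.emc.storageos.db.client.model.Operation;
import com.emc.storageos.db.client.model.Volume;
import com.emc.storageos.model.TaskList;
import com.emc.storageos.model.TaskResourceRep;
import com.emc.storageos.svcs.errorhandling.resources.InternalException;

// Hypothetical helper distilling the dispatch pattern used by the service API
// implementations above; assumes their getController(...) lookup and _dbClient field.
private void submitCreateDescriptors(List<VolumeDescriptor> descriptors, List<URI> volumeURIs,
        TaskList taskList, String task) throws InternalException {
    try {
        // One orchestration controller handles descriptors of all types
        // (VPLEX, SRDF, RP, plain block) in a single workflow.
        BlockOrchestrationController controller = getController(BlockOrchestrationController.class,
                BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
        controller.createVolumes(descriptors, task);
    } catch (InternalException e) {
        // Mark the op status of every prepared volume and every returned task
        // as errored, then rethrow so the API layer surfaces the failure.
        String errMsg = String.format("Controller error: %s", e.getMessage());
        Operation statusUpdate = new Operation(Operation.Status.error.name(), errMsg);
        for (URI volumeURI : volumeURIs) {
            _dbClient.updateTaskOpStatus(Volume.class, volumeURI, task, statusUpdate);
        }
        for (TaskResourceRep volumeTask : taskList.getTaskList()) {
            volumeTask.setState(Operation.Status.error.name());
            volumeTask.setMessage(errMsg);
        }
        throw e;
    }
}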

Example 12 with BlockOrchestrationController

Use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in project coprhd-controller by CoprHD.

From the class RPBlockServiceApiImpl, method removeProtection.

/**
 * Removes protection from the volume and leaves it in an unprotected state.
 *
 * @param volumes the existing volume being protected.
 * @param newVpool the requested virtual pool
 * @param taskId the task identifier
 * @throws InternalException
 */
private void removeProtection(List<Volume> volumes, VirtualPool newVpool, String taskId) throws InternalException {
    List<URI> volumeURIs = new ArrayList<URI>();
    for (Volume volume : volumes) {
        _log.info(String.format("Request to remove protection from Volume [%s] (%s) and move it to Virtual Pool [%s] (%s)", volume.getLabel(), volume.getId(), newVpool.getLabel(), newVpool.getId()));
        volumeURIs.add(volume.getId());
        // List of RP bookmarks to cleanup (if any)
        List<BlockSnapshot> rpBookmarks = new ArrayList<BlockSnapshot>();
        // Get all the block snapshots and RP bookmarks for the source.
        List<BlockSnapshot> sourceSnapshots = this.getSnapshotsForVolume(volume);
        // Iterate through all snapshots found for the source
        for (BlockSnapshot sourceSnapshot : sourceSnapshots) {
            // Check to see if this is an RP bookmark
            if (TechnologyType.RP.name().equals(sourceSnapshot.getTechnologyType())) {
                // An exported bookmark must be un-exported before we can remove
                // protection.
                if (sourceSnapshot.isSnapshotExported(_dbClient)) {
                    String warningMessage = String.format("RP Bookmark/Snapshot [%s](%s) is exported to Host, " + "please un-export the Bookmark/Snapshot from all exports and place the order again", sourceSnapshot.getLabel(), sourceSnapshot.getId());
                    _log.warn(warningMessage);
                    throw APIException.badRequests.rpBlockApiImplRemoveProtectionException(warningMessage);
                }
                // Add bookmark to be cleaned up in ViPR. These
                // would have been automatically removed in RP when
                // removing protection anyway. So this is a pro-active
                // cleanup step.
                rpBookmarks.add(sourceSnapshot);
            } else {
                // Check to see if the source volume is a RP+VPLEX/MetroPoint volume.
                if (RPHelper.isVPlexVolume(volume, _dbClient)) {
                    // There are block snapshots on the RP+VPLEX/MetroPoint Source, throw an exception to inform the
                    // user. We can not remove protection from a RP+VPLEX Source when there are active block snapshots.
                    // RP+VPLEX/MetroPoint block snapshots are actually replica group snapshots (in a CG). Since we need to
                    // remove the CG from the volume we can not have the replica group containing snaps in it when
                    // trying to remove protection.
                    String warningMessage = String.format("RecoverPoint protected VPLEX Volume [%s](%s) has an active snapshot, please delete the " + "following snapshot and place the order again: [%s](%s)", volume.getLabel(), volume.getId(), sourceSnapshot.getLabel(), sourceSnapshot.getId());
                    warningMessage = warningMessage.substring(0, warningMessage.length() - 2);
                    _log.warn(warningMessage);
                    throw APIException.badRequests.rpBlockApiImplRemoveProtectionException(warningMessage);
                }
            }
        }
        // Validate the targets. We cannot remove protection if:
        // 1. Any of the target volumes is exported
        // 2. There are local array snapshots on any of the targets
        for (String targetId : volume.getRpTargets()) {
            Volume targetVolume = _dbClient.queryObject(Volume.class, URI.create(targetId));
            // Ensure targets are not exported
            if (targetVolume.isVolumeExported(_dbClient, true, true)) {
                String warningMessage = String.format("Target Volume [%s](%s) is exported to Host, please " + "un-export the volume from all exports and place the order again", targetVolume.getLabel(), targetVolume.getId());
                _log.warn(warningMessage);
                throw APIException.badRequests.rpBlockApiImplRemoveProtectionException(warningMessage);
            }
            List<BlockSnapshot> targetSnapshots = this.getSnapshotsForVolume(targetVolume);
            for (BlockSnapshot targetSnapshot : targetSnapshots) {
                // There are snapshots on the targets, throw an exception to inform the
                // user. We do not want to auto-clean up the snapshots on the target.
                // The user should first clean up those snapshots.
                String warningMessage = String.format("Target Volume [%s] (%s) has an active snapshot, please delete the " + "following snapshot and place the order again: [%s](%s)", volume.getLabel(), volume.getId(), targetSnapshot.getLabel(), targetSnapshot.getId());
                _log.warn(warningMessage);
                throw APIException.badRequests.rpBlockApiImplRemoveProtectionException(warningMessage);
            }
        }
        if (!rpBookmarks.isEmpty()) {
            for (BlockSnapshot bookmark : rpBookmarks) {
                _log.info(String.format("Deleting RP Snapshot/Bookmark [%s] (%s)", bookmark.getLabel(), bookmark.getId()));
                // Generate task id
                final String deleteSnapshotTaskId = UUID.randomUUID().toString();
                // Delete the snapshot
                this.deleteSnapshot(bookmark, Arrays.asList(bookmark), deleteSnapshotTaskId, VolumeDeleteTypeEnum.FULL.name());
            }
        }
    }
    // Get volume descriptors for all volumes to remove protection from.
    List<VolumeDescriptor> volumeDescriptors = RPHelper.getDescriptorsForVolumesToBeDeleted(null, volumeURIs, RPHelper.REMOVE_PROTECTION, newVpool, _dbClient);
    BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
    controller.deleteVolumes(volumeDescriptors, taskId);
}
Also used : VolumeDescriptor(com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor) BlockOrchestrationController(com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController) Volume(com.emc.storageos.db.client.model.Volume) ArrayList(java.util.ArrayList) BlockSnapshot(com.emc.storageos.db.client.model.BlockSnapshot) NamedURI(com.emc.storageos.db.client.model.NamedURI) URI(java.net.URI)

Example 13 with BlockOrchestrationController

Use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in project coprhd-controller by CoprHD.

From the class RPBlockServiceApiImpl, method rpVPlexDataMigration.

/**
 * Create the RP+VPLEX/MetroPoint Data Migration volume descriptors to be passed to the block orchestration
 * change vpool workflow.
 *
 * @param volumes The RP+VPLEX/MetroPoint volumes to migrate
 * @param newVpool The vpool to migrate to
 * @param taskId The task
 * @param validMigrations All valid migrations
 * @param vpoolChangeParam VirtualPool change parameters used to determine if need to suspend on migration
 * @return List of tasks
 * @throws InternalException
 */
private TaskList rpVPlexDataMigration(List<Volume> volumes, VirtualPool newVpool, String taskId, List<RPVPlexMigration> validMigrations, VirtualPoolChangeParam vpoolChangeParam) throws InternalException {
    // TaskList to return
    TaskList taskList = new TaskList();
    if (validMigrations == null || validMigrations.isEmpty()) {
        _log.warn(String.format("No RP+VPLEX migrations found"));
        return taskList;
    }
    _log.info(String.format("%s RP+VPLEX migrations found", validMigrations.size()));
    List<RPVPlexMigration> sourceVpoolMigrations = new ArrayList<RPVPlexMigration>();
    List<RPVPlexMigration> targetVpoolMigrations = new ArrayList<RPVPlexMigration>();
    List<RPVPlexMigration> journalVpoolMigrations = new ArrayList<RPVPlexMigration>();
    try {
        // Step 1
        // 
        // Group the migrations by personality
        for (RPVPlexMigration migration : validMigrations) {
            switch(migration.getType()) {
                case SOURCE:
                    sourceVpoolMigrations.add(migration);
                    break;
                case TARGET:
                    targetVpoolMigrations.add(migration);
                    break;
                case METADATA:
                    journalVpoolMigrations.add(migration);
                    break;
                default:
                    break;
            }
        }
        // Convenience booleans to quickly check which migrations are required
        boolean sourceMigrationsExist = (!sourceVpoolMigrations.isEmpty());
        boolean targetMigrationsExist = (!targetVpoolMigrations.isEmpty());
        boolean journalMigrationsExist = (!journalVpoolMigrations.isEmpty());
        if (!sourceMigrationsExist && (targetMigrationsExist || journalMigrationsExist)) {
            // When there are no Source migrations and the Source volumes are in RGs we need
            // to make sure all those Source volumes are in the request.
            // 
            // Otherwise we could have the case where some Source volumes have been moved to a
            // new vpool and some have not.
            validateSourceVolumesInRGForMigrationRequest(volumes);
        }
        _log.info(String.format("%s SOURCE migrations, %s TARGET migrations, %s METADATA migrations", sourceVpoolMigrations.size(), targetVpoolMigrations.size(), journalVpoolMigrations.size()));
        // Buffer to log all the migrations
        StringBuffer logMigrations = new StringBuffer();
        logMigrations.append("\n\nRP+VPLEX Migrations:\n");
        // Step 2
        // 
        // Let's find out if there are any Source and Target volumes to migrate.
        // Source and Target migrations will be treated in two different ways depending
        // on if the VPLEX backend volumes are in an array Replication Group(RG) or not.
        // 
        // 1. In RG
        // Being in an RG means that the all volumes in the RG will need to be
        // grouped and migrated together.
        // NOTE:
        // a) All volumes in the RG will need to be selected for the operation to proceed.
        // b) There is restriction on the number of volumes in the RG that will be allowed for the migration.
        // Default value is 25 volumes. This is an existing limitation in the VPLEX code.
        // c) Journal volumes will never be in a backend RG.
        // 2. Not in RG
        // Treated as a normal single migration.
        HashMap<VirtualPool, List<Volume>> allSourceVolumesToMigrate = new HashMap<VirtualPool, List<Volume>>();
        HashMap<VirtualPool, List<Volume>> allTargetVolumesToMigrate = new HashMap<VirtualPool, List<Volume>>();
        findSourceAndTargetMigrations(volumes, newVpool, sourceMigrationsExist, allSourceVolumesToMigrate, targetMigrationsExist, allTargetVolumesToMigrate, targetVpoolMigrations);
        // Step 3
        // 
        // Handle all Source and Target migrations. The ones grouped by RG will
        // be migrated together. The others will be treated as single migrations.
        // Map to store single migrations (those not grouped by RG)
        Map<Volume, VirtualPool> singleMigrations = new HashMap<Volume, VirtualPool>();
        // Source
        // 
        // Source volumes could need to be grouped by RG or not (single migration).
        // 
        // Grouped migrations will have a migration WF initiated via the
        // call to migrateVolumesInReplicationGroup().
        // 
        // Single migrations will be collected afterwards to be migrated explicitly in Step 4 and 6
        // below.
        rpVPlexGroupedMigrations(allSourceVolumesToMigrate, singleMigrations, Volume.PersonalityTypes.SOURCE.name(), logMigrations, taskList, vpoolChangeParam);
        // Targets
        // 
        // Target volumes could need to be grouped by RG or not (single migration).
        // 
        // Grouped migrations will have a migration WF initiated via the
        // call to migrateVolumesInReplicationGroup().
        // 
        // Single migrations will be collected afterwards to be migrated explicitly in Step 4 and 6
        // below.
        rpVPlexGroupedMigrations(allTargetVolumesToMigrate, singleMigrations, Volume.PersonalityTypes.TARGET.name(), logMigrations, taskList, vpoolChangeParam);
        // Journals
        // 
        // Journals will never be in RGs so they will always be treated as single migrations.
        // Journal volumes must be checked against the CG. So we need to gather all affected
        // CGs in the request.
        // A new task will be generated to track each Journal migration.
        Set<URI> cgURIs = BlockConsistencyGroupUtils.getAllCGsFromVolumes(volumes);
        rpVPlexJournalMigrations(journalMigrationsExist, journalVpoolMigrations, singleMigrations, cgURIs, logMigrations);
        logMigrations.append("\n");
        _log.info(logMigrations.toString());
        // Step 4
        // 
        // Create the migration volume descriptors for all single migrations that are not in an RG.
        List<VolumeDescriptor> migrateVolumeDescriptors = new ArrayList<VolumeDescriptor>();
        for (Map.Entry<Volume, VirtualPool> entry : singleMigrations.entrySet()) {
            Volume migrateVolume = entry.getKey();
            VirtualPool migrateToVpool = entry.getValue();
            boolean allowHighAvailabilityMigrations = true;
            if (!migrateVolume.getAssociatedVolumes().isEmpty()) {
                // A single associated (backend) volume means a VPLEX local volume, so there
                // is no HA side to migrate. This is mainly an issue for RP+VPLEX journals.
                if (migrateVolume.getAssociatedVolumes().size() <= 1) {
                    allowHighAvailabilityMigrations = false;
                }
            } else {
                // No associated (backend) volumes; only journal (METADATA) volumes are blocked
                // from HA migrations here. Ex: Active Source journals that use the default
                // Source vpool for provisioning.
                if (Volume.PersonalityTypes.METADATA.name().equals(migrateVolume.getPersonality())) {
                    allowHighAvailabilityMigrations = false;
                }
            }
            StorageSystem vplexStorageSystem = _dbClient.queryObject(StorageSystem.class, migrateVolume.getStorageController());
            migrateVolumeDescriptors.addAll(vplexBlockServiceApiImpl.createChangeVirtualPoolDescriptors(vplexStorageSystem, migrateVolume, migrateToVpool, taskId, null, null, null, allowHighAvailabilityMigrations));
        }
        // If there are no Source migrations, add DUMMY_MIGRATE descriptors for the Source volumes to
        // ensure the task is completed correctly and the vpools updated by the completer.
        if (!sourceMigrationsExist && (targetMigrationsExist || journalMigrationsExist)) {
            _log.info("No RP+VPLEX Source migrations detected, creating DUMMY_MIGRATE volume descriptors for the Source volumes.");
            for (Volume volume : volumes) {
                if (volume.checkPersonality(Volume.PersonalityTypes.SOURCE)) {
                    // Add the VPLEX Virtual Volume Descriptor for change vpool
                    VolumeDescriptor dummyMigrate = new VolumeDescriptor(VolumeDescriptor.Type.DUMMY_MIGRATE, volume.getStorageController(), volume.getId(), volume.getPool(), null);
                    Map<String, Object> volumeParams = new HashMap<String, Object>();
                    volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_EXISTING_VOLUME_ID, volume.getId());
                    volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_NEW_VPOOL_ID, newVpool.getId());
                    volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_OLD_VPOOL_ID, volume.getVirtualPool());
                    dummyMigrate.setParameters(volumeParams);
                    migrateVolumeDescriptors.add(dummyMigrate);
                }
            }
        }
        // Invoke the block orchestrator only if there are descriptors for
        // single migrations.
        if (!migrateVolumeDescriptors.isEmpty()) {
            // Generate the correct task information for single migrations
            List<Volume> migrationVolumes = new ArrayList<Volume>();
            migrationVolumes.addAll(singleMigrations.keySet());
            taskList.getTaskList().addAll(createTasksForVolumes(newVpool, migrationVolumes, taskId).getTaskList());
            // Invoke the block orchestrator for the change vpool operation
            BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
            controller.changeVirtualPool(migrateVolumeDescriptors, taskId);
        } else {
            _log.info(String.format("No extra migrations needed."));
        }
    } catch (Exception e) {
        String errorMsg = String.format("Volume VirtualPool change error: %s", e.getMessage());
        _log.error(errorMsg, e);
        for (TaskResourceRep volumeTask : taskList.getTaskList()) {
            volumeTask.setState(Operation.Status.error.name());
            volumeTask.setMessage(errorMsg);
            _dbClient.updateTaskOpStatus(Volume.class, volumeTask.getResource().getId(), taskId, new Operation(Operation.Status.error.name(), errorMsg));
        }
        throw e;
    }
    return taskList;
}
Also used : BlockOrchestrationController(com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController) HashMap(java.util.HashMap) TaskList(com.emc.storageos.model.TaskList) ArrayList(java.util.ArrayList) Operation(com.emc.storageos.db.client.model.Operation) NamedURI(com.emc.storageos.db.client.model.NamedURI) URI(java.net.URI) RPVPlexMigration(com.emc.storageos.api.service.impl.resource.utils.RPVPlexMigration) ApplicationAddVolumeList(com.emc.storageos.volumecontroller.ApplicationAddVolumeList) ArrayList(java.util.ArrayList) TaskList(com.emc.storageos.model.TaskList) VolumeGroupVolumeList(com.emc.storageos.model.application.VolumeGroupUpdateParam.VolumeGroupVolumeList) URIQueryResultList(com.emc.storageos.db.client.constraint.URIQueryResultList) StorageSystemConnectivityList(com.emc.storageos.model.systems.StorageSystemConnectivityList) List(java.util.List) StorageSystem(com.emc.storageos.db.client.model.StorageSystem) VolumeDescriptor(com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor) TaskResourceRep(com.emc.storageos.model.TaskResourceRep) VirtualPool(com.emc.storageos.db.client.model.VirtualPool) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) InternalServerErrorException(com.emc.storageos.svcs.errorhandling.resources.InternalServerErrorException) ControllerException(com.emc.storageos.volumecontroller.ControllerException) APIException(com.emc.storageos.svcs.errorhandling.resources.APIException) RecoverPointException(com.emc.storageos.recoverpoint.exceptions.RecoverPointException) Volume(com.emc.storageos.db.client.model.Volume) DiscoveredDataObject(com.emc.storageos.db.client.model.DiscoveredDataObject) DataObject(com.emc.storageos.db.client.model.DataObject) Map(java.util.Map) OpStatusMap(com.emc.storageos.db.client.model.OpStatusMap) HashMap(java.util.HashMap)
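
Example 13 hands change-vpool work to the same orchestrator via VolumeDescriptor objects whose parameter map carries the old and new vpool IDs. The sketch below isolates that plumbing for the DUMMY_MIGRATE case shown above; the method changeVpoolForVolume and its single-volume scope are hypothetical, while the descriptor constructor, parameter keys, and changeVirtualPool call are taken from the example.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController;
import com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor;
import com.emc.storageos.db.client.model.VirtualPool;
import com.emc.storageos.db.client.model.Volume;

// Hypothetical single-volume sketch; assumes getController(...) as in the examples
// and a Volume plus target VirtualPool already loaded from the database.
private void changeVpoolForVolume(Volume volume, VirtualPool newVpool, String taskId) {
    // A DUMMY_MIGRATE descriptor moves no data; as in the example above it exists so the
    // completer still updates the volume's vpool and finishes the task.
    VolumeDescriptor descriptor = new VolumeDescriptor(VolumeDescriptor.Type.DUMMY_MIGRATE,
            volume.getStorageController(), volume.getId(), volume.getPool(), null);

    // The old and new vpool IDs ride along in the descriptor's parameter map and are read
    // back by the change-vpool workflow and its completer.
    Map<String, Object> volumeParams = new HashMap<String, Object>();
    volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_EXISTING_VOLUME_ID, volume.getId());
    volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_NEW_VPOOL_ID, newVpool.getId());
    volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_OLD_VPOOL_ID, volume.getVirtualPool());
    descriptor.setParameters(volumeParams);

    List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
    descriptors.add(descriptor);

    // Invoke the block orchestrator for the change vpool operation.
    BlockOrchestrationController controller = getController(BlockOrchestrationController.class,
            BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
    controller.changeVirtualPool(descriptors, taskId);
}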

Example 14 with BlockOrchestrationController

Use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in project coprhd-controller by CoprHD.

From the class SRDFBlockServiceApiImpl, method createVolumes.

@Override
public TaskList createVolumes(final VolumeCreate param, final Project project, final VirtualArray varray, final VirtualPool vpool, final Map<VpoolUse, List<Recommendation>> recommendationMap, TaskList taskList, final String task, final VirtualPoolCapabilityValuesWrapper capabilities) throws InternalException {
    List<Recommendation> volRecommendations = recommendationMap.get(VpoolUse.ROOT);
    Long size = SizeUtil.translateSize(param.getSize());
    BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
    for (Recommendation volRecommendation : volRecommendations) {
        List<VolumeDescriptor> existingDescriptors = new ArrayList<VolumeDescriptor>();
        List<VolumeDescriptor> volumeDescriptors = createVolumesAndDescriptors(existingDescriptors, param.getName(), size, project, varray, vpool, volRecommendations, taskList, task, capabilities);
        List<URI> volumeURIs = VolumeDescriptor.getVolumeURIs(volumeDescriptors);
        try {
            controller.createVolumes(volumeDescriptors, task);
        } catch (InternalException e) {
            if (_log.isErrorEnabled()) {
                _log.error("Controller error", e);
            }
            String errorMsg = String.format("Controller error: %s", e.getMessage());
            if (volumeURIs != null) {
                for (URI volumeURI : volumeURIs) {
                    Volume volume = _dbClient.queryObject(Volume.class, volumeURI);
                    if (volume != null) {
                        Operation op = new Operation();
                        ServiceCoded coded = ServiceError.buildServiceError(ServiceCode.API_RP_VOLUME_CREATE_ERROR, errorMsg);
                        op.setMessage(errorMsg);
                        op.error(coded);
                        _dbClient.createTaskOpStatus(Volume.class, volumeURI, task, op);
                        TaskResourceRep volumeTask = toTask(volume, task, op);
                        if (volume.getPersonality() != null && volume.getPersonality().equals(Volume.PersonalityTypes.SOURCE.toString())) {
                            taskList.getTaskList().add(volumeTask);
                        }
                    }
                }
            }
        }
    }
    return taskList;
}
Also used : BlockOrchestrationController(com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController) VolumeDescriptor(com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor) ArrayList(java.util.ArrayList) TaskResourceRep(com.emc.storageos.model.TaskResourceRep) Operation(com.emc.storageos.db.client.model.Operation) NamedURI(com.emc.storageos.db.client.model.NamedURI) URI(java.net.URI) MetaVolumeRecommendation(com.emc.storageos.volumecontroller.impl.smis.MetaVolumeRecommendation) SRDFCopyRecommendation(com.emc.storageos.volumecontroller.SRDFCopyRecommendation) Recommendation(com.emc.storageos.volumecontroller.Recommendation) SRDFRecommendation(com.emc.storageos.volumecontroller.SRDFRecommendation) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) Volume(com.emc.storageos.db.client.model.Volume) ServiceCoded(com.emc.storageos.svcs.errorhandling.model.ServiceCoded)

Example 15 with BlockOrchestrationController

Use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in project coprhd-controller by CoprHD.

From the class DefaultBlockServiceApiImpl, method createVolumes.

@Override
public TaskList createVolumes(VolumeCreate param, Project project, VirtualArray neighborhood, VirtualPool cos, Map<VpoolUse, List<Recommendation>> recommendationMap, TaskList taskList, String task, VirtualPoolCapabilityValuesWrapper cosCapabilities) throws InternalException {
    Long size = SizeUtil.translateSize(param.getSize());
    List<VolumeDescriptor> existingDescriptors = new ArrayList<VolumeDescriptor>();
    List<VolumeDescriptor> volumeDescriptors = createVolumesAndDescriptors(existingDescriptors, param.getName(), size, project, neighborhood, cos, recommendationMap.get(VpoolUse.ROOT), taskList, task, cosCapabilities);
    List<Volume> preparedVolumes = getPreparedVolumes(volumeDescriptors);
    // Check for special characters in volume names
    checkVolumeNames(preparedVolumes.get(0));
    final BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
    InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_011);
    try {
        // Execute the volume creations requests
        InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_012);
        controller.createVolumes(volumeDescriptors, task);
    } catch (InternalException e) {
        _log.error("Controller error when creating volumes", e);
        failVolumeCreateRequest(task, taskList, preparedVolumes, e.getMessage());
        throw e;
    } catch (Exception e) {
        _log.error("Controller error when creating volumes", e);
        failVolumeCreateRequest(task, taskList, preparedVolumes, e.getMessage());
        throw e;
    }
    return taskList;
}
Also used : VolumeDescriptor(com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor) BlockOrchestrationController(com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController) Volume(com.emc.storageos.db.client.model.Volume) ArrayList(java.util.ArrayList) APIException(com.emc.storageos.svcs.errorhandling.resources.APIException) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) ControllerException(com.emc.storageos.volumecontroller.ControllerException) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException)

Aggregations

BlockOrchestrationController (com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController): 20 usages
VolumeDescriptor (com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor): 16 usages
Volume (com.emc.storageos.db.client.model.Volume): 16 usages
ArrayList (java.util.ArrayList): 15 usages
URI (java.net.URI): 12 usages
InternalException (com.emc.storageos.svcs.errorhandling.resources.InternalException): 11 usages
NamedURI (com.emc.storageos.db.client.model.NamedURI): 8 usages
Operation (com.emc.storageos.db.client.model.Operation): 8 usages
StorageSystem (com.emc.storageos.db.client.model.StorageSystem): 8 usages
TaskList (com.emc.storageos.model.TaskList): 7 usages
TaskResourceRep (com.emc.storageos.model.TaskResourceRep): 6 usages
VirtualPool (com.emc.storageos.db.client.model.VirtualPool): 5 usages
APIException (com.emc.storageos.svcs.errorhandling.resources.APIException): 5 usages
HashMap (java.util.HashMap): 5 usages
VirtualArray (com.emc.storageos.db.client.model.VirtualArray): 4 usages
ControllerException (com.emc.storageos.volumecontroller.ControllerException): 4 usages
BlockSnapshot (com.emc.storageos.db.client.model.BlockSnapshot): 3 usages
StringSet (com.emc.storageos.db.client.model.StringSet): 3 usages
InternalServerErrorException (com.emc.storageos.svcs.errorhandling.resources.InternalServerErrorException): 3 usages
Recommendation (com.emc.storageos.volumecontroller.Recommendation): 3 usages