
Example 41 with HDSApiClient

use of com.emc.storageos.hds.api.HDSApiClient in project coprhd-controller by CoprHD.

the class HDSStorageDevice method validateStorageProviderConnection.

@Override
public boolean validateStorageProviderConnection(String ipAddress, Integer portNumber) {
    boolean isConnectionValid = false;
    try {
        StringBuffer providerID = new StringBuffer(ipAddress).append(HDSConstants.HYPHEN_OPERATOR).append(portNumber);
        URIQueryResultList providerUriList = new URIQueryResultList();
        dbClient.queryByConstraint(AlternateIdConstraint.Factory.getStorageProviderByProviderIDConstraint(providerID.toString()), providerUriList);
        if (providerUriList.iterator().hasNext()) {
            StorageProvider provider = dbClient.queryObject(StorageProvider.class, providerUriList.iterator().next());
            HDSApiClient hdsApiClient = hdsApiFactory.getClient(HDSUtils.getHDSServerManagementServerInfo(provider), provider.getUserName(), provider.getPassword());
            List<StorageArray> storageArrayList = hdsApiClient.getStorageSystemsInfo();
            if (null != storageArrayList && !storageArrayList.isEmpty()) {
                isConnectionValid = true;
            }
        }
    } catch (Exception ex) {
        log.error("Problem in checking provider live connection for ipaddress: {} due to", ipAddress, ex);
    }
    return isConnectionValid;
}
Also used : HDSApiClient(com.emc.storageos.hds.api.HDSApiClient) StorageProvider(com.emc.storageos.db.client.model.StorageProvider) URIQueryResultList(com.emc.storageos.db.client.constraint.URIQueryResultList) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) DatabaseException(com.emc.storageos.db.exceptions.DatabaseException) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) HDSException(com.emc.storageos.hds.HDSException) StorageArray(com.emc.storageos.hds.model.StorageArray)
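The same connectivity check can be exercised outside the device controller. Below is a minimal sketch, not taken from the CoprHD source: the class and method names are hypothetical, the HDSApiFactory is assumed to be injected as in HDSStorageDevice, and the HDSUtils import path is an assumption since it is not shown in the listing above.

import java.util.List;

import com.emc.storageos.db.client.model.StorageProvider;
import com.emc.storageos.hds.api.HDSApiClient;
import com.emc.storageos.hds.api.HDSApiFactory;
import com.emc.storageos.hds.model.StorageArray;
import com.emc.storageos.volumecontroller.impl.hds.prov.utils.HDSUtils; // package assumed

public class ProviderConnectionCheckSketch {

    // Normally injected via Spring, as in HDSStorageDevice.
    private HDSApiFactory hdsApiFactory;

    /**
     * Treats a non-empty storage system list from HiCommand Device Manager as
     * proof of a live provider connection, mirroring the check in the example above.
     */
    public boolean isProviderReachable(StorageProvider provider) throws Exception {
        HDSApiClient client = hdsApiFactory.getClient(
                HDSUtils.getHDSServerManagementServerInfo(provider),
                provider.getUserName(), provider.getPassword());
        List<StorageArray> arrays = client.getStorageSystemsInfo();
        return arrays != null && !arrays.isEmpty();
    }
}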

Example 42 with HDSApiClient

use of com.emc.storageos.hds.api.HDSApiClient in project coprhd-controller by CoprHD.

the class HDSStorageDevice method doWaitForSynchronized.

/*
     * (non-Javadoc)
     * 
     * @see
     * com.emc.storageos.volumecontroller.BlockStorageDevice#doWaitForSynchronized
     * (java.lang.Class, com.emc.storageos.db.client.model.StorageSystem,
     * java.net.URI, com.emc.storageos.volumecontroller.TaskCompleter)
     */
@Override
public void doWaitForSynchronized(Class<? extends BlockObject> clazz, StorageSystem storageObj, URI target, TaskCompleter completer) {
    log.info("START waitForSynchronized for {}", target);
    try {
        Volume targetObj = dbClient.queryObject(Volume.class, target);
        // Source could be either Volume or BlockSnapshot
        BlockObject sourceObj = BlockObject.fetch(dbClient, targetObj.getAssociatedSourceVolume());
        // We split the pair which causes the data to be synchronized.
        // When the split is complete that data is synchronized.
        HDSApiClient hdsApiClient = hdsApiFactory.getClient(HDSUtils.getHDSServerManagementServerInfo(storageObj), storageObj.getSmisUserName(), storageObj.getSmisPassword());
        HDSApiProtectionManager hdsApiProtectionManager = hdsApiClient.getHdsApiProtectionManager();
        String replicationGroupObjectID = hdsApiProtectionManager.getReplicationGroupObjectId();
        ReplicationInfo replicationInfo = hdsApiProtectionManager.getReplicationInfoFromSystem(sourceObj.getNativeId(), targetObj.getNativeId()).first;
        hdsApiProtectionManager.modifyShadowImagePair(replicationGroupObjectID, replicationInfo.getObjectID(), HDSApiProtectionManager.ShadowImageOperationType.split, storageObj.getModel());
        // Update state in case we are waiting for synchronization
        // after creation of a new full copy that was not created
        // inactive.
        String state = targetObj.getReplicaState();
        if (!ReplicationState.SYNCHRONIZED.name().equals(state)) {
            targetObj.setSyncActive(true);
            targetObj.setReplicaState(ReplicationState.SYNCHRONIZED.name());
            dbClient.persistObject(targetObj);
        }
        // Queue job to wait for replication status to move to split.
        ControllerServiceImpl.enqueueJob(new QueueJob(new HDSReplicationSyncJob(storageObj.getId(), sourceObj.getNativeId(), targetObj.getNativeId(), ReplicationStatus.SPLIT, completer)));
    } catch (Exception e) {
        log.error("Exception occurred while waiting for synchronization", e);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
        completer.error(dbClient, serviceError);
    }
    log.info("completed doWaitForSynchronized");
}
Also used : HDSReplicationSyncJob(com.emc.storageos.volumecontroller.impl.hds.prov.job.HDSReplicationSyncJob) HDSApiClient(com.emc.storageos.hds.api.HDSApiClient) ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) Volume(com.emc.storageos.db.client.model.Volume) HDSApiProtectionManager(com.emc.storageos.hds.api.HDSApiProtectionManager) ReplicationInfo(com.emc.storageos.hds.model.ReplicationInfo) QueueJob(com.emc.storageos.volumecontroller.impl.job.QueueJob) BlockObject(com.emc.storageos.db.client.model.BlockObject) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) DatabaseException(com.emc.storageos.db.exceptions.DatabaseException) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) HDSException(com.emc.storageos.hds.HDSException)
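The split itself can be read separately from the job plumbing. Below is a minimal sketch (hypothetical class and method names, injected HDSApiFactory assumed, HDSUtils import path assumed) of the ShadowImage split call that doWaitForSynchronized issues before queueing HDSReplicationSyncJob.

import com.emc.storageos.db.client.model.StorageSystem;
import com.emc.storageos.hds.api.HDSApiClient;
import com.emc.storageos.hds.api.HDSApiFactory;
import com.emc.storageos.hds.api.HDSApiProtectionManager;
import com.emc.storageos.hds.model.ReplicationInfo;
import com.emc.storageos.volumecontroller.impl.hds.prov.utils.HDSUtils; // package assumed

public class ShadowImageSplitSketch {

    private HDSApiFactory hdsApiFactory;

    /**
     * Splits an existing ShadowImage pair; the split is what drives the target
     * to a synchronized state, which HDSReplicationSyncJob then polls for.
     */
    public void splitPair(StorageSystem system, String sourceNativeId, String targetNativeId) throws Exception {
        HDSApiClient client = hdsApiFactory.getClient(
                HDSUtils.getHDSServerManagementServerInfo(system),
                system.getSmisUserName(), system.getSmisPassword());
        HDSApiProtectionManager protectionManager = client.getHdsApiProtectionManager();
        // The replication group object ID scopes the pair operation.
        String groupObjectId = protectionManager.getReplicationGroupObjectId();
        // The first element of the returned pair is the ReplicationInfo for this source/target.
        ReplicationInfo info = protectionManager
                .getReplicationInfoFromSystem(sourceNativeId, targetNativeId).first;
        protectionManager.modifyShadowImagePair(groupObjectId, info.getObjectID(),
                HDSApiProtectionManager.ShadowImageOperationType.split, system.getModel());
    }
}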

Example 43 with HDSApiClient

use of com.emc.storageos.hds.api.HDSApiClient in project coprhd-controller by CoprHD.

the class HDSStorageDevice method doCleanupMetaMembers.

/*
     * (non-Javadoc)
     * 
     * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doCleanupMetaMembers(com.emc.storageos.db.client.model.StorageSystem,
     * com.emc.storageos.db.client.model.Volume,
     * com.emc.storageos.volumecontroller.impl.block.taskcompleter.CleanupMetaVolumeMembersCompleter)
     */
@Override
public void doCleanupMetaMembers(StorageSystem storageSystem, Volume volume, CleanupMetaVolumeMembersCompleter cleanupCompleter) throws DeviceControllerException {
    // Remove meta member volumes from storage device
    try {
        log.info(String.format("doCleanupMetaMembers  Start - Array: %s, Volume: %s", storageSystem.getSerialNumber(), volume.getLabel()));
        // Load meta volume members from WF data
        String sourceStepId = cleanupCompleter.getSourceStepId();
        HDSApiClient hdsApiClient = hdsApiFactory.getClient(HDSUtils.getHDSServerManagementServerInfo(storageSystem), storageSystem.getUsername(), storageSystem.getSmisPassword());
        List<String> metaMembers = (ArrayList<String>) WorkflowService.getInstance().loadStepData(sourceStepId);
        if (metaMembers != null && !metaMembers.isEmpty()) {
            log.info(String.format("doCleanupMetaMembers: Members stored for meta volume: %n %s", metaMembers));
            // Check if volumes still exist in array and if it is not composite member (already
            // added to the meta volume)
            Set<String> volumeIds = new HashSet<String>();
            for (String logicalUnitObjectId : metaMembers) {
                LogicalUnit logicalUnit = hdsApiClient.getLogicalUnitInfo(HDSUtils.getSystemObjectID(storageSystem), logicalUnitObjectId);
                if (logicalUnit != null) {
                    log.debug("doCleanupMetaMembers: Volume: " + logicalUnitObjectId + ", Usage of volume: " + logicalUnit.getComposite());
                    if (logicalUnit.getComposite() != HDSConstants.COMPOSITE_ELEMENT_MEMBER) {
                        volumeIds.add(logicalUnitObjectId);
                    }
                }
            }
            if (volumeIds.isEmpty()) {
                log.info("doCleanupMetaMembers: No meta members to cleanup in array.");
                cleanupCompleter.ready(dbClient);
            } else {
                log.info(String.format("doCleanupMetaMembers: Members to cleanup in array: %n   %s", volumeIds));
                // Prepare parameters and call method to delete meta members from array
                HDSCleanupMetaVolumeMembersJob hdsJobCompleter = null;
                // When "cleanup" is separate workflow step, call async (for example rollback
                // step in volume expand)
                // Otherwise, call synchronously (for example when cleanup is part of meta
                // volume create rollback)
                String asyncMessageId = hdsApiClient.deleteThickLogicalUnits(HDSUtils.getSystemObjectID(storageSystem), volumeIds, storageSystem.getModel());
                if (asyncMessageId == null) {
                    throw HDSException.exceptions.asyncTaskFailed("Unable to get async taskId from HiCommand Device Manager for the delete volume call");
                }
                if (cleanupCompleter.isWFStep()) {
                    if (asyncMessageId != null) {
                        ControllerServiceImpl.enqueueJob(new QueueJob(new HDSCleanupMetaVolumeMembersJob(asyncMessageId, storageSystem.getId(), volume.getId(), cleanupCompleter)));
                    }
                } else {
                    // invoke synchronously
                    hdsJobCompleter = new HDSCleanupMetaVolumeMembersJob(asyncMessageId, storageSystem.getId(), volume.getId(), cleanupCompleter);
                    ((HDSMetaVolumeOperations) metaVolumeOperations).invokeMethodSynchronously(hdsApiFactory, asyncMessageId, hdsJobCompleter);
                }
            }
        } else {
            log.info("doCleanupMetaMembers: No meta members stored for meta volume. Nothing to cleanup in array.");
            cleanupCompleter.ready(dbClient);
        }
    } catch (Exception e) {
        log.error("Problem in doCleanupMetaMembers: ", e);
        ServiceError error = DeviceControllerErrors.smis.methodFailed("doCleanupMetaMembers", e.getMessage());
        cleanupCompleter.error(dbClient, error);
    }
    log.info(String.format("doCleanupMetaMembers End - Array: %s,  Volume: %s", storageSystem.getSerialNumber(), volume.getLabel()));
}
Also used : HDSApiClient(com.emc.storageos.hds.api.HDSApiClient) ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) HDSCleanupMetaVolumeMembersJob(com.emc.storageos.volumecontroller.impl.hds.prov.job.HDSCleanupMetaVolumeMembersJob) LogicalUnit(com.emc.storageos.hds.model.LogicalUnit) ArrayList(java.util.ArrayList) QueueJob(com.emc.storageos.volumecontroller.impl.job.QueueJob) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) DatabaseException(com.emc.storageos.db.exceptions.DatabaseException) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) HDSException(com.emc.storageos.hds.HDSException) HashSet(java.util.HashSet)
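Stripped of the workflow-step handling, the cleanup is a filter-then-delete over the stored meta members. Below is a minimal sketch (hypothetical class and method names; the HDSUtils and HDSConstants import paths are assumptions, since they are not shown in the listing).

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import com.emc.storageos.db.client.model.StorageSystem;
import com.emc.storageos.hds.HDSConstants; // package assumed
import com.emc.storageos.hds.api.HDSApiClient;
import com.emc.storageos.hds.api.HDSApiFactory;
import com.emc.storageos.hds.model.LogicalUnit;
import com.emc.storageos.volumecontroller.impl.hds.prov.utils.HDSUtils; // package assumed

public class MetaMemberCleanupSketch {

    private HDSApiFactory hdsApiFactory;

    /**
     * Deletes stored meta members that still exist on the array and are not already
     * composite (meta volume) members. Returns the HiCommand Device Manager async
     * task ID, or null when there is nothing to delete.
     */
    public String deleteUnusedMembers(StorageSystem system, List<String> metaMembers) throws Exception {
        HDSApiClient client = hdsApiFactory.getClient(
                HDSUtils.getHDSServerManagementServerInfo(system),
                system.getSmisUserName(), system.getSmisPassword());
        String systemObjectId = HDSUtils.getSystemObjectID(system);
        Set<String> toDelete = new HashSet<String>();
        for (String luObjectId : metaMembers) {
            LogicalUnit lu = client.getLogicalUnitInfo(systemObjectId, luObjectId);
            // Skip units that no longer exist or are already part of a composite volume.
            if (lu != null && lu.getComposite() != HDSConstants.COMPOSITE_ELEMENT_MEMBER) {
                toDelete.add(luObjectId);
            }
        }
        return toDelete.isEmpty() ? null
                : client.deleteThickLogicalUnits(systemObjectId, toDelete, system.getModel());
    }
}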

Example 44 with HDSApiClient

use of com.emc.storageos.hds.api.HDSApiClient in project coprhd-controller by CoprHD.

the class HDSAbstractCreateVolumeJob method updateStatus.

/**
 * Called to update the job status when the volume create job completes.
 * <p/>
 * This is common update code for volume create operations.
 *
 * @param jobContext The job context.
 */
@Override
public void updateStatus(JobContext jobContext) throws Exception {
    List<LogicalUnit> luList = null;
    DbClient dbClient = jobContext.getDbClient();
    try {
        if (_status == JobStatus.IN_PROGRESS) {
            return;
        }
        int volumeCount = 0;
        String opId = getTaskCompleter().getOpId();
        StringBuilder logMsgBuilder = new StringBuilder(String.format("Updating status of job %s to %s", opId, _status.name()));
        StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
        HDSApiClient hdsApiClient = jobContext.getHdsApiFactory().getClient(HDSUtils.getHDSServerManagementServerInfo(storageSystem), storageSystem.getSmisUserName(), storageSystem.getSmisPassword());
        // If the job reached a terminal state, update the storage pool capacity and remove
        // the volumes' reservations from the pool's reserved capacity map.
        if (_status == JobStatus.SUCCESS || _status == JobStatus.FAILED) {
            StoragePool storagePool = dbClient.queryObject(StoragePool.class, storagePoolURI);
            HDSUtils.updateStoragePoolCapacity(dbClient, hdsApiClient, storagePool);
            StringMap reservationMap = storagePool.getReservedCapacityMap();
            for (URI volumeId : getTaskCompleter().getIds()) {
                // remove from reservation map
                reservationMap.remove(volumeId.toString());
            }
            dbClient.persistObject(storagePool);
        }
        boolean isThinVolumeRequest = checkThinVolumesRequest(getTaskCompleter().getIds(), dbClient);
        if (_status == JobStatus.SUCCESS) {
            List<URI> volumes = new ArrayList<URI>();
            luList = getLuListBasedOnModel(storageSystem, _javaResult, isThinVolumeRequest);
            Iterator<LogicalUnit> luListItr = luList.iterator();
            Calendar now = Calendar.getInstance();
            while (luListItr.hasNext()) {
                LogicalUnit logicalUnit = luListItr.next();
                URI volumeId = getTaskCompleter().getId(volumeCount++);
                volumes.add(volumeId);
                processVolume(volumeId, logicalUnit, dbClient, hdsApiClient, now, logMsgBuilder);
            }
        } else if (_status == JobStatus.FAILED) {
            for (URI id : getTaskCompleter().getIds()) {
                logMsgBuilder.append("\n");
                logMsgBuilder.append(String.format("Task %s failed to create volume: %s", opId, id.toString()));
                BlockObject object = BlockObject.fetch(dbClient, id);
                if (object != null) {
                    object.setInactive(true);
                    dbClient.persistObject(object);
                }
            }
        }
        _log.info(logMsgBuilder.toString());
    } catch (Exception e) {
        _log.error("Caught an exception while trying to updateStatus for HDSCreateVolumeJob", e);
        setErrorStatus("Encountered an internal error during volume create job status processing : " + e.getMessage());
    } finally {
        super.updateStatus(jobContext);
    }
}
Also used : HDSApiClient(com.emc.storageos.hds.api.HDSApiClient) StringMap(com.emc.storageos.db.client.model.StringMap) DbClient(com.emc.storageos.db.client.DbClient) StoragePool(com.emc.storageos.db.client.model.StoragePool) LogicalUnit(com.emc.storageos.hds.model.LogicalUnit) Calendar(java.util.Calendar) ArrayList(java.util.ArrayList) URI(java.net.URI) DatabaseException(com.emc.storageos.db.exceptions.DatabaseException) IOException(java.io.IOException) BlockObject(com.emc.storageos.db.client.model.BlockObject) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
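The terminal-state bookkeeping in updateStatus can be read in isolation: refresh the pool capacity from the array, then release each volume's reservation. Below is a minimal sketch with hypothetical class and method names, assuming the DbClient and HDSApiClient are already in hand (the HDSUtils import path is an assumption).

import java.net.URI;
import java.util.List;

import com.emc.storageos.db.client.DbClient;
import com.emc.storageos.db.client.model.StoragePool;
import com.emc.storageos.db.client.model.StringMap;
import com.emc.storageos.hds.api.HDSApiClient;
import com.emc.storageos.volumecontroller.impl.hds.prov.utils.HDSUtils; // package assumed

public class PoolReservationReleaseSketch {

    /**
     * Pulls fresh capacity numbers for the pool from the Device Manager and removes
     * each volume's entry from the pool's reserved capacity map, mirroring the
     * SUCCESS/FAILED handling in the example above.
     */
    public void releaseReservations(DbClient dbClient, HDSApiClient client,
            URI storagePoolUri, List<URI> volumeIds) throws Exception {
        StoragePool pool = dbClient.queryObject(StoragePool.class, storagePoolUri);
        HDSUtils.updateStoragePoolCapacity(dbClient, client, pool);
        StringMap reservationMap = pool.getReservedCapacityMap();
        for (URI volumeId : volumeIds) {
            reservationMap.remove(volumeId.toString());
        }
        dbClient.persistObject(pool);
    }
}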

Example 45 with HDSApiClient

use of com.emc.storageos.hds.api.HDSApiClient in project coprhd-controller by CoprHD.

the class HDSReplicationSyncJob method poll.

/**
 * Polls the HiCommand Device Manager for the replication pair status and marks the
 * job successful once the pair reaches the expected state, or failed on an error state.
 */
@Override
public JobPollResult poll(JobContext jobContext, long trackingPeriodInMillis) {
    try {
        StorageSystem storageSystem = jobContext.getDbClient().queryObject(StorageSystem.class, getStorageSystemURI());
        // log.info("HDSJob: Looking up job: id {}, provider: {} ", messageId, storageSystem.getActiveProviderURI());
        HDSApiClient hdsApiClient = jobContext.getHdsApiFactory().getClient(HDSUtils.getHDSServerManagementServerInfo(storageSystem), storageSystem.getSmisUserName(), storageSystem.getSmisPassword());
        if (hdsApiClient == null) {
            String errorMessage = "No HDS client found for provider ip: " + storageSystem.getActiveProviderURI();
            processTransientError(trackingPeriodInMillis, errorMessage, null);
        } else {
            HDSApiProtectionManager apiProtectionManager = hdsApiClient.getHdsApiProtectionManager();
            Pair<ReplicationInfo, String> response = apiProtectionManager.getReplicationInfoFromSystem(sourceNativeId, targetNativeId);
            ReplicationStatus status = ReplicationStatus.UNKNOWN;
            if (response != null) {
                status = ReplicationStatus.getReplicationStatusFromCode(response.first.getStatus());
                log.info("Expected status :{}", expectedStatus.name());
                log.info("Current replication status :{}", status.name());
                if (expectedStatus == status) {
                    _status = JobStatus.SUCCESS;
                    _pollResult.setJobPercentComplete(100);
                    log.info("HDSReplicationSyncJob: {} {} succeeded", sourceNativeId, targetNativeId);
                } else if (!status.isErrorStatus()) {
                    /**
                     * HiCommand Device Manager has an issue retrieving the modified replication info
                     * status from the pair management server. To get the latest pair status from the Device Manager,
                     * we have introduced a workaround that triggers a pair mgmt server host update call.
                     * Once Device Manager has a fix for this issue, we can revert this workaround.
                     *
                     * Refreshing host (Pair Mgmt Server) for every 10th polling.
                     */
                    if (++pollingCount % 10 == 0) {
                        log.info("Attempting to refresh pair managerment server :{}", response.second);
                        apiProtectionManager.refreshPairManagementServer(response.second);
                    }
                }
            }
            if (response == null || status.isErrorStatus()) {
                _status = JobStatus.FAILED;
                _pollResult.setJobPercentComplete(100);
                _errorDescription = String.format("Replication Status %1$s", new Object[] { status.name() });
                log.error("HDSReplicationSyncJob: {} failed; Details: {}", getJobName(), _errorDescription);
            }
        }
    } catch (Exception e) {
        processTransientError(trackingPeriodInMillis, e.getMessage(), e);
        log.error(e.getMessage(), e);
    } finally {
        try {
            _postProcessingStatus = JobStatus.SUCCESS;
            updateStatus(jobContext);
            if (_postProcessingStatus == JobStatus.ERROR) {
                processPostProcessingError(trackingPeriodInMillis, _errorDescription, null);
            }
        } catch (Exception e) {
            setErrorStatus(e.getMessage());
            log.error("Problem while trying to update status", e);
        }
    }
    _pollResult.setJobStatus(_status);
    _pollResult.setErrorDescription(_errorDescription);
    return _pollResult;
}
Also used : HDSApiClient(com.emc.storageos.hds.api.HDSApiClient) HDSApiProtectionManager(com.emc.storageos.hds.api.HDSApiProtectionManager) ReplicationInfo(com.emc.storageos.hds.model.ReplicationInfo) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
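The probe that poll() performs each cycle reduces to a pair lookup plus the every-10th-attempt refresh workaround described in the comment above. Below is a minimal sketch with hypothetical class and method names; it targets Java 10+ so the Pair type returned by getReplicationInfoFromSystem does not need to be imported, and unlike the real poll() it applies the refresh unconditionally rather than only while the pair is still transitioning.

import com.emc.storageos.hds.api.HDSApiClient;
import com.emc.storageos.hds.api.HDSApiProtectionManager;
import com.emc.storageos.hds.model.ReplicationInfo;

public class ReplicationPairProbeSketch {

    private int pollingCount;

    /**
     * One polling step, stripped of job bookkeeping: look the pair up on the Device
     * Manager and, every 10th attempt, refresh the pair management server so stale
     * replication status is re-read. Returns the ReplicationInfo, or null when the
     * pair was not found; the caller maps its status code via
     * ReplicationStatus.getReplicationStatusFromCode(...) exactly as poll() does above.
     */
    public ReplicationInfo probe(HDSApiClient client, String sourceNativeId, String targetNativeId) throws Exception {
        HDSApiProtectionManager protectionManager = client.getHdsApiProtectionManager();
        // The returned pair carries the ReplicationInfo (first) and the pair
        // management server object ID (second).
        var response = protectionManager.getReplicationInfoFromSystem(sourceNativeId, targetNativeId);
        if (response == null) {
            return null; // pair no longer exists on the array
        }
        if (++pollingCount % 10 == 0) {
            protectionManager.refreshPairManagementServer(response.second);
        }
        return response.first;
    }
}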

Aggregations

HDSApiClient (com.emc.storageos.hds.api.HDSApiClient): 45
DeviceControllerException (com.emc.storageos.exceptions.DeviceControllerException): 26
HDSException (com.emc.storageos.hds.HDSException): 26
ServiceError (com.emc.storageos.svcs.errorhandling.model.ServiceError): 21
Volume (com.emc.storageos.db.client.model.Volume): 14
DatabaseException (com.emc.storageos.db.exceptions.DatabaseException): 13
HDSApiExportManager (com.emc.storageos.hds.api.HDSApiExportManager): 12
HostStorageDomain (com.emc.storageos.hds.model.HostStorageDomain): 12
ArrayList (java.util.ArrayList): 12
StorageSystem (com.emc.storageos.db.client.model.StorageSystem): 11
HashSet (java.util.HashSet): 11
ExportMask (com.emc.storageos.db.client.model.ExportMask): 10
LogicalUnit (com.emc.storageos.hds.model.LogicalUnit): 10
URI (java.net.URI): 10
HDSJob (com.emc.storageos.volumecontroller.impl.hds.prov.job.HDSJob): 9
QueueJob (com.emc.storageos.volumecontroller.impl.job.QueueJob): 8
ReplicationInfo (com.emc.storageos.hds.model.ReplicationInfo): 7
InternalException (com.emc.storageos.svcs.errorhandling.resources.InternalException): 7
StoragePool (com.emc.storageos.db.client.model.StoragePool): 6
HDSApiProtectionManager (com.emc.storageos.hds.api.HDSApiProtectionManager): 6