
Example 11 with DbClient

use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.

the class HDSDeleteVolumeJob method updateStatus.

/**
 * Called to update the job status when the volume delete job completes.
 *
 * @param jobContext The job context.
 */
public void updateStatus(JobContext jobContext) throws Exception {
    DbClient dbClient = jobContext.getDbClient();
    try {
        if (_status == JobStatus.IN_PROGRESS) {
            return;
        }
        StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
        HDSApiClient hdsApiClient = jobContext.getHdsApiFactory().getClient(HDSUtils.getHDSServerManagementServerInfo(storageSystem), storageSystem.getSmisUserName(), storageSystem.getSmisPassword());
        // Get list of volumes; get set of storage pool ids to which they
        // belong.
        List<Volume> volumes = new ArrayList<Volume>();
        Set<URI> poolURIs = new HashSet<URI>();
        for (URI id : getTaskCompleter().getIds()) {
            // Volume volume = dbClient.queryObject(Volume.class, id);
            Volume volume = (Volume) BlockObject.fetch(dbClient, id);
            if (volume != null && !volume.getInactive()) {
                volumes.add(volume);
                poolURIs.add(volume.getPool());
            }
        }
        // If terminal state update storage pool capacity
        if (_status == JobStatus.SUCCESS || _status == JobStatus.FAILED) {
            // Update capacity of storage pools.
            for (URI poolURI : poolURIs) {
                StoragePool storagePool = dbClient.queryObject(StoragePool.class, poolURI);
                HDSUtils.updateStoragePoolCapacity(dbClient, hdsApiClient, storagePool);
            }
        }
        StringBuilder logMsgBuilder = new StringBuilder();
        if (_status == JobStatus.SUCCESS) {
            for (Volume volume : volumes) {
                if (logMsgBuilder.length() != 0) {
                    logMsgBuilder.append("\n");
                }
                logMsgBuilder.append(String.format("Successfully deleted volume %s", volume.getId()));
            }
        } else if (_status == JobStatus.FAILED) {
            for (URI id : getTaskCompleter().getIds()) {
                if (logMsgBuilder.length() != 0) {
                    logMsgBuilder.append("\n");
                }
                logMsgBuilder.append(String.format("Failed to delete volume: %s", id));
            }
        }
        if (logMsgBuilder.length() > 0) {
            _log.info(logMsgBuilder.toString());
        }
    } catch (Exception e) {
        setPostProcessingErrorStatus("Encountered an internal error during delete volume job status processing: " + e.getMessage());
        _log.error("Caught exception while handling updateStatus for delete volume job.", e);
    } finally {
        super.updateStatus(jobContext);
    }
}
Also used : HDSApiClient(com.emc.storageos.hds.api.HDSApiClient) DbClient(com.emc.storageos.db.client.DbClient) StoragePool(com.emc.storageos.db.client.model.StoragePool) Volume(com.emc.storageos.db.client.model.Volume) ArrayList(java.util.ArrayList) URI(java.net.URI) StorageSystem(com.emc.storageos.db.client.model.StorageSystem) HashSet(java.util.HashSet)
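
The loop over getTaskCompleter().getIds() above shows a common DbClient pattern: load each volume, skip records that are missing or already inactive, and collect the distinct pool URIs so the pool capacity refresh runs once per pool. Below is a minimal sketch of that pattern as a standalone helper; ActivePoolCollector is a hypothetical class (not part of CoprHD), and it assumes DbClient.queryObject(Class, URI) returns null for missing records, as the null check in the job code implies.

import java.net.URI;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

import com.emc.storageos.db.client.DbClient;
import com.emc.storageos.db.client.model.Volume;

// Hypothetical helper (not part of CoprHD): collects the storage pool URIs of the
// active volumes referenced by a completed job, mirroring the loop in
// HDSDeleteVolumeJob.updateStatus().
public final class ActivePoolCollector {

    private ActivePoolCollector() {
    }

    public static Set<URI> collectPoolURIs(DbClient dbClient, Collection<URI> volumeIds) {
        Set<URI> poolURIs = new HashSet<URI>();
        for (URI id : volumeIds) {
            // Assumed: queryObject returns null when no record exists for the id.
            Volume volume = dbClient.queryObject(Volume.class, id);
            if (volume != null && !volume.getInactive()) {
                poolURIs.add(volume.getPool());
            }
        }
        return poolURIs;
    }
}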

Example 12 with DbClient

use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.

the class HDSVolumeExpandJob method updateStatus.

/**
 * Called to update the job status when the volume expand job completes.
 *
 * @param jobContext
 *            The job context.
 */
@Override
public void updateStatus(JobContext jobContext) throws Exception {
    LogicalUnit logicalUnit = null;
    try {
        if (_status == JobStatus.IN_PROGRESS) {
            return;
        }
        DbClient dbClient = jobContext.getDbClient();
        StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
        HDSApiClient hdsApiClient = jobContext.getHdsApiFactory().getClient(HDSUtils.getHDSServerManagementServerInfo(storageSystem), storageSystem.getSmisUserName(), storageSystem.getSmisPassword());
        // If terminal state update storage pool capacity and remove reservation for volume capacity
        // from pool's reserved capacity map.
        if (_status == JobStatus.SUCCESS || _status == JobStatus.FAILED) {
            StoragePool storagePool = dbClient.queryObject(StoragePool.class, storagePoolURI);
            HDSUtils.updateStoragePoolCapacity(dbClient, hdsApiClient, storagePool);
            StringMap reservationMap = storagePool.getReservedCapacityMap();
            URI volumeId = getTaskCompleter().getId();
            // remove from reservation map
            reservationMap.remove(volumeId.toString());
            dbClient.persistObject(storagePool);
        }
        String opId = getTaskCompleter().getOpId();
        StringBuilder logMsgBuilder = new StringBuilder(String.format("Updating status of job %s to %s, task: %s", this.getJobName(), _status.name(), opId));
        if (_status == JobStatus.SUCCESS) {
            VolumeExpandCompleter taskCompleter = (VolumeExpandCompleter) getTaskCompleter();
            Volume volume = dbClient.queryObject(Volume.class, taskCompleter.getId());
            // set requested capacity
            volume.setCapacity(taskCompleter.getSize());
            // set meta related properties
            volume.setIsComposite(taskCompleter.isComposite());
            volume.setCompositionType(taskCompleter.getMetaVolumeType());
            logicalUnit = (LogicalUnit) _javaResult.getBean("logicalunit");
            if (null != logicalUnit) {
                long capacityInBytes = (Long.valueOf(logicalUnit.getCapacityInKB())) * 1024L;
                volume.setProvisionedCapacity(capacityInBytes);
                volume.setAllocatedCapacity(capacityInBytes);
            }
            logMsgBuilder.append(String.format("%n   Capacity: %s, Provisioned capacity: %s, Allocated Capacity: %s", volume.getCapacity(), volume.getProvisionedCapacity(), volume.getAllocatedCapacity()));
            if (volume.getIsComposite()) {
                logMsgBuilder.append(String.format("%n  Is Meta: %s, Total meta member capacity: %s, Meta member count %s, Meta member size: %s", volume.getIsComposite(), volume.getTotalMetaMemberCapacity(), volume.getMetaMemberCount(), volume.getMetaMemberSize()));
            }
            _log.info(logMsgBuilder.toString());
            dbClient.persistObject(volume);
            // Reset list of meta members native ids in WF data (when meta
            // is created meta members are removed from array)
            WorkflowService.getInstance().storeStepData(opId, new ArrayList<String>());
        }
    } catch (Exception e) {
        _log.error("Caught an exception while trying to updateStatus for HDSVolumeExpandJob", e);
        setErrorStatus("Encountered an internal error during volume expand job status processing : " + e.getMessage());
    } finally {
        super.updateStatus(jobContext);
    }
}
Also used : HDSApiClient(com.emc.storageos.hds.api.HDSApiClient) StringMap(com.emc.storageos.db.client.model.StringMap) DbClient(com.emc.storageos.db.client.DbClient) StoragePool(com.emc.storageos.db.client.model.StoragePool) VolumeExpandCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeExpandCompleter) LogicalUnit(com.emc.storageos.hds.model.LogicalUnit) URI(java.net.URI) Volume(com.emc.storageos.db.client.model.Volume) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
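
Both the HDS and Cinder jobs release the volume's capacity reservation once the job reaches a terminal state: load the pool, remove the volume's key from the reserved-capacity map, and persist the pool. Below is a minimal sketch of that step, using only the DbClient and StoragePool calls already shown above; PoolReservationReleaser is a hypothetical helper, not part of CoprHD.

import java.net.URI;

import com.emc.storageos.db.client.DbClient;
import com.emc.storageos.db.client.model.StoragePool;
import com.emc.storageos.db.client.model.StringMap;

// Hypothetical helper (not part of CoprHD): releases the capacity reservation a
// provisioning job holds against a storage pool, mirroring the SUCCESS/FAILED
// branch in HDSVolumeExpandJob.updateStatus().
public final class PoolReservationReleaser {

    private PoolReservationReleaser() {
    }

    public static void release(DbClient dbClient, URI poolURI, URI volumeId) {
        StoragePool storagePool = dbClient.queryObject(StoragePool.class, poolURI);
        if (storagePool == null) {
            return; // pool record no longer exists; nothing to release
        }
        // The reservation map is keyed by the volume URI string, as in the job code above.
        StringMap reservationMap = storagePool.getReservedCapacityMap();
        reservationMap.remove(volumeId.toString());
        dbClient.persistObject(storagePool);
    }
}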

Example 13 with DbClient

use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.

the class AbstractCinderVolumeCreateJob method updateStatus.

/**
 * Called to update the job status when the volume create job completes.
 * This is common update code for volume create operations.
 *
 * @param jobContext The job context.
 */
@Override
public void updateStatus(JobContext jobContext) throws Exception {
    DbClient dbClient = jobContext.getDbClient();
    try {
        // Do nothing if the job is not completed yet
        if (status == JobStatus.IN_PROGRESS) {
            return;
        }
        String opId = getTaskCompleter().getOpId();
        StringBuilder logMsgBuilder = new StringBuilder(String.format("Updating status of job %s to %s", opId, status.name()));
        StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
        CinderApi cinderApi = jobContext.getCinderApiFactory().getApi(storageSystem.getActiveProviderURI(), getEndPointInfo());
        // If terminal state update storage pool capacity and remove reservation for volumes capacity
        // from pool's reserved capacity map.
        StoragePool storagePool = null;
        if (status == JobStatus.SUCCESS || status == JobStatus.FAILED) {
            storagePool = dbClient.queryObject(StoragePool.class, storagePoolUri);
            StringMap reservationMap = storagePool.getReservedCapacityMap();
            for (URI volumeId : getTaskCompleter().getIds()) {
                // remove from reservation map
                reservationMap.remove(volumeId.toString());
            }
            dbClient.persistObject(storagePool);
        }
        if (status == JobStatus.SUCCESS) {
            List<URI> volumes = new ArrayList<URI>();
            Calendar now = Calendar.getInstance();
            URI volumeId = getTaskCompleter().getId();
            volumes.add(volumeId);
            for (Map.Entry<String, URI> entry : volumeIds.entrySet()) {
                VolumeShowResponse volumeDetails = cinderApi.showVolume(entry.getKey());
                processVolume(entry.getValue(), volumeDetails, dbClient, now, logMsgBuilder);
                // Adjust the storage pool's capacity
                CinderUtils.updateStoragePoolCapacity(dbClient, cinderApi, storagePool, volumeDetails.volume.size, false);
            }
        } else if (status == JobStatus.FAILED) {
            for (URI id : getTaskCompleter().getIds()) {
                logMsgBuilder.append("\n");
                logMsgBuilder.append(String.format("Task %s failed to create volume: %s", opId, id.toString()));
                Volume volume = dbClient.queryObject(Volume.class, id);
                volume.setInactive(true);
                dbClient.persistObject(volume);
            }
        }
        logger.info(logMsgBuilder.toString());
    } catch (Exception e) {
        logger.error("Caught an exception while trying to updateStatus for CinderCreateVolumeJob", e);
        setErrorStatus("Encountered an internal error during volume create job status processing : " + e.getMessage());
    } finally {
        super.updateStatus(jobContext);
    }
}
Also used : StringMap(com.emc.storageos.db.client.model.StringMap) DbClient(com.emc.storageos.db.client.DbClient) StoragePool(com.emc.storageos.db.client.model.StoragePool) Calendar(java.util.Calendar) ArrayList(java.util.ArrayList) CinderApi(com.emc.storageos.cinder.api.CinderApi) URI(java.net.URI) VolumeShowResponse(com.emc.storageos.cinder.model.VolumeShowResponse) IOException(java.io.IOException) Volume(com.emc.storageos.db.client.model.Volume) Map(java.util.Map) StringMap(com.emc.storageos.db.client.model.StringMap) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
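
When the create job fails, the snippet above marks each volume inactive so the placeholder records do not linger in the database. Below is a minimal sketch of that cleanup, assuming only the DbClient calls shown in the example (queryObject and persistObject); FailedVolumeCleaner is a hypothetical helper, not part of CoprHD.

import java.net.URI;
import java.util.Collection;

import com.emc.storageos.db.client.DbClient;
import com.emc.storageos.db.client.model.Volume;

// Hypothetical helper (not part of CoprHD): marks volumes inactive after a failed
// create job, mirroring the FAILED branch in AbstractCinderVolumeCreateJob.updateStatus().
public final class FailedVolumeCleaner {

    private FailedVolumeCleaner() {
    }

    public static void markInactive(DbClient dbClient, Collection<URI> volumeIds) {
        for (URI id : volumeIds) {
            Volume volume = dbClient.queryObject(Volume.class, id);
            if (volume == null) {
                continue; // no record was persisted for this id
            }
            volume.setInactive(true);
            dbClient.persistObject(volume);
        }
    }
}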

Example 14 with DbClient

use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.

the class CinderSnapshotDeleteJob method updateStatus.

/**
 * Called to update the job status when the snapshot delete job completes.
 *
 * @param jobContext The job context.
 */
public void updateStatus(JobContext jobContext) throws Exception {
    DbClient dbClient = jobContext.getDbClient();
    try {
        if (status == JobStatus.IN_PROGRESS) {
            return;
        }
        StringBuilder logMsgBuilder = new StringBuilder();
        URI snapshotId = getTaskCompleter().getId();
        BlockSnapshot snapshot = dbClient.queryObject(BlockSnapshot.class, snapshotId);
        if (status == JobStatus.SUCCESS) {
            snapshot.setInactive(true);
            dbClient.persistObject(snapshot);
            if (logMsgBuilder.length() != 0) {
                logMsgBuilder.append("\n");
            }
            logMsgBuilder.append(String.format("Successfully deleted snapshot %s", snapshot.getId()));
        } else if (status == JobStatus.FAILED) {
            if (logMsgBuilder.length() != 0) {
                logMsgBuilder.append("\n");
            }
            logMsgBuilder.append(String.format("Failed to delete snapshot %s", snapshot.getId()));
        }
        if (logMsgBuilder.length() > 0) {
            _logger.info(logMsgBuilder.toString());
        }
    } catch (Exception e) {
        setErrorStatus("Encountered an internal error during delete snapshot job status processing: " + e.getMessage());
        _logger.error("Caught exception while handling updateStatus for delete snapshot job.", e);
    } finally {
        super.updateStatus(jobContext);
    }
}
Also used : DbClient(com.emc.storageos.db.client.DbClient) BlockSnapshot(com.emc.storageos.db.client.model.BlockSnapshot) URI(java.net.URI) CinderException(com.emc.storageos.cinder.errorhandling.CinderException)
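
Because JobContext hands the job a DbClient, the database side of updateStatus can be exercised in isolation with a mocked DbClient. The sketch below assumes DbClient is an interface mockable with Mockito and that BlockSnapshot has the usual DataObject no-arg constructor and setId(URI); both the helper method and the test class are hypothetical, not part of CoprHD.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.net.URI;

import com.emc.storageos.db.client.DbClient;
import com.emc.storageos.db.client.model.BlockSnapshot;

// Illustrative sketch only (not part of CoprHD): drives the snapshot-delete database
// update with a mocked DbClient instead of a full JobContext.
public class SnapshotDeleteDbUpdateSketch {

    // Hypothetical extraction of the SUCCESS branch from CinderSnapshotDeleteJob.updateStatus().
    static void markDeleted(DbClient dbClient, URI snapshotId) throws Exception {
        BlockSnapshot snapshot = dbClient.queryObject(BlockSnapshot.class, snapshotId);
        snapshot.setInactive(true);
        dbClient.persistObject(snapshot);
    }

    public static void main(String[] args) throws Exception {
        DbClient dbClient = mock(DbClient.class);
        URI snapshotId = URI.create("urn:storageos:BlockSnapshot:example");
        BlockSnapshot snapshot = new BlockSnapshot();
        snapshot.setId(snapshotId);
        when(dbClient.queryObject(BlockSnapshot.class, snapshotId)).thenReturn(snapshot);

        markDeleted(dbClient, snapshotId);

        // The snapshot should have been flagged inactive and written back.
        verify(dbClient).persistObject(snapshot);
        System.out.println("inactive = " + snapshot.getInactive());
    }
}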

Example 15 with DbClient

use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.

the class CinderVolumeExpandJob method updateStatus.

@Override
public void updateStatus(JobContext jobContext) throws Exception {
    DbClient dbClient = jobContext.getDbClient();
    try {
        // Do nothing if the job is not completed yet
        if (status == JobStatus.IN_PROGRESS) {
            return;
        }
        String opId = getTaskCompleter().getOpId();
        _logger.info(String.format("Updating status of job %s to %s", opId, status.name()));
        StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
        CinderApi cinderApi = jobContext.getCinderApiFactory().getApi(storageSystem.getActiveProviderURI(), getEndPointInfo());
        URI volumeId = getTaskCompleter().getId();
        // If terminal state update storage pool capacity and remove reservation for volume capacity
        // from pool's reserved capacity map.
        StoragePool storagePool = null;
        if (status == JobStatus.SUCCESS || status == JobStatus.FAILED) {
            storagePool = dbClient.queryObject(StoragePool.class, storagePoolUri);
            StringMap reservationMap = storagePool.getReservedCapacityMap();
            // remove from reservation map
            reservationMap.remove(volumeId.toString());
            dbClient.persistObject(storagePool);
        }
        if (status == JobStatus.SUCCESS) {
            VolumeExpandCompleter taskCompleter = (VolumeExpandCompleter) getTaskCompleter();
            Volume volume = dbClient.queryObject(Volume.class, taskCompleter.getId());
            long oldCapacity = volume.getCapacity();
            long newCapacity = taskCompleter.getSize();
            // set requested capacity
            volume.setCapacity(newCapacity / CinderConstants.BYTES_TO_GB);
            volume.setProvisionedCapacity(taskCompleter.getSize());
            volume.setAllocatedCapacity(taskCompleter.getSize());
            dbClient.persistObject(volume);
            long increasedCapacity = newCapacity - oldCapacity;
            CinderUtils.updateStoragePoolCapacity(dbClient, cinderApi, storagePool, String.valueOf(increasedCapacity / CinderConstants.BYTES_TO_GB), false);
        }
    } catch (Exception e) {
        _logger.error("Caught an exception while trying to updateStatus for CinderExpandVolumeJob", e);
        setErrorStatus("Encountered an internal error during expand volume job status processing : " + e.getMessage());
    } finally {
        super.updateStatus(jobContext);
    }
}
Also used : StringMap(com.emc.storageos.db.client.model.StringMap) DbClient(com.emc.storageos.db.client.DbClient) StoragePool(com.emc.storageos.db.client.model.StoragePool) VolumeExpandCompleter(com.emc.storageos.volumecontroller.impl.block.taskcompleter.VolumeExpandCompleter) Volume(com.emc.storageos.db.client.model.Volume) CinderApi(com.emc.storageos.cinder.api.CinderApi) URI(java.net.URI) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
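
The expand path reports only the capacity delta to the pool: the difference between the new and old volume capacity, converted to GB before it is handed to CinderUtils.updateStoragePoolCapacity. A minimal sketch of that arithmetic is below; the value of CinderConstants.BYTES_TO_GB is assumed to be 1024^3 and is hard-coded here rather than taken from the library.

// Minimal, self-contained sketch of the capacity-delta arithmetic in
// CinderVolumeExpandJob.updateStatus(). The conversion factor is an assumption
// standing in for CinderConstants.BYTES_TO_GB.
public final class ExpandCapacityMath {

    private static final long BYTES_TO_GB = 1024L * 1024L * 1024L;

    private ExpandCapacityMath() {
    }

    /** Returns the additional capacity, in GB, charged against the pool after an expand. */
    static long increasedCapacityGb(long oldCapacityBytes, long newCapacityBytes) {
        long increasedCapacityBytes = newCapacityBytes - oldCapacityBytes;
        return increasedCapacityBytes / BYTES_TO_GB;
    }

    public static void main(String[] args) {
        long oldCapacity = 10L * BYTES_TO_GB; // 10 GB before the expand
        long newCapacity = 25L * BYTES_TO_GB; // expanded to 25 GB
        // Prints 15, the delta that would be passed to CinderUtils.updateStoragePoolCapacity.
        System.out.println(increasedCapacityGb(oldCapacity, newCapacity));
    }
}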

Aggregations

DbClient (com.emc.storageos.db.client.DbClient) 253
URI (java.net.URI) 155
StorageSystem (com.emc.storageos.db.client.model.StorageSystem) 73
Volume (com.emc.storageos.db.client.model.Volume) 67
ArrayList (java.util.ArrayList) 58
Test (org.junit.Test) 42
FileShare (com.emc.storageos.db.client.model.FileShare) 34
NamedURI (com.emc.storageos.db.client.model.NamedURI) 31
CIMObjectPath (javax.cim.CIMObjectPath) 31
BlockSnapshot (com.emc.storageos.db.client.model.BlockSnapshot) 29
WBEMClient (javax.wbem.client.WBEMClient) 29
StringSet (com.emc.storageos.db.client.model.StringSet) 28
CIMConnectionFactory (com.emc.storageos.volumecontroller.impl.smis.CIMConnectionFactory) 28
ContainmentConstraint (com.emc.storageos.db.client.constraint.ContainmentConstraint) 26
MigrationCallbackException (com.emc.storageos.svcs.errorhandling.resources.MigrationCallbackException) 25
AlternateIdConstraint (com.emc.storageos.db.client.constraint.AlternateIdConstraint) 22
InternalDbClient (com.emc.storageos.db.client.upgrade.InternalDbClient) 22
VNXeApiClient (com.emc.storageos.vnxe.VNXeApiClient) 21
CIMInstance (javax.cim.CIMInstance) 21
BlockObject (com.emc.storageos.db.client.model.BlockObject) 20