Search in sources :

Example 26 with DbClient

use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.

In the class VNXFileSystemStaticLoadProcessor, the method processMountList:

/**
 * Processes the mount list received from the XMLAPI server: groups the mounted
 * file systems/snapshots by their mover (physical Data Mover or Virtual Data
 * Mover), computes the aggregate capacity per mover, and persists the results
 * as DB metrics.
 *
 * @param mountList response objects from the XMLAPI server; the first element is
 *            the overall Status, followed by Mount entries.
 * @param keyMap discovery context map (DB client, capacity maps, access profile).
 * @throws VNXFilePluginException when the XMLAPI server returned a fault status.
 */
private void processMountList(final List<Object> mountList, Map<String, Object> keyMap) throws VNXFilePluginException {
    _logger.info("Processing file system mount response....");
    final DbClient dbClient = (DbClient) keyMap.get(VNXFileConstants.DBCLIENT);
    // Step 1: file system capacity map <filesystemId, size>.
    Map<String, Long> fsCapList = (Map<String, Long>) keyMap.get(VNXFileConstants.FILE_CAPACITY_MAP);
    // Step 2: per-file-system snapshot checkpoint sizes: <filesystemId, <snapshotId, checkpointSize>>.
    Map<String, Map<String, Long>> snapCapFsMap = (Map<String, Map<String, Long>>) keyMap.get(VNXFileConstants.SNAP_CAPACITY_MAP);
    AccessProfile profile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE);
    // Get the storage system from the DB.
    StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, profile.getSystemId());
    Map<String, List<String>> fsMountvNASMap = new HashMap<String, List<String>>();
    Map<String, List<String>> fsMountPhyNASMap = new HashMap<String, List<String>>();
    // Step 3: walk the response; the first element is the overall request status.
    Iterator<Object> iterator = mountList.iterator();
    if (iterator.hasNext()) {
        Status status = (Status) iterator.next();
        if (status.getMaxSeverity() != Severity.OK) {
            throw new VNXFilePluginException("Fault response received from XMLAPI Server.",
                    VNXFilePluginException.ERRORCODE_INVALID_RESPONSE);
        }
        // Step 4: build the per-mover file system lists (VDM vs. physical DM).
        while (iterator.hasNext()) {
            Mount mount = (Mount) iterator.next();
            if (mount.isMoverIdIsVdm()) {
                addMountedFileSystem(fsMountvNASMap, mount.getMover(), mount.getFileSystem());
                _logger.debug("Filestem or Snapshot {} mounted on vdm {} ", mount.getFileSystem(), mount.getMover());
            } else {
                addMountedFileSystem(fsMountPhyNASMap, mount.getMover(), mount.getFileSystem());
                _logger.debug("Filestem or Snapshot {} mounted on data mover {} ", mount.getFileSystem(), mount.getMover());
            }
        }
        // Log the number of objects mounted on each data mover and virtual data mover.
        for (Entry<String, List<String>> eachVNas : fsMountvNASMap.entrySet()) {
            _logger.info(" Virtual data mover {} has Filestem or Snapshot mounts {} ", eachVNas.getKey(), eachVNas.getValue().size());
        }
        for (Entry<String, List<String>> eachNas : fsMountPhyNASMap.entrySet()) {
            _logger.info(" Data mover {} has Filestem or Snapshot mounts {} ", eachNas.getKey(), eachNas.getValue().size());
        }
        // Step 5: aggregate capacity per mover and persist the metrics.
        Map<String, Long> vdmCapacityMap = computeMoverCapacity(fsMountvNASMap, fsCapList, snapCapFsMap);
        Map<String, Long> dmCapacityMap = computeMoverCapacity(fsMountPhyNASMap, fsCapList, snapCapFsMap);
        prepareDBMetrics(storageSystem, dbClient, fsMountPhyNASMap, dmCapacityMap, fsMountvNASMap, vdmCapacityMap);
    }
}

/**
 * Adds a file system (or snapshot) id to the list of objects mounted on the
 * given mover, creating the list on first use.
 *
 * @param moverToFsMap map from mover id to the ids mounted on it.
 * @param mover the mover id.
 * @param fileSystem the file system or snapshot id to record.
 */
private static void addMountedFileSystem(Map<String, List<String>> moverToFsMap, String mover, String fileSystem) {
    List<String> fsList = moverToFsMap.get(mover);
    if (null == fsList) {
        fsList = new ArrayList<String>();
        moverToFsMap.put(mover, fsList);
    }
    fsList.add(fileSystem);
}
Also used : Status(com.emc.nas.vnxfile.xmlapi.Status) DbClient(com.emc.storageos.db.client.DbClient) HashMap(java.util.HashMap) VNXFilePluginException(com.emc.storageos.plugins.metering.vnxfile.VNXFilePluginException) Mount(com.emc.nas.vnxfile.xmlapi.Mount) AccessProfile(com.emc.storageos.plugins.AccessProfile) ArrayList(java.util.ArrayList) List(java.util.List) URIQueryResultList(com.emc.storageos.db.client.constraint.URIQueryResultList) HashMap(java.util.HashMap) Map(java.util.Map) StringMap(com.emc.storageos.db.client.model.StringMap) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)

Example 27 with DbClient

use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.

In the class VNXFileSystemUsageProcessor, the method injectProvisionedCapacity:

/**
 * Overwrites the stat's provisioned capacity with the capacity recorded on the
 * corresponding FileShare in the database.
 *
 * @param stat the stat record to update; its resource id identifies the FileShare.
 * @param keyMap discovery context map holding the DbClient under VNXFileConstants.DBCLIENT.
 */
private void injectProvisionedCapacity(final Stat stat, final Map<String, Object> keyMap) {
    final DbClient dbClient = (DbClient) keyMap.get(VNXFileConstants.DBCLIENT);
    try {
        final FileShare fileObj = dbClient.queryObject(FileShare.class, stat.getResourceId());
        _logger.info("injectProvisioned Capacity existing {} from File System {}", stat.getProvisionedCapacity(), fileObj.getCapacity());
        stat.setProvisionedCapacity(fileObj.getCapacity());
    } catch (final Exception e) {
        // Pass the throwable as the last argument so SLF4J logs the stack trace;
        // previously the root cause (e.g. a null FileShare from the query) was lost.
        _logger.error("No FileShare found using resource {}", stat.getResourceId(), e);
    }
}
Also used : DbClient(com.emc.storageos.db.client.DbClient) FileShare(com.emc.storageos.db.client.model.FileShare) VNXFilePluginException(com.emc.storageos.plugins.metering.vnxfile.VNXFilePluginException) IOException(java.io.IOException) BaseCollectionException(com.emc.storageos.plugins.BaseCollectionException)

Example 28 with DbClient

use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.

In the class VPlexPerpetualCSVFileCollector, the method collect:

/**
 * Collects VPLEX performance statistics by listing and parsing the perpetual
 * CSV data files found on each of the array's management stations, then
 * processing director and port metrics from the parsed data.
 *
 * @param accessProfile identifies the VPLEX storage system to collect for.
 * @param context collection context map holding the DbClient (and state used by
 *            the stats processors).
 */
@Override
public void collect(AccessProfile accessProfile, Map<String, Object> context) {
    init();
    DbClient dbClient = (DbClient) context.get(Constants.dbClient);
    // Get which VPlex array that this applies to
    URI storageSystemURI = accessProfile.getSystemId();
    StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, storageSystemURI);
    if (storageSystem == null) {
        log.error("Could not find StorageSystem '{}' in DB", storageSystemURI);
        return;
    }
    StringSet providerIds = storageSystem.getProviders();
    // Guard against a system with no registered providers; nothing to collect.
    if (providerIds == null) {
        log.warn("StorageSystem '{}' has no associated providers; skipping stats collection", storageSystemURI);
        return;
    }
    for (String providerId : providerIds) {
        StorageProvider provider = dbClient.queryObject(StorageProvider.class, URI.create(providerId));
        // The provider may have been deleted since the id was recorded; skip it
        // instead of hitting an NPE on provider.getIPAddress() below.
        if (provider == null) {
            log.warn("Could not find StorageProvider '{}' in DB; skipping", providerId);
            continue;
        }
        LinuxSystemCLI cli = new LinuxSystemCLI(provider.getIPAddress(), provider.getUserName(), provider.getPassword());
        ListVPlexPerpetualCSVFileNames listDataFileNamesCmd = new ListVPlexPerpetualCSVFileNames();
        cli.executeCommand(listDataFileNamesCmd);
        // Process each of the data files that we found on the VPlex management station
        List<String> fileNames = listDataFileNamesCmd.getResults();
        for (String fileName : fileNames) {
            log.info("Processing VPLEX performance statistics file {}", fileName);
            // Extract and hold the data for this data file
            ReadAndParseVPlexPerpetualCSVFile readDataFile = new ReadAndParseVPlexPerpetualCSVFile(fileName);
            cli.executeCommand(readDataFile);
            VPlexPerpetualCSVFileData fileData = readDataFile.getResults();
            // Read the headers and extract those metric names that we're interested in and to which
            // DataObject (StorageHADomain or StoragePort) that it should be associated with. This
            // will be used as a way to look up the object when processing the actual metric data
            Map<String, MetricHeaderInfo> metricNamesToHeaderInfo = processCSVFileDataHeader(dbClient, storageSystem, fileData.getDirectorName(), fileData.getHeaders());
            List<Map<String, String>> dataLines = fileData.getDataLines();
            int lineCount = dataLines.size();
            // There is at least one data point
            if (lineCount > 1) {
                // Determine the last time that metrics were collected.
                Long lastCollectionTimeUTC = getLastCollectionTime(metricNamesToHeaderInfo);
                // Try to find the index into dataLines based on the last collection time.
                // What we're trying to do here is determine the maximum value for the metrics
                // from the last collection time in ViPR, until the last data line in the file.
                int start = fileData.getDataIndexForTime(lastCollectionTimeUTC);
                // Have a mapping of metrics to their maximum value found in the dataLines
                Map<String, Double> maxValues = findMaxMetricValues(dataLines, start, lineCount);
                // Process the metrics for this file
                Map<String, String> last = dataLines.get(lineCount - 1);
                processDirectorStats(metricNamesToHeaderInfo, maxValues, last);
                processPortStats(context, metricNamesToHeaderInfo, maxValues, last);
            }
            // Clean up fileData resources
            fileData.close();
        }
        // Clean out the cache data, so that it's not laying around
        clearCaches();
    }
}
Also used : LinuxSystemCLI(com.iwave.ext.linux.LinuxSystemCLI) DbClient(com.emc.storageos.db.client.DbClient) StorageProvider(com.emc.storageos.db.client.model.StorageProvider) URI(java.net.URI) ContainmentConstraint(com.emc.storageos.db.client.constraint.ContainmentConstraint) StringSet(com.emc.storageos.db.client.model.StringSet) HashMap(java.util.HashMap) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)

Example 29 with DbClient

use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.

In the class ConnectionManagerUtils, the method disallowReaping:

/**
 * Marks the CIM connection for the storage system named in the access profile
 * as keep-alive, so the connection manager will not reap it.
 *
 * @param profile an AccessProfile (passed as Object) describing the target system.
 * @param client a DbClient (passed as Object) used to load the StorageSystem.
 * @throws BaseCollectionException declared for interface compatibility.
 */
public void disallowReaping(Object profile, Object client) throws BaseCollectionException {
    final AccessProfile ap = (AccessProfile) profile;
    final DbClient db = (DbClient) client;
    try {
        CIMConnectionFactory factory = (CIMConnectionFactory) ap.getCimConnectionFactory();
        StorageSystem system = db.queryObject(StorageSystem.class, ap.getSystemId());
        factory.setKeepAliveForConnection(system);
    } catch (final IllegalStateException ex) {
        log.error("Not able to get CIMOM Client instance for ip {} due to ", ap.getIpAddress(), ex);
        throw new SMIPluginException(SMIPluginException.ERRORCODE_NO_WBEMCLIENT, ex.fillInStackTrace(), ex.getMessage());
    }
}
Also used : DbClient(com.emc.storageos.db.client.DbClient) CIMConnectionFactory(com.emc.storageos.volumecontroller.impl.smis.CIMConnectionFactory) SMIPluginException(com.emc.storageos.plugins.metering.smis.SMIPluginException) AccessProfile(com.emc.storageos.plugins.AccessProfile) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)

Example 30 with DbClient

use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.

In the class SmisAbstractCreateVolumeJob, the method updateStatus:

/**
 * Called to update the job status when the volume create job completes.
 * <p/>
 * This is common update code for volume create operations. On any terminal
 * status it releases the pool's reserved capacity for the requested volumes;
 * on SUCCESS it persists the native IDs of the created volumes; on FAILED it
 * persists whatever volumes were partially created and marks the rest inactive.
 *
 * @param jobContext The job context.
 * @throws Exception propagated from the SMI-S client or DB access.
 */
@Override
public void updateStatus(JobContext jobContext) throws Exception {
    CloseableIterator<CIMObjectPath> iterator = null;
    DbClient dbClient = jobContext.getDbClient();
    JobStatus jobStatus = getJobStatus();
    try {
        // Nothing to do until the job reaches a terminal state.
        if (jobStatus == JobStatus.IN_PROGRESS) {
            return;
        }
        int volumeCount = 0;
        String opId = getTaskCompleter().getOpId();
        StringBuilder logMsgBuilder = new StringBuilder(String.format("Updating status of job %s to %s", opId, jobStatus.name()));
        CIMConnectionFactory cimConnectionFactory = jobContext.getCimConnectionFactory();
        WBEMClient client = getWBEMClient(dbClient, cimConnectionFactory);
        // Enumerate the storage volumes associated with the CIM job; consumed below
        // by both the SUCCESS and FAILED branches.
        iterator = client.associatorNames(getCimJob(), null, SmisConstants.CIM_STORAGE_VOLUME, null, null);
        Calendar now = Calendar.getInstance();
        // On any terminal status, refresh the pool capacity and remove the volumes
        // from pool's reserved capacity map.
        if (jobStatus == JobStatus.SUCCESS || jobStatus == JobStatus.FAILED || jobStatus == JobStatus.FATAL_ERROR) {
            SmisUtils.updateStoragePoolCapacity(dbClient, client, _storagePool);
            StoragePool pool = dbClient.queryObject(StoragePool.class, _storagePool);
            StringMap reservationMap = pool.getReservedCapacityMap();
            for (URI volumeId : getTaskCompleter().getIds()) {
                // remove from reservation map
                reservationMap.remove(volumeId.toString());
            }
            dbClient.persistObject(pool);
        }
        if (jobStatus == JobStatus.SUCCESS) {
            List<URI> volumes = new ArrayList<URI>();
            // Record the array-assigned native device ID for each created volume.
            // NOTE(review): assumes the iterator yields volumes in the same order as
            // the task completer's id list (volumeCount indexing) — confirm upstream.
            while (iterator.hasNext()) {
                CIMObjectPath volumePath = iterator.next();
                CIMProperty<String> deviceID = (CIMProperty<String>) volumePath.getKey(SmisConstants.CP_DEVICE_ID);
                String nativeID = deviceID.getValue();
                URI volumeId = getTaskCompleter().getId(volumeCount++);
                volumes.add(volumeId);
                persistVolumeNativeID(dbClient, volumeId, nativeID, now);
                processVolume(jobContext, volumePath, nativeID, volumeId, client, dbClient, logMsgBuilder, now);
            }
            // Add Volumes to Consistency Group (if needed)
            addVolumesToConsistencyGroup(jobContext, volumes);
        } else if (jobStatus == JobStatus.FAILED) {
            // Partial failure: some volumes may still have been created on the array.
            if (iterator.hasNext()) {
                while (iterator.hasNext()) {
                    CIMObjectPath volumePath = iterator.next();
                    CIMProperty<String> deviceID = (CIMProperty<String>) volumePath.getKey(SmisConstants.CP_DEVICE_ID);
                    String nativeID = deviceID.getValue();
                    URI volumeId = getTaskCompleter().getId(volumeCount++);
                    if ((nativeID != null) && (nativeID.length() != 0)) {
                        // Volume exists on the array: persist it like the success path.
                        persistVolumeNativeID(dbClient, volumeId, nativeID, now);
                        processVolume(jobContext, volumePath, nativeID, volumeId, client, dbClient, logMsgBuilder, now);
                    } else {
                        // No native ID: the volume was never created; mark it inactive.
                        logMsgBuilder.append("\n");
                        logMsgBuilder.append(String.format("Task %s failed to create volume: %s", opId, volumeId));
                        Volume volume = dbClient.queryObject(Volume.class, volumeId);
                        volume.setInactive(true);
                        dbClient.persistObject(volume);
                    }
                }
            } else {
                // Total failure: no volumes came back from the array; mark all requested
                // volumes inactive.
                for (URI id : getTaskCompleter().getIds()) {
                    logMsgBuilder.append("\n");
                    logMsgBuilder.append(String.format("Task %s failed to create volume: %s", opId, id.toString()));
                    Volume volume = dbClient.queryObject(Volume.class, id);
                    volume.setInactive(true);
                    dbClient.persistObject(volume);
                }
            }
        }
        _log.info(logMsgBuilder.toString());
    } catch (Exception e) {
        // Best-effort status processing: record the error and flag the task rather
        // than propagating out of the job framework.
        _log.error("Caught an exception while trying to updateStatus for SmisCreateVolumeJob", e);
        setPostProcessingErrorStatus("Encountered an internal error during volume create job status processing : " + e.getMessage());
    } finally {
        // Always let the superclass finish its bookkeeping and close the CIM iterator.
        super.updateStatus(jobContext);
        if (iterator != null) {
            iterator.close();
        }
    }
}
Also used : StringMap(com.emc.storageos.db.client.model.StringMap) DbClient(com.emc.storageos.db.client.DbClient) StoragePool(com.emc.storageos.db.client.model.StoragePool) Calendar(java.util.Calendar) CIMObjectPath(javax.cim.CIMObjectPath) ArrayList(java.util.ArrayList) URI(java.net.URI) WBEMException(javax.wbem.WBEMException) WorkflowException(com.emc.storageos.workflow.WorkflowException) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) IOException(java.io.IOException) CIMConnectionFactory(com.emc.storageos.volumecontroller.impl.smis.CIMConnectionFactory) CIMProperty(javax.cim.CIMProperty) Volume(com.emc.storageos.db.client.model.Volume) WBEMClient(javax.wbem.client.WBEMClient)

Aggregations

DbClient (com.emc.storageos.db.client.DbClient)253 URI (java.net.URI)155 StorageSystem (com.emc.storageos.db.client.model.StorageSystem)73 Volume (com.emc.storageos.db.client.model.Volume)67 ArrayList (java.util.ArrayList)58 Test (org.junit.Test)42 FileShare (com.emc.storageos.db.client.model.FileShare)34 NamedURI (com.emc.storageos.db.client.model.NamedURI)31 CIMObjectPath (javax.cim.CIMObjectPath)31 BlockSnapshot (com.emc.storageos.db.client.model.BlockSnapshot)29 WBEMClient (javax.wbem.client.WBEMClient)29 StringSet (com.emc.storageos.db.client.model.StringSet)28 CIMConnectionFactory (com.emc.storageos.volumecontroller.impl.smis.CIMConnectionFactory)28 ContainmentConstraint (com.emc.storageos.db.client.constraint.ContainmentConstraint)26 MigrationCallbackException (com.emc.storageos.svcs.errorhandling.resources.MigrationCallbackException)25 AlternateIdConstraint (com.emc.storageos.db.client.constraint.AlternateIdConstraint)22 InternalDbClient (com.emc.storageos.db.client.upgrade.InternalDbClient)22 VNXeApiClient (com.emc.storageos.vnxe.VNXeApiClient)21 CIMInstance (javax.cim.CIMInstance)21 BlockObject (com.emc.storageos.db.client.model.BlockObject)20