
Example 41 with BaseCollectionException

use of com.emc.storageos.plugins.BaseCollectionException in project coprhd-controller by CoprHD.

the class StorageVolumeInfoProcessor method processResult.

@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws BaseCollectionException {
    CloseableIterator<CIMInstance> volumeInstances = null;
    EnumerateResponse<CIMInstance> volumeInstanceChunks = null;
    CIMObjectPath storagePoolPath = null;
    WBEMClient client = null;
    try {
        _dbClient = (DbClient) keyMap.get(Constants.dbClient);
        client = SMICommunicationInterface.getCIMClient(keyMap);
        _profile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE);
        Map<String, VolHostIOObject> exportedVolumes = (Map<String, VolHostIOObject>) keyMap.get(Constants.EXPORTED_VOLUMES);
        Set<String> existingVolumesInCG = (Set<String>) keyMap.get(Constants.VOLUMES_PART_OF_CG);
        @SuppressWarnings("unchecked") Map<String, RemoteMirrorObject> volumeToRAGroupMap = (Map<String, RemoteMirrorObject>) keyMap.get(Constants.UN_VOLUME_RAGROUP_MAP);
        @SuppressWarnings("unchecked") Map<String, LocalReplicaObject> volumeToLocalReplicaMap = (Map<String, LocalReplicaObject>) keyMap.get(Constants.UN_VOLUME_LOCAL_REPLICA_MAP);
        @SuppressWarnings("unchecked") Map<String, Map<String, String>> volumeToSyncAspectMap = (Map<String, Map<String, String>>) keyMap.get(Constants.SNAPSHOT_NAMES_SYNCHRONIZATION_ASPECT_MAP);
        @SuppressWarnings("unchecked") Map<String, Set<String>> duplicateSyncAspectElementNameMap = (Map<String, Set<String>>) keyMap.get(Constants.DUPLICATE_SYNC_ASPECT_ELEMENT_NAME_MAP);
        @SuppressWarnings("unchecked") Map<String, Set<String>> vmax2ThinPoolToBoundVolumesMap = (Map<String, Set<String>>) keyMap.get(Constants.VMAX2_THIN_POOL_TO_BOUND_VOLUMES);
        Set<String> boundVolumes = null;
        storagePoolPath = getObjectPathfromCIMArgument(_args);
        String poolNativeGuid = NativeGUIDGenerator.generateNativeGuidForPool(storagePoolPath);
        StoragePool pool = checkStoragePoolExistsInDB(poolNativeGuid, _dbClient);
        if (pool == null) {
            _logger.error("Skipping unmanaged volume discovery as the storage pool with path {} doesn't exist in ViPR", storagePoolPath.toString());
            return;
        }
        StorageSystem system = _dbClient.queryObject(StorageSystem.class, _profile.getSystemId());
        _unManagedVolumesInsert = new ArrayList<UnManagedVolume>();
        _unManagedVolumesUpdate = new ArrayList<UnManagedVolume>();
        _unManagedExportMasksUpdate = new ArrayList<UnManagedExportMask>();
        // get bound volumes list for VMAX2 Thin pools
        boundVolumes = vmax2ThinPoolToBoundVolumesMap.get(storagePoolPath.toString());
        Set<String> poolSupportedSLONames = (Set<String>) keyMap.get(poolNativeGuid);
        _logger.debug("Pool Supporting SLO Names:{}", poolSupportedSLONames);
        _metaVolumeViewPaths = (List<CIMObjectPath>) keyMap.get(Constants.META_VOLUMES_VIEWS);
        if (_metaVolumeViewPaths == null) {
            _metaVolumeViewPaths = new ArrayList<CIMObjectPath>();
            keyMap.put(Constants.META_VOLUMES_VIEWS, _metaVolumeViewPaths);
        }
        // create empty placeholder list for meta volume paths (cannot
        // define this in xml)
        _metaVolumePaths = (List<CIMObjectPath>) keyMap.get(Constants.META_VOLUMES);
        if (_metaVolumePaths == null) {
            _metaVolumePaths = new ArrayList<CIMObjectPath>();
            keyMap.put(Constants.META_VOLUMES, _metaVolumePaths);
        }
        _volumeToSpaceConsumedMap = (Map<String, String>) keyMap.get(Constants.VOLUME_SPACE_CONSUMED_MAP);
        // get VolumeInfo Object and inject Fast Policy Name.
        volumeInstanceChunks = (EnumerateResponse<CIMInstance>) resultObj;
        volumeInstances = volumeInstanceChunks.getResponses();
        Set<URI> srdfEnabledTargetVPools = SRDFUtils.fetchSRDFTargetVirtualPools(_dbClient);
        processVolumes(volumeInstances, keyMap, operation, pool, system, exportedVolumes, existingVolumesInCG, volumeToRAGroupMap, volumeToLocalReplicaMap, volumeToSyncAspectMap, poolSupportedSLONames, boundVolumes, srdfEnabledTargetVPools, duplicateSyncAspectElementNameMap);
        while (!volumeInstanceChunks.isEnd()) {
            _logger.info("Processing Next Volume Chunk of size {}", BATCH_SIZE);
            volumeInstanceChunks = client.getInstancesWithPath(storagePoolPath, volumeInstanceChunks.getContext(), new UnsignedInteger32(BATCH_SIZE));
            processVolumes(volumeInstanceChunks.getResponses(), keyMap, operation, pool, system, exportedVolumes, existingVolumesInCG, volumeToRAGroupMap, volumeToLocalReplicaMap, volumeToSyncAspectMap, poolSupportedSLONames, boundVolumes, srdfEnabledTargetVPools, duplicateSyncAspectElementNameMap);
        }
        if (null != _unManagedVolumesUpdate && !_unManagedVolumesUpdate.isEmpty()) {
            _partitionManager.updateAndReIndexInBatches(_unManagedVolumesUpdate, getPartitionSize(keyMap), _dbClient, UNMANAGED_VOLUME);
        }
        if (null != _unManagedVolumesInsert && !_unManagedVolumesInsert.isEmpty()) {
            _partitionManager.insertInBatches(_unManagedVolumesInsert, getPartitionSize(keyMap), _dbClient, UNMANAGED_VOLUME);
        }
        if (null != _unManagedExportMasksUpdate && !_unManagedExportMasksUpdate.isEmpty()) {
            _partitionManager.updateAndReIndexInBatches(_unManagedExportMasksUpdate, getPartitionSize(keyMap), _dbClient, UNMANAGED_EXPORT_MASK);
        }
        performStorageUnManagedVolumeBookKeeping(pool.getId());
    } catch (Exception e) {
        _logger.error("Processing Storage Volume Information failed :", e);
    } finally {
        _unManagedVolumesInsert = null;
        _unManagedVolumesUpdate = null;
        if (null != volumeInstances) {
            volumeInstances.close();
        }
        if (null != volumeInstanceChunks) {
            try {
                client.closeEnumeration(storagePoolPath, volumeInstanceChunks.getContext());
            } catch (Exception e) {
                _logger.debug("Exception occurred while closing enumeration", e);
            }
        }
    }
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) StringSet(com.emc.storageos.db.client.model.StringSet) StoragePool(com.emc.storageos.db.client.model.StoragePool) URI(java.net.URI) CIMInstance(javax.cim.CIMInstance) WBEMClient(javax.wbem.client.WBEMClient) StorageSystem(com.emc.storageos.db.client.model.StorageSystem) UnManagedExportMask(com.emc.storageos.db.client.model.UnManagedDiscoveredObjects.UnManagedExportMask) CIMObjectPath(javax.cim.CIMObjectPath) UnsignedInteger32(javax.cim.UnsignedInteger32) BaseCollectionException(com.emc.storageos.plugins.BaseCollectionException) IOException(java.io.IOException) UnManagedVolume(com.emc.storageos.db.client.model.UnManagedDiscoveredObjects.UnManagedVolume) Map(java.util.Map) HashMap(java.util.HashMap) StringSetMap(com.emc.storageos.db.client.model.StringSetMap)
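
The loop above follows the standard SBLIM pulled-enumeration pattern: drain the first chunk handed in via resultObj, keep pulling with getInstancesWithPath() until isEnd() reports true, and close the enumeration context in a finally block. Below is a minimal sketch of that pattern in isolation, assuming the same javax.wbem client used above; the ChunkedEnumerationSketch class, the BATCH_SIZE value, and the handle() callback are illustrative names, not part of the project.

import javax.cim.CIMInstance;
import javax.cim.CIMObjectPath;
import javax.cim.UnsignedInteger32;
import javax.wbem.CloseableIterator;
import javax.wbem.WBEMException;
import javax.wbem.client.EnumerateResponse;
import javax.wbem.client.WBEMClient;

public final class ChunkedEnumerationSketch {

    // assumed chunk size; the processor above uses its own BATCH_SIZE constant
    private static final int BATCH_SIZE = 1000;

    // Drains an already-opened enumeration chunk by chunk and closes it when done.
    public static void drain(WBEMClient client, CIMObjectPath path, EnumerateResponse<CIMInstance> firstChunk) throws WBEMException {
        EnumerateResponse<CIMInstance> chunk = firstChunk;
        CloseableIterator<CIMInstance> instances = null;
        try {
            instances = chunk.getResponses();
            while (instances.hasNext()) {
                handle(instances.next());
            }
            // isEnd() stays false while the CIM provider still holds more instances
            while (!chunk.isEnd()) {
                chunk = client.getInstancesWithPath(path, chunk.getContext(), new UnsignedInteger32(BATCH_SIZE));
                instances = chunk.getResponses();
                while (instances.hasNext()) {
                    handle(instances.next());
                }
            }
        } finally {
            if (null != instances) {
                instances.close();
            }
            try {
                // release the server-side enumeration context if it is still open
                client.closeEnumeration(path, chunk.getContext());
            } catch (Exception e) {
                // the context may already be closed once isEnd() returned true
            }
        }
    }

    private static void handle(CIMInstance instance) {
        // placeholder: the processor above turns each instance into an UnManagedVolume
    }
}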

Example 42 with BaseCollectionException

use of com.emc.storageos.plugins.BaseCollectionException in project coprhd-controller by CoprHD.

the class TierPolicyProcessor method processResult.

@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws BaseCollectionException {
    try {
        _dbClient = (DbClient) keyMap.get(Constants.dbClient);
        CIMObjectPath storageGroupPath = getObjectPathfromCIMArgument(_args);
        @SuppressWarnings("unchecked") Map<String, CIMObjectPath> volumeToStorageGroupMapping = (Map<String, CIMObjectPath>) keyMap.get(Constants.VOLUME_STORAGE_GROUP_MAPPING);
        CIMObjectPath volumePath = volumeToStorageGroupMapping.get(storageGroupPath.getKey(Constants.INSTANCEID).getValue().toString());
        String nativeGuid = getUnManagedVolumeNativeGuidFromVolumePath(volumePath);
        UnManagedVolume preExistingVolume = checkUnManagedVolumeExistsInDB(nativeGuid, _dbClient);
        if (null == preExistingVolume) {
            return;
        }
        // get VolumeInfo Object and inject Fast Policy Name.
        @SuppressWarnings("unchecked") final Iterator<CIMObjectPath> it = (Iterator<CIMObjectPath>) resultObj;
        while (it.hasNext()) {
            CIMObjectPath policyPath = it.next();
            injectIntoVolumeInformationContainer(preExistingVolume, Constants.POLICYRULENAME, policyPath);
            preExistingVolume.putVolumeCharacterstics(SupportedVolumeCharacterstics.IS_AUTO_TIERING_ENABLED.toString(), "true");
        }
        _dbClient.persistObject(preExistingVolume);
    } catch (Exception e) {
        _logger.error("Processing Tier Policy in Pre Existing Volume  failed", e);
    }
}
Also used : UnManagedVolume(com.emc.storageos.db.client.model.UnManagedDiscoveredObjects.UnManagedVolume) CIMObjectPath(javax.cim.CIMObjectPath) Iterator(java.util.Iterator) Map(java.util.Map) BaseCollectionException(com.emc.storageos.plugins.BaseCollectionException)

Example 43 with BaseCollectionException

use of com.emc.storageos.plugins.BaseCollectionException in project coprhd-controller by CoprHD.

the class VNXFastVolumesProcessor method processResult.

@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws BaseCollectionException {
    CloseableIterator<CIMObjectPath> volumeInstances = null;
    try {
        WBEMClient client = SMICommunicationInterface.getCIMClient(keyMap);
        _unManagedVolumesUpdate = new ArrayList<UnManagedVolume>();
        @SuppressWarnings("unchecked") EnumerateResponse<CIMObjectPath> volumeInstanceChunks = (EnumerateResponse<CIMObjectPath>) resultObj;
        volumeInstances = volumeInstanceChunks.getResponses();
        _dbClient = (DbClient) keyMap.get(Constants.dbClient);
        CIMObjectPath tierPolicypath = getObjectPathfromCIMArgument(_args);
        processVolumes(volumeInstances, tierPolicypath, keyMap, operation);
        while (!volumeInstanceChunks.isEnd()) {
            _logger.info("Processing Next Volume Chunk of size {}", BATCH_SIZE);
            volumeInstanceChunks = client.getInstancePaths(tierPolicypath, volumeInstanceChunks.getContext(), new UnsignedInteger32(BATCH_SIZE));
            processVolumes(volumeInstanceChunks.getResponses(), tierPolicypath, keyMap, operation);
        }
        if (!_unManagedVolumesUpdate.isEmpty()) {
            _partitionManager.updateInBatches(_unManagedVolumesUpdate, getPartitionSize(keyMap), _dbClient, "VOLUME");
            _unManagedVolumesUpdate.clear();
        }
    } catch (Exception e) {
        _logger.error("Discovering Tier Policies for vnx volumes failed", e);
    } finally {
        if (null != volumeInstances) {
            volumeInstances.close();
        }
    }
}
Also used : UnManagedVolume(com.emc.storageos.db.client.model.UnManagedDiscoveredObjects.UnManagedVolume) CIMObjectPath(javax.cim.CIMObjectPath) UnsignedInteger32(javax.cim.UnsignedInteger32) WBEMClient(javax.wbem.client.WBEMClient) BaseCollectionException(com.emc.storageos.plugins.BaseCollectionException) EnumerateResponse(javax.wbem.client.EnumerateResponse)

Example 44 with BaseCollectionException

use of com.emc.storageos.plugins.BaseCollectionException in project coprhd-controller by CoprHD.

the class CephCommunicationInterface method scan.

@Override
public void scan(AccessProfile accessProfile) throws BaseCollectionException {
    _log.info("Starting scan of Ceph StorageProvider. IP={}", accessProfile.getIpAddress());
    StorageProvider provider = _dbClient.queryObject(StorageProvider.class, accessProfile.getSystemId());
    StorageProvider.ConnectionStatus status = StorageProvider.ConnectionStatus.NOTCONNECTED;
    Map<String, StorageSystemViewObject> storageSystemsCache = accessProfile.getCache();
    String cephType = StorageSystem.Type.ceph.name();
    try (CephClient cephClient = CephUtils.connectToCeph(_cephClientFactory, provider)) {
        ClusterInfo clusterInfo = cephClient.getClusterInfo();
        String systemNativeGUID = NativeGUIDGenerator.generateNativeGuid(cephType, clusterInfo.getFsid());
        StorageSystemViewObject viewObject = storageSystemsCache.get(systemNativeGUID);
        if (viewObject == null) {
            viewObject = new StorageSystemViewObject();
        }
        viewObject.setDeviceType(cephType);
        viewObject.addprovider(accessProfile.getSystemId().toString());
        viewObject.setProperty(StorageSystemViewObject.SERIAL_NUMBER, clusterInfo.getFsid());
        viewObject.setProperty(StorageSystemViewObject.STORAGE_NAME, systemNativeGUID);
        viewObject.setProperty(StorageSystemViewObject.MODEL, "Ceph Storage Cluster");
        // TODO It is possible to figure out more Ceph cluster details (version, alternative IPs, etc),
        // but neither the Java client nor pure librados provides this info. Since Ceph (and its client
        // libraries) is an open source project, it is possible to extend its functionality and then use it here.
        storageSystemsCache.put(systemNativeGUID, viewObject);
        status = StorageProvider.ConnectionStatus.CONNECTED;
    } catch (Exception e) {
        _log.error(String.format("Exception was encountered when attempting to scan Ceph Instance %s", accessProfile.getIpAddress()), e);
        throw CephException.exceptions.operationException(e);
    } finally {
        provider.setConnectionStatus(status.name());
        _dbClient.updateObject(provider);
    }
}
Also used : ClusterInfo(com.emc.storageos.ceph.model.ClusterInfo) StorageSystemViewObject(com.emc.storageos.plugins.StorageSystemViewObject) StorageProvider(com.emc.storageos.db.client.model.StorageProvider) CephClient(com.emc.storageos.ceph.CephClient) CephException(com.emc.storageos.ceph.CephException) BaseCollectionException(com.emc.storageos.plugins.BaseCollectionException)
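
Most of the scan() above builds a StorageSystemViewObject and registers it in the provider scan cache under the cluster's native GUID. The following is a minimal sketch of just that cache-population step, using only the StorageSystemViewObject calls visible in the snippet; the ScanCacheSketch class and registerCluster() helper are invented names for illustration.

import java.util.Map;

import com.emc.storageos.plugins.StorageSystemViewObject;

public final class ScanCacheSketch {

    // Adds or refreshes the cache entry for a cluster identified by its fsid.
    public static void registerCluster(Map<String, StorageSystemViewObject> cache, String deviceType, String providerId, String fsid, String nativeGuid) {
        StorageSystemViewObject viewObject = cache.get(nativeGuid);
        if (viewObject == null) {
            viewObject = new StorageSystemViewObject();
        }
        viewObject.setDeviceType(deviceType);
        // the setter really is spelled addprovider() in the snippet above
        viewObject.addprovider(providerId);
        viewObject.setProperty(StorageSystemViewObject.SERIAL_NUMBER, fsid);
        viewObject.setProperty(StorageSystemViewObject.STORAGE_NAME, nativeGuid);
        viewObject.setProperty(StorageSystemViewObject.MODEL, "Ceph Storage Cluster");
        cache.put(nativeGuid, viewObject);
    }
}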

Example 45 with BaseCollectionException

use of com.emc.storageos.plugins.BaseCollectionException in project coprhd-controller by CoprHD.

the class DataDomainCommunicationInterface method collectStatisticsInformation.

@Override
public void collectStatisticsInformation(AccessProfile accessProfile) throws BaseCollectionException, DataDomainApiException {
    long statsCount = 0;
    URI storageSystemId = null;
    StorageSystem storageSystem = null;
    try {
        _log.info("Stats collection for {} using ip {}", accessProfile.getSystemId(), accessProfile.getIpAddress());
        storageSystemId = accessProfile.getSystemId();
        storageSystem = _dbClient.queryObject(StorageSystem.class, storageSystemId);
        initializeKeyMap(accessProfile);
        DataDomainClient ddClient = getDataDomainClient(accessProfile);
        URI providerId = storageSystem.getActiveProviderURI();
        StorageProvider provider = _dbClient.queryObject(StorageProvider.class, providerId);
        ZeroRecordGenerator zeroRecordGenerator = new FileZeroRecordGenerator();
        CassandraInsertion statsColumnInjector = new FileDBInsertion();
        DataDomainStatsRecorder recorder = new DataDomainStatsRecorder(zeroRecordGenerator, statsColumnInjector);
        // Stats collection start time
        long statsCollectionStartTime = storageSystem.getLastMeteringRunTime();
        // If there was no previous metering run, fall back to the time the storage system was successfully discovered.
        if (statsCollectionStartTime == 0) {
            statsCollectionStartTime = storageSystem.getSuccessDiscoveryTime();
        }
        // Stats collection end time
        long statsCollectionEndTime = accessProfile.getCurrentSampleTime();
        _keyMap.put(Constants._TimeCollected, statsCollectionEndTime);
        // Get list of file systems on the device that are in the DB
        List<URI> fsUris = zeroRecordGenerator.extractVolumesOrFileSharesFromDB(storageSystemId, _dbClient, FileShare.class);
        List<FileShare> fsObjs = _dbClient.queryObject(FileShare.class, fsUris, true);
        // Get capacity usage info on individual mtrees
        List<Stat> stats = new ArrayList<>();
        for (FileShare fileSystem : fsObjs) {
            String fsNativeId = fileSystem.getNativeId();
            String fsNativeGuid = fileSystem.getNativeGuid();
            // Retrieve the last 2 data points only
            int entriesRetrieved = 0;
            List<DDStatsCapacityInfo> statsCapInfos = new ArrayList<>();
            // Default
            DDStatsIntervalQuery granularity = DDStatsIntervalQuery.hour;
            // Retrieve hourly data - lowest resolution supported by DD arrays.
            try {
                DDMtreeCapacityInfos mtreeCapInfo = ddClient.getMTreeCapacityInfo(storageSystem.getNativeGuid(), fsNativeId, DataDomainApiConstants.STATS_FIRST_PAGE, DataDomainApiConstants.STATS_PAGE_SIZE, DDStatsDataViewQuery.absolute, DDStatsIntervalQuery.hour, true, DataDomainApiConstants.DESCENDING_SORT);
                entriesRetrieved += mtreeCapInfo.getPagingInfo().getPageEntries();
                // Collect stats
                List<DDStatsCapacityInfo> capacityInfos = mtreeCapInfo.getStatsCapacityInfo();
                if (capacityInfos != null) {
                    statsCapInfos.addAll(capacityInfos);
                }
                statsCount += entriesRetrieved;
            } catch (Exception e) {
                _log.info("Stats collection info not found for fileNativeGuid ", fsNativeGuid);
                continue;
            }
            // Retrieved all pages, now save in DB if info changed in the latest data point
            long usedCapacity = 0;
            if (fileSystem.getUsedCapacity() != null) {
                usedCapacity = fileSystem.getUsedCapacity();
            }
            DDStatsCapacityInfo statsCapInfo = null;
            Stat stat = null;
            if (statsCapInfos != null && !statsCapInfos.isEmpty()) {
                statsCapInfo = statsCapInfos.get(0);
                _keyMap.put(Constants._Granularity, granularity);
                stat = recorder.addUsageInfo(statsCapInfo, _keyMap, fsNativeGuid, ddClient);
            }
            // Persist FileShare capacity stats only if usage info has changed
            long allocatedCapacity = 0;
            if (stat != null) {
                allocatedCapacity = stat.getAllocatedCapacity();
            }
            // TODO: a method to detect changes in stats will be useful
            boolean statsChanged = (usedCapacity != allocatedCapacity);
            if ((stat != null) && (!fileSystem.getInactive()) && (statsChanged)) {
                stats.add(stat);
                fileSystem.setUsedCapacity(allocatedCapacity);
                fileSystem.setCapacity(stat.getProvisionedCapacity());
                _dbClient.persistObject(fileSystem);
            }
        }
        // Determine if any filesystems were deleted from this device and write zero records for the deleted ones
        zeroRecordGenerator.identifyRecordstobeZeroed(_keyMap, stats, FileShare.class);
        persistStatsInDB(stats);
        // TODO: Metering task completer will overwrite currTime below with a new
        // time as the last collection time. To avoid this, setLastTime in
        // MeteringTaskCompleter should be modified to set last metering run time
        // only if it
        storageSystem.setLastMeteringRunTime(statsCollectionEndTime);
        _log.info("Done metering device {}, processed {} file system stats ", storageSystemId, statsCount);
        _log.info("End collecting statistics for ip address {}", accessProfile.getIpAddress());
    } catch (Exception e) {
        _log.error("CollectStatisticsInformation failed. Storage system: " + storageSystemId, e);
        throw DataDomainApiException.exceptions.statsCollectionFailed(e.getMessage());
    }
}
Also used : DDStatsCapacityInfo(com.emc.storageos.datadomain.restapi.model.DDStatsCapacityInfo) ArrayList(java.util.ArrayList) StorageProvider(com.emc.storageos.db.client.model.StorageProvider) FileZeroRecordGenerator(com.emc.storageos.volumecontroller.impl.plugins.metering.file.FileZeroRecordGenerator) URI(java.net.URI) DataDomainClient(com.emc.storageos.datadomain.restapi.DataDomainClient) FileShare(com.emc.storageos.db.client.model.FileShare) UnManagedSMBFileShare(com.emc.storageos.db.client.model.UnManagedDiscoveredObjects.UnManagedSMBFileShare) AlternateIdConstraint(com.emc.storageos.db.client.constraint.AlternateIdConstraint) ContainmentConstraint(com.emc.storageos.db.client.constraint.ContainmentConstraint) InternalException(com.emc.storageos.svcs.errorhandling.resources.InternalException) DatabaseException(com.emc.storageos.db.exceptions.DatabaseException) DataDomainApiException(com.emc.storageos.datadomain.restapi.errorhandling.DataDomainApiException) BaseCollectionException(com.emc.storageos.plugins.BaseCollectionException) DataDomainResourceNotFoundException(com.emc.storageos.datadomain.restapi.errorhandling.DataDomainResourceNotFoundException) IOException(java.io.IOException) DataDomainStatsRecorder(com.emc.storageos.volumecontroller.impl.plugins.metering.datadomain.DataDomainStatsRecorder) Stat(com.emc.storageos.db.client.model.Stat) DDMtreeCapacityInfos(com.emc.storageos.datadomain.restapi.model.DDMtreeCapacityInfos) CassandraInsertion(com.emc.storageos.volumecontroller.impl.plugins.metering.CassandraInsertion) FileZeroRecordGenerator(com.emc.storageos.volumecontroller.impl.plugins.metering.file.FileZeroRecordGenerator) ZeroRecordGenerator(com.emc.storageos.volumecontroller.impl.plugins.metering.ZeroRecordGenerator) StorageSystem(com.emc.storageos.db.client.model.StorageSystem) FileDBInsertion(com.emc.storageos.volumecontroller.impl.plugins.metering.file.FileDBInsertion) DDStatsIntervalQuery(com.emc.storageos.datadomain.restapi.model.DDStatsIntervalQuery)
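
The per-filesystem block above persists a capacity Stat only when the allocated capacity reported by the array differs from the used capacity already stored on the FileShare. Here is a minimal sketch of that guard, using only the FileShare, Stat and DbClient calls visible in the snippet; the FileStatsSketch class, the recordIfChanged() name and the DbClient import path are assumptions.

import java.util.List;

import com.emc.storageos.db.client.DbClient;
import com.emc.storageos.db.client.model.FileShare;
import com.emc.storageos.db.client.model.Stat;

public final class FileStatsSketch {

    // Records the stat and refreshes the FileShare capacities only when usage changed.
    public static void recordIfChanged(FileShare fileSystem, Stat stat, List<Stat> stats, DbClient dbClient) {
        if (stat == null || fileSystem.getInactive()) {
            return;
        }
        long usedCapacity = fileSystem.getUsedCapacity() != null ? fileSystem.getUsedCapacity() : 0L;
        long allocatedCapacity = stat.getAllocatedCapacity();
        if (usedCapacity == allocatedCapacity) {
            // nothing changed since the last metering run, skip the DB write
            return;
        }
        stats.add(stat);
        fileSystem.setUsedCapacity(allocatedCapacity);
        fileSystem.setCapacity(stat.getProvisionedCapacity());
        dbClient.persistObject(fileSystem);
    }
}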

Aggregations

BaseCollectionException (com.emc.storageos.plugins.BaseCollectionException): 185 usages
Iterator (java.util.Iterator): 66 usages
CIMInstance (javax.cim.CIMInstance): 66 usages
CIMObjectPath (javax.cim.CIMObjectPath): 59 usages
StorageSystem (com.emc.storageos.db.client.model.StorageSystem): 55 usages
IOException (java.io.IOException): 47 usages
URI (java.net.URI): 47 usages
ArrayList (java.util.ArrayList): 47 usages
PostMethod (org.apache.commons.httpclient.methods.PostMethod): 36 usages
ResponsePacket (com.emc.nas.vnxfile.xmlapi.ResponsePacket): 35 usages
Status (com.emc.nas.vnxfile.xmlapi.Status): 33 usages
AccessProfile (com.emc.storageos.plugins.AccessProfile): 30 usages
List (java.util.List): 30 usages
Map (java.util.Map): 30 usages
DatabaseException (com.emc.storageos.db.exceptions.DatabaseException): 28 usages
StoragePool (com.emc.storageos.db.client.model.StoragePool): 27 usages
Header (org.apache.commons.httpclient.Header): 27 usages
StoragePort (com.emc.storageos.db.client.model.StoragePort): 22 usages
HashSet (java.util.HashSet): 18 usages
URISyntaxException (java.net.URISyntaxException): 17 usages