Usage of com.emc.storageos.db.client.DbClient in project coprhd-controller (CoprHD) — class VNXFileSystemStaticLoadProcessor, method processMountList.
/**
* Process the mountList which are received from XMLAPI server.
*
* @param mountList : List of Mount objects.
* @param keyMap : keyMap.
*/
private void processMountList(final List<Object> mountList, Map<String, Object> keyMap) throws VNXFilePluginException {
_logger.info("Processing file system mount response....");
final DbClient dbClient = (DbClient) keyMap.get(VNXFileConstants.DBCLIENT);
// step -1 get the filesystem capacity map < filesystemid, size>
Map<String, Long> fsCapList = (HashMap<String, Long>) keyMap.get(VNXFileConstants.FILE_CAPACITY_MAP);
Map<String, Map<String, Long>> snapCapFsMap = (HashMap<String, Map<String, Long>>) keyMap.get(VNXFileConstants.SNAP_CAPACITY_MAP);
// step-2 get the snapshot checkpoint size for give filesystem and it is map of filesystem and map <snapshot, checkpointsize>>
AccessProfile profile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE);
// get the storagesystem from db
StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, profile.getSystemId());
List<String> fsList = null;
Map<String, List<String>> fsMountvNASMap = new HashMap<String, List<String>>();
Map<String, List<String>> fsMountPhyNASMap = new HashMap<String, List<String>>();
// step -3 we will get filesystem on VDM or DM
Iterator<Object> iterator = mountList.iterator();
if (iterator.hasNext()) {
Status status = (Status) iterator.next();
if (status.getMaxSeverity() == Severity.OK) {
// step -4 get the filesystem list for each mover or VDM in Map
while (iterator.hasNext()) {
Mount mount = (Mount) iterator.next();
if (mount.isMoverIdIsVdm() == true) {
fsList = fsMountvNASMap.get(mount.getMover());
if (null == fsList) {
fsList = new ArrayList<String>();
}
fsList.add(mount.getFileSystem());
// get filesystem list for VDM or vNAS
fsMountvNASMap.put(mount.getMover(), fsList);
_logger.debug("Filestem or Snapshot {} mounted on vdm {} ", mount.getFileSystem(), mount.getMover());
} else {
fsList = fsMountPhyNASMap.get(mount.getMover());
if (null == fsList) {
fsList = new ArrayList<String>();
}
fsList.add(mount.getFileSystem());
// get filesystem list for DM or mover
fsMountPhyNASMap.put(mount.getMover(), fsList);
_logger.debug("Filestem or Snapshot {} mounted on data mover {} ", mount.getFileSystem(), mount.getMover());
}
}
// Log the number of objects mounted on each data mover and virtual data mover!!!
for (Entry<String, List<String>> eachVNas : fsMountvNASMap.entrySet()) {
_logger.info(" Virtual data mover {} has Filestem or Snapshot mounts {} ", eachVNas.getKey(), eachVNas.getValue().size());
}
for (Entry<String, List<String>> eachNas : fsMountPhyNASMap.entrySet()) {
_logger.info(" Data mover {} has Filestem or Snapshot mounts {} ", eachNas.getKey(), eachNas.getValue().size());
}
Map<String, Long> vdmCapacityMap = new HashMap<String, Long>();
Map<String, Long> dmCapacityMap = new HashMap<String, Long>();
vdmCapacityMap = computeMoverCapacity(fsMountvNASMap, fsCapList, snapCapFsMap);
dmCapacityMap = computeMoverCapacity(fsMountPhyNASMap, fsCapList, snapCapFsMap);
prepareDBMetrics(storageSystem, dbClient, fsMountPhyNASMap, dmCapacityMap, fsMountvNASMap, vdmCapacityMap);
} else {
throw new VNXFilePluginException("Fault response received from XMLAPI Server.", VNXFilePluginException.ERRORCODE_INVALID_RESPONSE);
}
}
}
Usage of com.emc.storageos.db.client.DbClient in project coprhd-controller (CoprHD) — class VNXFileSystemUsageProcessor, method injectProvisionedCapacity.
/**
 * Injects the provisioned capacity into the Stat from the FileShare's capacity.
 *
 * @param stat the Stat whose provisioned capacity is overwritten.
 * @param keyMap discovery context map holding the DbClient.
 */
private void injectProvisionedCapacity(final Stat stat, final Map<String, Object> keyMap) {
    final DbClient dbClient = (DbClient) keyMap.get(VNXFileConstants.DBCLIENT);
    try {
        final FileShare fileObj = dbClient.queryObject(FileShare.class, stat.getResourceId());
        // queryObject may return null when the resource is unknown; previously this
        // caused an NPE that was silently swallowed by the catch block below.
        if (fileObj == null) {
            _logger.error("No FileShare found using resource {}", stat.getResourceId());
            return;
        }
        _logger.info("injectProvisioned Capacity existing {} from File System {}", stat.getProvisionedCapacity(), fileObj.getCapacity());
        stat.setProvisionedCapacity(fileObj.getCapacity());
    } catch (final Exception e) {
        // Attach the throwable so the actual failure cause is not lost (SLF4J treats a
        // trailing Throwable argument as the exception to log).
        _logger.error("No FileShare found using resource {}", stat.getResourceId(), e);
    }
}
Usage of com.emc.storageos.db.client.DbClient in project coprhd-controller (CoprHD) — class VPlexPerpetualCSVFileCollector, method collect.
@Override
public void collect(AccessProfile accessProfile, Map<String, Object> context) {
init();
DbClient dbClient = (DbClient) context.get(Constants.dbClient);
// Get which VPlex array that this applies to
URI storageSystemURI = accessProfile.getSystemId();
StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, storageSystemURI);
if (storageSystem == null) {
log.error("Could not find StorageSystem '{}' in DB", storageSystemURI);
return;
}
StringSet providerIds = storageSystem.getProviders();
for (String providerId : providerIds) {
StorageProvider provider = dbClient.queryObject(StorageProvider.class, URI.create(providerId));
LinuxSystemCLI cli = new LinuxSystemCLI(provider.getIPAddress(), provider.getUserName(), provider.getPassword());
ListVPlexPerpetualCSVFileNames listDataFileNamesCmd = new ListVPlexPerpetualCSVFileNames();
cli.executeCommand(listDataFileNamesCmd);
// Process each of the data files that we found on the VPlex management station
List<String> fileNames = listDataFileNamesCmd.getResults();
for (String fileName : fileNames) {
log.info("Processing VPLEX performance statistics file {}", fileName);
// Extract and hold the data for this data file
ReadAndParseVPlexPerpetualCSVFile readDataFile = new ReadAndParseVPlexPerpetualCSVFile(fileName);
cli.executeCommand(readDataFile);
VPlexPerpetualCSVFileData fileData = readDataFile.getResults();
// Read the headers and extract those metric names that we're interested in and to which
// DataObject (StorageHADomain or StoragePort) that it should be associated with. This
// will be used as a way to look up the object when processing the actual metric data
Map<String, MetricHeaderInfo> metricNamesToHeaderInfo = processCSVFileDataHeader(dbClient, storageSystem, fileData.getDirectorName(), fileData.getHeaders());
List<Map<String, String>> dataLines = fileData.getDataLines();
int lineCount = dataLines.size();
// There is at least one data point
if (lineCount > 1) {
// Determine the last time that metrics were collected.
Long lastCollectionTimeUTC = getLastCollectionTime(metricNamesToHeaderInfo);
// Try to find the index into dataLines based on the last collection time.
// What we're trying to do here is determine the maximum value for the metrics
// from the last collection time in ViPR, until the last data line in the file.
int start = fileData.getDataIndexForTime(lastCollectionTimeUTC);
// Have a mapping of metrics to their maximum value found in the dataLines
Map<String, Double> maxValues = findMaxMetricValues(dataLines, start, lineCount);
// Process the metrics for this file
Map<String, String> last = dataLines.get(lineCount - 1);
processDirectorStats(metricNamesToHeaderInfo, maxValues, last);
processPortStats(context, metricNamesToHeaderInfo, maxValues, last);
}
// Clean up fileData resources
fileData.close();
}
// Clean out the cache data, so that it's not laying around
clearCaches();
}
}
Usage of com.emc.storageos.db.client.DbClient in project coprhd-controller (CoprHD) — class ConnectionManagerUtils, method disallowReaping.
/**
 * Flags the CIM connection for the given storage system as keep-alive so that
 * the connection reaper leaves it in place during the operation.
 *
 * @param profile an AccessProfile (passed as Object) for the target system.
 * @param client a DbClient (passed as Object) used to load the storage system.
 * @throws BaseCollectionException when the WBEM client instance cannot be obtained.
 */
public void disallowReaping(Object profile, Object client) throws BaseCollectionException {
    AccessProfile accessProfile = (AccessProfile) profile;
    DbClient dbClient = (DbClient) client;
    try {
        // Resolve the system under discovery, then mark its connection keep-alive.
        StorageSystem system = dbClient.queryObject(StorageSystem.class, accessProfile.getSystemId());
        final CIMConnectionFactory factory = (CIMConnectionFactory) accessProfile.getCimConnectionFactory();
        factory.setKeepAliveForConnection(system);
    } catch (final IllegalStateException ex) {
        log.error("Not able to get CIMOM Client instance for ip {} due to ", accessProfile.getIpAddress(), ex);
        throw new SMIPluginException(SMIPluginException.ERRORCODE_NO_WBEMCLIENT, ex.fillInStackTrace(), ex.getMessage());
    }
}
Usage of com.emc.storageos.db.client.DbClient in project coprhd-controller (CoprHD) — class SmisAbstractCreateVolumeJob, method updateStatus.
/**
 * Called to update the job status when the volume create job completes.
 * <p/>
 * This is common update code for volume create operations. On terminal states
 * (SUCCESS, FAILED, FATAL_ERROR) the storage pool capacity is refreshed and the
 * task's volumes are removed from the pool's reserved-capacity map. On SUCCESS
 * the created volumes' native IDs are persisted and the volumes may be added to
 * a consistency group; on FAILED, volumes without a native ID are marked inactive.
 *
 * @param jobContext The job context.
 * @throws Exception declared by the signature; processing errors are caught below
 *         and converted into a post-processing error status instead.
 */
@Override
public void updateStatus(JobContext jobContext) throws Exception {
CloseableIterator<CIMObjectPath> iterator = null;
DbClient dbClient = jobContext.getDbClient();
JobStatus jobStatus = getJobStatus();
try {
// Nothing to do until the SMI-S job reaches a terminal state.
if (jobStatus == JobStatus.IN_PROGRESS) {
return;
}
// volumeCount indexes into the task completer's id list as volumes are consumed
// from the iterator below — the order of the two must correspond.
int volumeCount = 0;
String opId = getTaskCompleter().getOpId();
StringBuilder logMsgBuilder = new StringBuilder(String.format("Updating status of job %s to %s", opId, jobStatus.name()));
CIMConnectionFactory cimConnectionFactory = jobContext.getCimConnectionFactory();
WBEMClient client = getWBEMClient(dbClient, cimConnectionFactory);
// Enumerate the CIM storage volumes associated with the completed job.
iterator = client.associatorNames(getCimJob(), null, SmisConstants.CIM_STORAGE_VOLUME, null, null);
Calendar now = Calendar.getInstance();
// from pool's reserved capacity map.
if (jobStatus == JobStatus.SUCCESS || jobStatus == JobStatus.FAILED || jobStatus == JobStatus.FATAL_ERROR) {
// Terminal state: refresh pool capacity and release this task's reservations.
SmisUtils.updateStoragePoolCapacity(dbClient, client, _storagePool);
StoragePool pool = dbClient.queryObject(StoragePool.class, _storagePool);
StringMap reservationMap = pool.getReservedCapacityMap();
for (URI volumeId : getTaskCompleter().getIds()) {
// remove from reservation map
reservationMap.remove(volumeId.toString());
}
dbClient.persistObject(pool);
}
if (jobStatus == JobStatus.SUCCESS) {
List<URI> volumes = new ArrayList<URI>();
// Persist the array-assigned native ID for each created volume.
while (iterator.hasNext()) {
CIMObjectPath volumePath = iterator.next();
CIMProperty<String> deviceID = (CIMProperty<String>) volumePath.getKey(SmisConstants.CP_DEVICE_ID);
String nativeID = deviceID.getValue();
URI volumeId = getTaskCompleter().getId(volumeCount++);
volumes.add(volumeId);
persistVolumeNativeID(dbClient, volumeId, nativeID, now);
processVolume(jobContext, volumePath, nativeID, volumeId, client, dbClient, logMsgBuilder, now);
}
// Add Volumes to Consistency Group (if needed)
addVolumesToConsistencyGroup(jobContext, volumes);
} else if (jobStatus == JobStatus.FAILED) {
// Partial failure: some volumes may still have been created on the array.
if (iterator.hasNext()) {
while (iterator.hasNext()) {
CIMObjectPath volumePath = iterator.next();
CIMProperty<String> deviceID = (CIMProperty<String>) volumePath.getKey(SmisConstants.CP_DEVICE_ID);
String nativeID = deviceID.getValue();
URI volumeId = getTaskCompleter().getId(volumeCount++);
if ((nativeID != null) && (nativeID.length() != 0)) {
// Volume exists on the array; record it like a success.
persistVolumeNativeID(dbClient, volumeId, nativeID, now);
processVolume(jobContext, volumePath, nativeID, volumeId, client, dbClient, logMsgBuilder, now);
} else {
// No native ID: the create failed; deactivate the DB placeholder.
logMsgBuilder.append("\n");
logMsgBuilder.append(String.format("Task %s failed to create volume: %s", opId, volumeId));
Volume volume = dbClient.queryObject(Volume.class, volumeId);
volume.setInactive(true);
dbClient.persistObject(volume);
}
}
} else {
// Nothing was created: deactivate every volume placeholder for this task.
for (URI id : getTaskCompleter().getIds()) {
logMsgBuilder.append("\n");
logMsgBuilder.append(String.format("Task %s failed to create volume: %s", opId, id.toString()));
Volume volume = dbClient.queryObject(Volume.class, id);
volume.setInactive(true);
dbClient.persistObject(volume);
}
}
}
_log.info(logMsgBuilder.toString());
} catch (Exception e) {
// Convert processing failures into a post-processing error status rather than
// propagating, so the superclass completion logic below still runs.
_log.error("Caught an exception while trying to updateStatus for SmisCreateVolumeJob", e);
setPostProcessingErrorStatus("Encountered an internal error during volume create job status processing : " + e.getMessage());
} finally {
super.updateStatus(jobContext);
// Release the WBEM enumeration resources.
if (iterator != null) {
iterator.close();
}
}
}
Aggregations