use of com.emc.storageos.db.client.model.BlockMirror in project coprhd-controller by CoprHD.
the class SmisBlockCreateCGMirrorJob method processCGMirrors.
    /**
     * Iterate through all created sync volumes, match each up with its ViPR-created mirror, and update the mirrors in ViPR.
     *
     * @param syncVolumeIter iterator over the created sync (target) volume instances
     * @param client WBEM client used to query the SMI-S provider
     * @param dbClient ViPR database client
     * @param helper SMI-S command helper
     * @param storage storage system on which the mirrors were created
     * @param mirrors ViPR BlockMirror objects to update
     * @param repGroupID replication group instance to set on each mirror
     * @param syncInst synchronized instance to set on each mirror
     * @param syncType sync type to set on each mirror
     * @throws Exception
     */
    private void processCGMirrors(CloseableIterator<CIMInstance> syncVolumeIter, WBEMClient client, DbClient dbClient,
            SmisCommandHelper helper, StorageSystem storage, List<BlockMirror> mirrors, String repGroupID,
            String syncInst, String syncType) throws Exception {
        // Create mapping of volume.nativeDeviceId to BlockMirror object
        Map<String, BlockMirror> volIdToMirrorMap = new HashMap<String, BlockMirror>();
        for (BlockMirror mirror : mirrors) {
            Volume volume = dbClient.queryObject(Volume.class, mirror.getSource());
            volIdToMirrorMap.put(volume.getNativeId(), mirror);
        }
        // Get mapping of target Id to source Id
        Map<String, String> tgtToSrcMap = getConsistencyGroupSyncPairs(dbClient, helper, storage, volIdToMirrorMap.keySet(),
                SmisConstants.MIRROR_VALUE);
        Calendar now = Calendar.getInstance();
        while (syncVolumeIter.hasNext()) {
            // Get the target mirror volume native device id
            CIMInstance syncVolume = syncVolumeIter.next();
            CIMObjectPath syncVolumePath = syncVolume.getObjectPath();
            String syncDeviceID = syncVolumePath.getKeyValue(SmisConstants.CP_DEVICE_ID).toString();
            String elementName = CIMPropertyFactory.getPropertyValue(syncVolume, SmisConstants.CP_ELEMENT_NAME);
            String wwn = CIMPropertyFactory.getPropertyValue(syncVolume, SmisConstants.CP_WWN_NAME);
            String alternateName = CIMPropertyFactory.getPropertyValue(syncVolume, SmisConstants.CP_NAME);
            // Get the associated source volume for this sync volume
            String volumeDeviceID = tgtToSrcMap.get(syncDeviceID);
            // Look up the mirror associated with the source volume based on the source volume's native id
            BlockMirror mirror = volIdToMirrorMap.get(volumeDeviceID);
            mirror.setReplicationGroupInstance(repGroupID);
            mirror.setProvisionedCapacity(getProvisionedCapacityInformation(client, syncVolume));
            mirror.setAllocatedCapacity(getAllocatedCapacityInformation(client, syncVolume));
            mirror.setWWN(wwn);
            mirror.setAlternateName(alternateName);
            mirror.setNativeId(syncDeviceID);
            mirror.setNativeGuid(NativeGUIDGenerator.generateNativeGuid(storage, mirror));
            mirror.setDeviceLabel(elementName);
            mirror.setInactive(false);
            mirror.setCreationTime(now);
            mirror.setSynchronizedInstance(syncInst);
            mirror.setSyncType(syncType);
            dbClient.persistObject(mirror);
            _log.info(String.format("For target mirror volume %1$s, going to set BlockMirror %2$s nativeId to %3$s (%4$s). Associated volume is %5$s",
                    syncVolumePath.toString(), mirror.getId().toString(), syncDeviceID, elementName, volumeDeviceID));
        }
    }
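
The core of processCGMirrors above is a two-step lookup: each created sync target's device ID is mapped back to its source volume's native ID, and that native ID is mapped to the ViPR BlockMirror that should be updated. The following is a minimal, self-contained sketch of just that matching pattern; all IDs, map contents, and the stand-in strings for BlockMirror objects are hypothetical and only illustrate the lookup chain, not the real SMI-S types.

import java.util.HashMap;
import java.util.Map;

public class MirrorPairingSketch {
    public static void main(String[] args) {
        // Hypothetical mirrors keyed by their source volume's native device id,
        // mimicking volIdToMirrorMap above (strings stand in for BlockMirror objects).
        Map<String, String> volIdToMirror = new HashMap<>();
        volIdToMirror.put("00123", "mirror-A");
        volIdToMirror.put("00456", "mirror-B");

        // Hypothetical target-to-source device id pairs, mimicking tgtToSrcMap.
        Map<String, String> tgtToSrc = new HashMap<>();
        tgtToSrc.put("00789", "00123");
        tgtToSrc.put("00790", "00456");

        // For each sync target, walk target id -> source id -> mirror, the same
        // lookup chain processCGMirrors performs before updating each mirror.
        for (Map.Entry<String, String> pair : tgtToSrc.entrySet()) {
            String targetDeviceId = pair.getKey();
            String sourceDeviceId = pair.getValue();
            String mirror = volIdToMirror.get(sourceDeviceId);
            System.out.printf("target %s -> source %s -> %s%n", targetDeviceId, sourceDeviceId, mirror);
        }
    }
}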
use of com.emc.storageos.db.client.model.BlockMirror in project coprhd-controller by CoprHD.
the class SmisBlockCreateMirrorJob method updateStatus.
    @Override
    public void updateStatus(JobContext jobContext) throws Exception {
        CloseableIterator<CIMObjectPath> syncVolumeIter = null;
        DbClient dbClient = jobContext.getDbClient();
        BlockMirrorCreateCompleter completer = null;
        JobStatus jobStatus = getJobStatus();
        try {
            if (jobStatus == JobStatus.IN_PROGRESS) {
                return;
            }
            completer = (BlockMirrorCreateCompleter) getTaskCompleter();
            BlockMirror mirror = dbClient.queryObject(BlockMirror.class, completer.getMirrorURI());
            StorageSystem storage = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
            CIMConnectionFactory cimConnectionFactory;
            WBEMClient client = null;
            // If the job has reached a terminal state, update the storage pool capacity and remove
            // the mirror's reservation from the pool's reserved capacity map.
            if (jobStatus == JobStatus.SUCCESS || jobStatus == JobStatus.FAILED || jobStatus == JobStatus.FATAL_ERROR) {
                cimConnectionFactory = jobContext.getCimConnectionFactory();
                client = getWBEMClient(dbClient, cimConnectionFactory);
                URI poolURI = mirror.getPool();
                SmisUtils.updateStoragePoolCapacity(dbClient, client, poolURI);
                StoragePool pool = dbClient.queryObject(StoragePool.class, poolURI);
                StringMap reservationMap = pool.getReservedCapacityMap();
                // remove from reservation map
                reservationMap.remove(mirror.getId().toString());
                dbClient.persistObject(pool);
            }
            if (jobStatus == JobStatus.SUCCESS) {
                _log.info("Mirror creation success");
                cimConnectionFactory = jobContext.getCimConnectionFactory();
                client = getWBEMClient(dbClient, cimConnectionFactory);
                syncVolumeIter = client.associatorNames(getCimJob(), null, SmisConstants.CIM_STORAGE_VOLUME, null, null);
                if (syncVolumeIter.hasNext()) {
                    // Get the target mirror volume native device id
                    CIMObjectPath targetVolumePath = syncVolumeIter.next();
                    CIMInstance syncVolume = client.getInstance(targetVolumePath, false, false, null);
                    String syncDeviceID = targetVolumePath.getKey(SmisConstants.CP_DEVICE_ID).getValue().toString();
                    String elementName = CIMPropertyFactory.getPropertyValue(syncVolume, SmisConstants.CP_ELEMENT_NAME);
                    String wwn = CIMPropertyFactory.getPropertyValue(syncVolume, SmisConstants.CP_WWN_NAME);
                    String alternateName = CIMPropertyFactory.getPropertyValue(syncVolume, SmisConstants.CP_NAME);
                    CIMInstance syncInstance = getStorageSyncInstanceFromVolume(client, targetVolumePath);
                    // Look up the associated source volume based on the volume native device id
                    mirror.setProvisionedCapacity(getProvisionedCapacityInformation(client, syncVolume));
                    mirror.setAllocatedCapacity(getAllocatedCapacityInformation(client, syncVolume));
                    mirror.setWWN(wwn);
                    mirror.setAlternateName(alternateName);
                    mirror.setNativeId(syncDeviceID);
                    mirror.setNativeGuid(NativeGUIDGenerator.generateNativeGuid(storage, mirror));
                    mirror.setDeviceLabel(elementName);
                    mirror.setInactive(false);
                    mirror.setSynchronizedInstance(syncInstance.getObjectPath().toString());
                    updateSynchronizationAspects(client, mirror);
                    // mirror.setIsSyncActive(_wantSyncActive);
                    Volume volume = dbClient.queryObject(Volume.class, mirror.getSource().getURI());
                    _log.info(String.format("For target mirror volume %1$s, going to set BlockMirror %2$s nativeId to %3$s (%4$s). Associated volume is %5$s (%6$s)",
                            targetVolumePath.toString(), mirror.getId().toString(), syncDeviceID, elementName, volume.getNativeId(), volume.getDeviceLabel()));
                    dbClient.persistObject(mirror);
                }
            } else if (isJobInTerminalFailedState()) {
                _log.info("Failed to create mirror");
                completer.error(dbClient, DeviceControllerException.exceptions.attachVolumeMirrorFailed(getMessage()));
                mirror.setInactive(true);
                dbClient.persistObject(mirror);
            }
        } catch (Exception e) {
            setFatalErrorStatus("Encountered an internal error during block create mirror job status processing: " + e.getMessage());
            _log.error("Caught an exception while trying to updateStatus for SmisBlockCreateMirrorJob", e);
            if (completer != null) {
                completer.error(dbClient, DeviceControllerException.errors.jobFailed(e));
            }
        } catch (Exception e) {
        } finally {
            if (syncVolumeIter != null) {
                syncVolumeIter.close();
            }
            super.updateStatus(jobContext);
        }
    }
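
One thing updateStatus illustrates is that the mirror's capacity reservation is released as soon as the job reaches any terminal state, success or failure, before the success-only processing of the created target. Below is a simplified sketch of that reservation-release pattern under stated assumptions: a plain java.util.Map stands in for the pool's reserved capacity StringMap, and the job-status enum, method names, and IDs are hypothetical, not the CoprHD API.

import java.util.HashMap;
import java.util.Map;

public class ReservationReleaseSketch {
    enum JobStatus { IN_PROGRESS, SUCCESS, FAILED, FATAL_ERROR }

    // Stand-in for the pool's reserved capacity map, keyed by object id.
    private final Map<String, Long> reservedCapacity = new HashMap<>();

    void onJobStatus(String mirrorId, JobStatus status) {
        if (status == JobStatus.IN_PROGRESS) {
            return; // nothing to do until the job reaches a terminal state
        }
        // Terminal state (success or failure): drop the reservation so the
        // pool's free capacity is no longer reduced by this request.
        reservedCapacity.remove(mirrorId);
        if (status == JobStatus.SUCCESS) {
            // On success the real job would now populate the BlockMirror from
            // the created target volume, as shown in updateStatus above.
        }
    }

    public static void main(String[] args) {
        ReservationReleaseSketch sketch = new ReservationReleaseSketch();
        sketch.reservedCapacity.put("mirror-1", 1024L * 1024 * 1024);
        sketch.onJobStatus("mirror-1", JobStatus.FAILED);
        System.out.println("Remaining reservations: " + sketch.reservedCapacity);
    }
}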
use of com.emc.storageos.db.client.model.BlockMirror in project coprhd-controller by CoprHD.
the class StorageVolumeProcessor method processInstances.
    @Override
    protected int processInstances(Iterator<CIMInstance> instances, WBEMClient client) {
        int count = 0;
        List<CIMObjectPath> metaVolumes = new ArrayList<>();
        while (instances.hasNext()) {
            try {
                count++;
                CIMInstance volumeInstance = instances.next();
                String nativeGuid = getVolumeNativeGuid(volumeInstance.getObjectPath());
                if (isSnapShot(volumeInstance)) {
                    BlockSnapshot snapShot = checkSnapShotExistsInDB(nativeGuid, _dbClient);
                    if (null == snapShot || snapShot.getInactive()) {
                        _logger.debug("Skipping Snapshot, as its not being managed in ViPR");
                        continue;
                    }
                    updateBlockSnapShot(volumeInstance, snapShot, _keyMap);
                    if (_updateSnapShots.size() > BATCH_SIZE) {
                        _partitionManager.updateInBatches(_updateSnapShots, getPartitionSize(_keyMap), _dbClient, BLOCK_SNAPSHOT);
                        _updateSnapShots.clear();
                    }
                } else if (isMirror(volumeInstance)) {
                    BlockMirror mirror = checkBlockMirrorExistsInDB(nativeGuid, _dbClient);
                    if (null == mirror || mirror.getInactive()) {
                        _logger.debug("Skipping Mirror, as its not being managed in Bourne");
                        continue;
                    }
                    CIMInstance syncObject = getSyncElement(volumeInstance, client);
                    updateBlockMirror(volumeInstance, mirror, _keyMap, syncObject);
                    if (_updateMirrors.size() > BATCH_SIZE) {
                        _partitionManager.updateInBatches(_updateMirrors, getPartitionSize(_keyMap), _dbClient, BLOCK_MIRROR);
                        _updateMirrors.clear();
                    }
                } else {
                    Volume storageVolume = checkStorageVolumeExistsInDB(nativeGuid, _dbClient);
                    if (null == storageVolume || storageVolume.getInactive()) {
                        continue;
                    }
                    _logger.debug("Volume managed by Bourne :" + storageVolume.getNativeGuid());
                    updateStorageVolume(volumeInstance, storageVolume, _keyMap);
                    // This is applicable to meta volumes discovered as unmanaged volumes and ingested prior to ViPR controller 2.2.
                    if (storageVolume.getIsComposite() && (storageVolume.getCompositionType() == null || storageVolume.getCompositionType().isEmpty())) {
                        // The meta volume is missing meta-related data. Need to discover this data and set it in the volume.
                        metaVolumes.add(volumeInstance.getObjectPath());
                        _logger.info("Found meta volume in vipr with missing data: {}, name: {}", volumeInstance.getObjectPath(), storageVolume.getLabel());
                    }
                }
                if (_updateVolumes.size() > BATCH_SIZE) {
                    _partitionManager.updateInBatches(_updateVolumes, getPartitionSize(_keyMap), _dbClient, VOLUME);
                    _updateVolumes.clear();
                }
            } catch (Exception e) {
                _logger.error("Processing volume instance.", e);
            }
        }
        // Add meta volumes to the keyMap
        if (metaVolumes != null && !metaVolumes.isEmpty()) {
            _metaVolumePaths.addAll(metaVolumes);
            _logger.info("Added {} meta volumes.", metaVolumes.size());
        }
        return count;
    }
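
processInstances accumulates updated objects per type and flushes each collection whenever it grows beyond BATCH_SIZE, which keeps database writes bounded while iterating a potentially large CIM result set. Here is a generic sketch of that batching pattern; the class name, batch size, and flush callback are hypothetical, and the real processor delegates flushing to PartitionManager rather than a Consumer. Note that the snippet above only flushes inside the loop, so any remaining partial batch is presumably handled elsewhere in the processing pipeline (not shown here); the sketch makes that final flush explicit.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

public class BatchedUpdateSketch<T> {
    private static final int BATCH_SIZE = 100; // hypothetical; the real constant lives in the processor

    private final List<T> pending = new ArrayList<>();
    private final Consumer<List<T>> flusher;

    public BatchedUpdateSketch(Consumer<List<T>> flusher) {
        this.flusher = flusher;
    }

    public void add(T item) {
        pending.add(item);
        if (pending.size() > BATCH_SIZE) {
            flush(); // mirrors the "size() > BATCH_SIZE -> updateInBatches -> clear()" shape above
        }
    }

    public void flush() {
        if (!pending.isEmpty()) {
            flusher.accept(new ArrayList<>(pending));
            pending.clear();
        }
    }

    public static void main(String[] args) {
        BatchedUpdateSketch<String> updates =
                new BatchedUpdateSketch<>(batch -> System.out.println("persisting " + batch.size() + " objects"));
        for (int i = 0; i < 250; i++) {
            updates.add("volume-" + i);
        }
        updates.flush(); // final partial batch
    }
}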
use of com.emc.storageos.db.client.model.BlockMirror in project coprhd-controller by CoprHD.
the class StorageVolumeInfoProcessor method processVolumes.
    /**
     * Process the discovered volumes to find unmanaged volumes and populate their
     * supported-characteristics information.
     *
     * @param it iterator over the discovered volume instances
     * @param keyMap discovery context map
     * @param operation discovery operation whose result collects the processed volume paths
     * @param pool storage pool being processed
     * @param system storage system being discovered
     * @param exportedVolumes map of exported volumes to their host I/O objects
     * @param existingVolumesInCG volumes that belong to consistency groups
     * @param volumeToRAGroupMap map of volumes to their remote mirror (SRDF) information
     * @param volumeToLocalReplicaMap map of volumes to their local replica information
     * @param volumeToSyncAspectMap map of volumes to their sync aspect information
     * @param poolSupportedSLONames SLO names supported by the pool
     * @param boundVolumes device IDs of volumes bound to this thin pool, or null if not applicable
     * @param srdfEnabledTargetVPools URIs of virtual pools enabled as SRDF targets
     * @param duplicateSyncAspectElementNameMap map of volumes to duplicate sync aspect element names
     */
    private void processVolumes(Iterator<CIMInstance> it, Map<String, Object> keyMap, Operation operation, StoragePool pool,
            StorageSystem system, Map<String, VolHostIOObject> exportedVolumes, Set<String> existingVolumesInCG,
            Map<String, RemoteMirrorObject> volumeToRAGroupMap, Map<String, LocalReplicaObject> volumeToLocalReplicaMap,
            Map<String, Map<String, String>> volumeToSyncAspectMap, Set<String> poolSupportedSLONames, Set<String> boundVolumes,
            Set<URI> srdfEnabledTargetVPools, Map<String, Set<String>> duplicateSyncAspectElementNameMap) {
        List<CIMObjectPath> metaVolumes = new ArrayList<CIMObjectPath>();
        List<CIMObjectPath> metaVolumeViews = new ArrayList<CIMObjectPath>();
        while (it.hasNext()) {
            CIMInstance volumeViewInstance = null;
            try {
                volumeViewInstance = it.next();
                String volumeNativeGuid = getVolumeViewNativeGuid(volumeViewInstance.getObjectPath(), keyMap);
                Volume volume = checkStorageVolumeExistsInDB(volumeNativeGuid, _dbClient);
                // If the volume is already managed in ViPR (for example, a VPLEX VMAX backend volume),
                // there is no need to create an UnManagedVolume object for it.
                if (null != volume) {
                    _logger.debug("Skipping discovery, as this Volume {} is already being managed by ViPR.", volumeNativeGuid);
                    continue;
                }
                // The discovered volume could also be a BlockSnapshot or a BlockMirror so
                // check for these as well.
                BlockSnapshot snap = DiscoveryUtils.checkBlockSnapshotExistsInDB(_dbClient, volumeNativeGuid);
                if (null != snap) {
                    _logger.debug("Skipping discovery, as this discovered volume {} is already a managed BlockSnapshot in ViPR.", volumeNativeGuid);
                    continue;
                }
                BlockMirror mirror = checkBlockMirrorExistsInDB(volumeNativeGuid, _dbClient);
                if (null != mirror) {
                    _logger.debug("Skipping discovery, as this discovered volume {} is already a managed BlockMirror in ViPR.", volumeNativeGuid);
                    continue;
                }
                // skip non-bound volumes for this pool
                if (boundVolumes != null) {
                    String deviceId = null;
                    if (system.getUsingSmis80()) {
                        deviceId = volumeViewInstance.getObjectPath().getKey(DEVICE_ID).getValue().toString();
                    } else {
                        deviceId = volumeViewInstance.getObjectPath().getKey(SVDEVICEID).getValue().toString();
                    }
                    if (!boundVolumes.contains(deviceId)) {
                        _logger.info("Skipping volume, as this Volume {} is not bound to this Thin Storage Pool {}", volumeNativeGuid, pool.getLabel());
                        continue;
                    }
                }
                addPath(keyMap, operation.getResult(), volumeViewInstance.getObjectPath());
                String unManagedVolumeNativeGuid = getUnManagedVolumeNativeGuid(volumeViewInstance.getObjectPath(), keyMap);
                UnManagedVolume unManagedVolume = checkUnManagedVolumeExistsInDB(unManagedVolumeNativeGuid, _dbClient);
                unManagedVolume = createUnManagedVolume(unManagedVolume, volumeViewInstance, unManagedVolumeNativeGuid, pool, system,
                        volumeNativeGuid, exportedVolumes, existingVolumesInCG, volumeToRAGroupMap, volumeToLocalReplicaMap,
                        volumeToSyncAspectMap, poolSupportedSLONames, keyMap, srdfEnabledTargetVPools, duplicateSyncAspectElementNameMap);
                // set up UnManagedExportMask information
                boolean nonRpExported = false;
                @SuppressWarnings("unchecked")
                Map<String, Set<UnManagedExportMask>> masksMap = (Map<String, Set<UnManagedExportMask>>) keyMap.get(Constants.UNMANAGED_EXPORT_MASKS_MAP);
                if (masksMap != null) {
                    Set<UnManagedExportMask> uems = masksMap.get(unManagedVolume.getNativeGuid());
                    if (uems != null) {
                        _logger.info("{} UnManagedExportMasks found in the keyMap for volume {}", uems.size(), unManagedVolume.getNativeGuid());
                        for (UnManagedExportMask uem : uems) {
                            boolean backendMaskFound = false;
                            _logger.info(" adding UnManagedExportMask {} to UnManagedVolume", uem.getMaskingViewPath());
                            unManagedVolume.getUnmanagedExportMasks().add(uem.getId().toString());
                            uem.getUnmanagedVolumeUris().add(unManagedVolume.getId().toString());
                            if (!_unManagedExportMasksUpdate.contains(uem)) {
                                _unManagedExportMasksUpdate.add(uem);
                            }
                            // add the known initiators, too
                            for (String initUri : uem.getKnownInitiatorUris()) {
                                _logger.info(" adding known Initiator URI {} to UnManagedVolume", initUri);
                                unManagedVolume.getInitiatorUris().add(initUri);
                                Initiator init = _dbClient.queryObject(Initiator.class, URI.create(initUri));
                                unManagedVolume.getInitiatorNetworkIds().add(init.getInitiatorPort());
                            }
                            // log this info for debugging
                            for (String path : uem.getUnmanagedInitiatorNetworkIds()) {
                                _logger.info(" UnManagedExportMask has this initiator unknown to ViPR: {}", path);
                            }
                            // Check if this volume is in an RP mask, and mark it as an RP
                            // volume if it is.
                            Object o = keyMap.get(Constants.UNMANAGED_RECOVERPOINT_MASKS_SET);
                            if (o != null) {
                                Set<String> unmanagedRecoverPointMasks = (Set<String>) o;
                                if (!unmanagedRecoverPointMasks.isEmpty()) {
                                    if (unmanagedRecoverPointMasks.contains(uem.getId().toString())) {
                                        _logger.info("unmanaged volume {} is an RP volume", unManagedVolume.getLabel());
                                        unManagedVolume.putVolumeCharacterstics(SupportedVolumeCharacterstics.IS_RECOVERPOINT_ENABLED.toString(), "true");
                                        backendMaskFound = true;
                                    }
                                }
                            }
                            // check if this volume is in a vplex backend mask
                            // and mark it as such if it is
                            o = keyMap.get(Constants.UNMANAGED_VPLEX_BACKEND_MASKS_SET);
                            if (o != null) {
                                Set<String> unmanagedVplexBackendMasks = (Set<String>) o;
                                if (!unmanagedVplexBackendMasks.isEmpty()) {
                                    if (unmanagedVplexBackendMasks.contains(uem.getId().toString())) {
                                        _logger.info("unmanaged volume {} is a vplex backend volume", unManagedVolume.getLabel());
                                        unManagedVolume.putVolumeCharacterstics(SupportedVolumeCharacterstics.IS_VPLEX_BACKEND_VOLUME.toString(), "true");
                                    }
                                }
                            }
                            if (!backendMaskFound) {
                                nonRpExported = true;
                            }
                        }
                    }
                }
                // Record whether this volume is exported to something other than RecoverPoint;
                // this flag is kept as a convenience for ingest features.
                if (nonRpExported) {
                    _logger.info("unmanaged volume {} is exported to something other than RP. Marking IS_NONRP_EXPORTED.", unManagedVolume.getLabel());
                    unManagedVolume.putVolumeCharacterstics(SupportedVolumeCharacterstics.IS_NONRP_EXPORTED.toString(), "true");
                } else {
                    _logger.info("unmanaged volume {} is not exported OR not exported to something other than RP. Not marking IS_NONRP_EXPORTED.", unManagedVolume.getLabel());
                    unManagedVolume.putVolumeCharacterstics(SupportedVolumeCharacterstics.IS_NONRP_EXPORTED.toString(), "false");
                }
                _logger.debug("Going to check if the volume is meta: {}, volume meta property: {}", volumeViewInstance.getObjectPath(),
                        unManagedVolume.getVolumeCharacterstics().get(SupportedVolumeCharacterstics.IS_METAVOLUME.toString()));
                // Check if the volume is a meta volume and add it to the meta
                // volume list
                String isMetaVolume = unManagedVolume.getVolumeCharacterstics().get(SupportedVolumeCharacterstics.IS_METAVOLUME.toString());
                if (null != isMetaVolume && Boolean.valueOf(isMetaVolume)) {
                    if (keyMap.containsKey(Constants.IS_NEW_SMIS_PROVIDER) && Boolean.valueOf(keyMap.get(Constants.IS_NEW_SMIS_PROVIDER).toString())) {
                        metaVolumes.add(volumeViewInstance.getObjectPath());
                    } else {
                        metaVolumeViews.add(volumeViewInstance.getObjectPath());
                    }
                    _logger.info("Found meta volume: {}, name: {}", volumeViewInstance.getObjectPath(), unManagedVolume.getLabel());
                }
                // Once a batch exceeds BATCH_SIZE, flush the accumulated objects to the DB.
                if (_unManagedVolumesInsert.size() > BATCH_SIZE) {
                    _partitionManager.insertInBatches(_unManagedVolumesInsert, BATCH_SIZE, _dbClient, UNMANAGED_VOLUME);
                    _unManagedVolumesInsert.clear();
                }
                if (_unManagedVolumesUpdate.size() > BATCH_SIZE) {
                    _partitionManager.updateAndReIndexInBatches(_unManagedVolumesUpdate, BATCH_SIZE, _dbClient, UNMANAGED_VOLUME);
                    _unManagedVolumesUpdate.clear();
                }
                if (_unManagedExportMasksUpdate.size() > BATCH_SIZE) {
                    _partitionManager.updateAndReIndexInBatches(_unManagedExportMasksUpdate, BATCH_SIZE, _dbClient, UNMANAGED_EXPORT_MASK);
                    _unManagedExportMasksUpdate.clear();
                }
                unManagedVolumesReturnedFromProvider.add(unManagedVolume.getId());
            } catch (Exception ex) {
                _logger.error("Processing UnManaged Storage Volume {} ", volumeViewInstance.getObjectPath(), ex);
            }
        }
        // Add meta volumes to the keyMap
        try {
            if (metaVolumes != null && !metaVolumes.isEmpty()) {
                _metaVolumePaths.addAll(metaVolumes);
                _logger.info("Added {} meta volumes.", metaVolumes.size());
            }
            if (metaVolumeViews != null && !metaVolumeViews.isEmpty()) {
                _metaVolumeViewPaths.addAll(metaVolumeViews);
                _logger.info("Added {} meta volume views.", metaVolumeViews.size());
            }
        } catch (Exception ex) {
            _logger.error("Processing UnManaged meta volumes.", ex);
        }
    }
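
The export-mask loop in processVolumes boils down to set-membership tests: a volume is flagged as RecoverPoint-enabled or as a VPLEX backend volume when one of its masks appears in the corresponding mask-ID set, and IS_NONRP_EXPORTED is set only when at least one of its masks is not an RP mask. The following is a condensed sketch of the shape of that decision logic under stated assumptions: plain string keys stand in for the SupportedVolumeCharacterstics enum, and the method name, mask IDs, and set contents are hypothetical.

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class ExportCharacteristicsSketch {
    static Map<String, String> classify(Set<String> volumeMaskIds, Set<String> rpMaskIds, Set<String> vplexBackendMaskIds) {
        Map<String, String> characteristics = new HashMap<>();
        boolean nonRpExported = false;
        for (String maskId : volumeMaskIds) {
            if (rpMaskIds.contains(maskId)) {
                characteristics.put("IS_RECOVERPOINT_ENABLED", "true");
            } else {
                nonRpExported = true; // exported through something other than RP
            }
            if (vplexBackendMaskIds.contains(maskId)) {
                characteristics.put("IS_VPLEX_BACKEND_VOLUME", "true");
            }
        }
        characteristics.put("IS_NONRP_EXPORTED", Boolean.toString(nonRpExported));
        return characteristics;
    }

    public static void main(String[] args) {
        // mask-1 is a host export, mask-2 is an RP mask, mask-3 is a VPLEX backend mask (not used here).
        System.out.println(classify(Set.of("mask-1", "mask-2"), Set.of("mask-2"), Set.of("mask-3")));
    }
}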
use of com.emc.storageos.db.client.model.BlockMirror in project coprhd-controller by CoprHD.
the class AbstractMirrorOperations method fractureSingleVolumeMirror.
    @Override
    public void fractureSingleVolumeMirror(StorageSystem storage, URI mirror, Boolean sync, TaskCompleter taskCompleter)
            throws DeviceControllerException {
        _log.info("fractureSingleVolumeMirror operation START");
        CloseableIterator<CIMObjectPath> storageSyncRefs = null;
        try {
            BlockMirror mirrorObj = _dbClient.queryObject(BlockMirror.class, mirror);
            CIMObjectPath mirrorPath = _cimPath.getBlockObjectPath(storage, mirrorObj);
            // Get reference to the CIM_StorageSynchronized instance
            storageSyncRefs = _helper.getReference(storage, mirrorPath, SmisConstants.CIM_STORAGE_SYNCHRONIZED, null);
            boolean isVmax3 = storage.checkIfVmax3();
            while (storageSyncRefs.hasNext()) {
                CIMObjectPath storageSync = storageSyncRefs.next();
                CIMArgument[] inArgs = isVmax3
                        ? _helper.getFractureMirrorInputArgumentsWithCopyState(storageSync, sync)
                        : _helper.getFractureMirrorInputArguments(storageSync, sync);
                CIMArgument[] outArgs = new CIMArgument[5];
                // Invoke method to fracture the synchronization
                _helper.callModifyReplica(storage, inArgs, outArgs);
                taskCompleter.ready(_dbClient);
            }
        } catch (Exception e) {
            _log.info("Problem making SMI-S call", e);
            ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
            taskCompleter.error(_dbClient, serviceError);
        } finally {
            if (storageSyncRefs != null) {
                storageSyncRefs.close();
            }
        }
    }