use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class NetworkScheduler method unexportVolumes.
/**
 * Called from the unexportVolume call and others. This method builds the NetworkFabricInfo to be passed to the
 * NetworkDeviceController for automatic unzoning.
 *
 * @param varrayURI the URI of the VirtualArray whose auto SAN zoning setting governs the operation
 * @param volUris Collection of URIs for volumes whose references are to be deleted
 * @param exportGroupUris List of URIs of all the export groups being processed that might contain the volumes
 * @param storagePortUri the URI of the StoragePort
 * @param initiatorPort String WWPN with colons
 * @param hasExistingVolumes If true, will not mark a zone as the last reference, keeping it from being deleted
 * @return List<NetworkFCZoneInfo> detailing zones to be removed or at least unreferenced
 */
public List<NetworkFCZoneInfo> unexportVolumes(URI varrayURI, Collection<URI> volUris, List<URI> exportGroupUris,
        URI storagePortUri, String initiatorPort, boolean hasExistingVolumes) {
    List<NetworkFCZoneInfo> ourReferences = new ArrayList<NetworkFCZoneInfo>();
    VirtualArray virtualArray = _dbClient.queryObject(VirtualArray.class, varrayURI);
    if (virtualArray != null && virtualArray.getAutoSanZoning() == false) {
        _log.info("Automatic SAN zoning is disabled in virtual array: " + virtualArray.getLabel());
        return null;
    }
    initiatorPort = formatWWN(initiatorPort);
    // Get the StoragePort
    StoragePort port = null;
    try {
        port = _dbClient.queryObject(StoragePort.class, storagePortUri);
        if (port == null) {
            return null;
        }
    } catch (DatabaseException ex) {
        return null;
    }
    // See if we can find our zone references
    List<String> endPoints = new ArrayList<String>();
    endPoints.add(initiatorPort);
    endPoints.add(formatWWN(port.getPortNetworkId()));
    // Make the key for our endPoints
    String key = null;
    {
        NetworkFCZoneInfo fabricInfo = new NetworkFCZoneInfo();
        fabricInfo.setEndPoints(endPoints);
        key = fabricInfo.makeEndpointsKey();
    }
    // Create a map of the references keyed by volUri concatenated with the export group URI.
    // This allows multiple export groups to export the same volume; the zone will not be
    // deleted until the volume's references are removed from all export groups. It also
    // lets us tell whether other volumes are still using the zone.
    Map<String, FCZoneReference> volRefMap = makeExportToReferenceMap(key);
    // If there were no references at all, we don't do anything.
    if (volRefMap.isEmpty()) {
        return null;
    } else {
        // Do this for each of the Export Groups being processed.
        for (URI volUri : volUris) {
            for (URI exportGroupUri : exportGroupUris) {
                FCZoneReference ourReference = volRefMap.get(make2UriKey(volUri, exportGroupUri));
                if (ourReference == null) {
                    continue;
                }
                // We need a fabricInfo for each reference, so as to remove the
                // FCZoneReference that is keyed on volume/exportGroup.
                NetworkFCZoneInfo fabricInfo = createZoneInfoForRef(ourReference, volUri, initiatorPort,
                        port.getPortNetworkId(), null, exportGroupUri);
                ourReferences.add(fabricInfo);
                volRefMap.remove(make2UriKey(volUri, exportGroupUri));
            }
        }
        // See if all the remaining entries have been marked for deletion.
        boolean live = false;
        for (FCZoneReference ref : volRefMap.values()) {
            if (ref.getInactive() == false) {
                // Here is an apparently live reference; look up the volume and make
                // sure it's still active too.
                BlockObject vol = BlockObject.fetch(_dbClient, ref.getVolumeUri());
                ExportGroup group = _dbClient.queryObject(ExportGroup.class, ref.getGroupUri());
                if (vol != null && vol.getInactive() == false && group != null && group.getInactive() == false) {
                    live = true;
                } else {
                    // Mark the errant reference inactive.
                    _dbClient.markForDeletion(ref);
                }
            }
        }
        // Mark whether each zone is the last reference; setting existingZone prohibits deletion.
        for (NetworkFCZoneInfo fabricInfo : ourReferences) {
            fabricInfo.setLastReference(!live);
            if (hasExistingVolumes) {
                fabricInfo.setExistingZone(true);
            }
            // Pick an alternate device, just in case.
            NetworkLite portNet = getStoragePortNetwork(port);
            NetworkLite iniNet = BlockStorageScheduler.lookupNetworkLite(_dbClient,
                    StorageProtocol.block2Transport("FC"), initiatorPort);
            List<NetworkSystem> networkSystems = getZoningNetworkSystems(iniNet, portNet);
            for (NetworkSystem ns : networkSystems) {
                if (!ns.getId().equals(fabricInfo.getNetworkDeviceId())) {
                    fabricInfo.setAltNetworkDeviceId(ns.getId());
                    break;
                }
            }
        }
        return ourReferences;
    }
}
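The method returns null both when automatic zoning is disabled and when no zone references match, so callers must guard for that before iterating. A minimal, hypothetical usage sketch, assuming a NetworkScheduler instance named networkScheduler and URI variables already in scope (none of these names come from the snippet above):

// Hypothetical caller; networkScheduler and the URI variables are assumptions for illustration.
List<NetworkFCZoneInfo> zoneInfos = networkScheduler.unexportVolumes(
        varrayURI, volumeURIs, exportGroupURIs, storagePortURI,
        "10:00:00:00:c9:2f:5a:01", false);
if (zoneInfos == null) {
    // Auto SAN zoning is disabled, the StoragePort lookup failed, or no zone references matched.
    return;
}
for (NetworkFCZoneInfo zoneInfo : zoneInfos) {
    // Each entry identifies an FCZoneReference keyed on volume/export group to be removed;
    // entries marked as the last reference may also remove the zone itself from the fabric.
    _log.info("Zone reference scheduled for removal: " + zoneInfo);
}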
use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class RPCGCopyVolumeCompleter method complete.
@Override
protected void complete(DbClient dbClient, Operation.Status status, ServiceCoded coded) throws DeviceControllerException {
    try {
        // Tell the workflow we're done.
        super.complete(dbClient, status, coded);
        _log.info(String.format("Done RPCGCopyVolume - Id: %s, OpId: %s, status: %s",
                (getId() == null ? "unknown copy" : getId().toString()), getOpId(), status.name()));
        // Tell the individual objects we're done.
        for (URI id : getIds()) {
            switch (status) {
                case error:
                    if (URIUtil.isType(id, Volume.class)) {
                        dbClient.error(Volume.class, id, getOpId(), coded);
                    } else if (URIUtil.isType(id, BlockSnapshot.class)) {
                        dbClient.error(BlockSnapshot.class, id, getOpId(), coded);
                    } else if (URIUtil.isType(id, BlockSnapshotSession.class)) {
                        dbClient.error(BlockSnapshotSession.class, id, getOpId(), coded);
                    }
                    break;
                default:
                    if (URIUtil.isType(id, Volume.class)) {
                        dbClient.ready(Volume.class, id, getOpId());
                    } else if (URIUtil.isType(id, BlockSnapshot.class)) {
                        dbClient.ready(BlockSnapshot.class, id, getOpId());
                    } else if (URIUtil.isType(id, BlockSnapshotSession.class)) {
                        dbClient.ready(BlockSnapshotSession.class, id, getOpId());
                    }
            }
        }
    } catch (DatabaseException e) {
        _log.error(String.format("Failed updating status for RP Volume Copy - Id: %s, OpId: %s",
                (getId() == null ? "unknown copy" : getId().toString()), getOpId()), e);
    }
}
use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class RPCGExportOrchestrationCompleter method complete.
@Override
protected void complete(DbClient dbClient, Operation.Status status, ServiceCoded coded) throws DeviceControllerException {
    try {
        // Tell the workflow we're done.
        super.complete(dbClient, status, coded);
        _log.info("RP export orchestration completer: done");
        _log.info(String.format("Done RPCGExportOrchestration - Id: %s, OpId: %s, status: %s",
                getId().toString(), getOpId(), status.name()));
        // Tell the individual objects we're done.
        for (URI id : getIds()) {
            switch (status) {
                case error:
                    dbClient.error(Volume.class, id, getOpId(), coded);
                    break;
                default:
                    dbClient.ready(Volume.class, id, getOpId());
            }
        }
    } catch (DatabaseException e) {
        _log.error(String.format("Failed updating status for RP Export Orchestration - Id: %s, OpId: %s",
                getId().toString(), getOpId()), e);
    }
}
use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class RPCGExpandVolumeCompleter method complete.
@Override
protected void complete(DbClient dbClient, Operation.Status status, ServiceCoded coded) throws DeviceControllerException {
    try {
        // Tell the workflow we're done.
        super.complete(dbClient, status, coded);
        _log.info(String.format("Done RPCGExpandVolume - Id: %s, OpId: %s, status: %s",
                getId().toString(), getOpId(), status.name()));
        // Tell the individual objects we're done.
        for (URI id : getIds()) {
            switch (status) {
                case error:
                    dbClient.error(Volume.class, id, getOpId(), coded);
                    break;
                default:
                    dbClient.ready(Volume.class, id, getOpId());
            }
        }
    } catch (DatabaseException e) {
        _log.error(String.format("Failed updating status for RP Volume Expand - Id: %s, OpId: %s",
                getId().toString(), getOpId()), e);
    }
}
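The three RP completers above share one skeleton: super.complete() first performs workflow bookkeeping, dbClient.error() or dbClient.ready() then records per-object Operation status, and any DatabaseException from the status writes is logged rather than rethrown, so a failed database update cannot wedge the workflow. A condensed sketch of that shared shape (the enclosing class is omitted; the field and method names follow the snippets above):

// Condensed completer skeleton distilled from the three methods above.
@Override
protected void complete(DbClient dbClient, Operation.Status status, ServiceCoded coded)
        throws DeviceControllerException {
    try {
        super.complete(dbClient, status, coded); // workflow bookkeeping first
        for (URI id : getIds()) {
            if (status == Operation.Status.error) {
                dbClient.error(Volume.class, id, getOpId(), coded); // mark the object's task failed
            } else {
                dbClient.ready(Volume.class, id, getOpId()); // mark the object's task complete
            }
        }
    } catch (DatabaseException e) {
        // Log and continue: a failed status write must not fail the completer itself.
        _log.error("Failed updating status for OpId: " + getOpId(), e);
    }
}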
use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class HDSStorageDevice method doCreateVolumes.
/*
 * (non-Javadoc)
 *
 * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doCreateVolumes(com.emc.storageos.db.client.model.StorageSystem,
 * com.emc.storageos.db.client.model.StoragePool, java.lang.String, java.util.List,
 * com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper, com.emc.storageos.volumecontroller.TaskCompleter)
 */
@Override
public void doCreateVolumes(StorageSystem storageSystem, StoragePool storagePool, String opId, List<Volume> volumes,
        VirtualPoolCapabilityValuesWrapper capabilities, TaskCompleter taskCompleter) throws DeviceControllerException {
    String label = null;
    Long capacity = null;
    boolean isThinVolume = false;
    boolean opCreationFailed = false;
    StringBuilder logMsgBuilder = new StringBuilder(String.format("Create Volume Start - Array:%s, Pool:%s",
            storageSystem.getSerialNumber(), storagePool.getNativeGuid()));
    for (Volume volume : volumes) {
        logMsgBuilder.append(String.format("%nVolume:%s , IsThinlyProvisioned: %s",
                volume.getLabel(), volume.getThinlyProvisioned()));
        if ((label == null) && (volumes.size() == 1)) {
            String tenantName = "";
            try {
                TenantOrg tenant = dbClient.queryObject(TenantOrg.class, volume.getTenant().getURI());
                tenantName = tenant.getLabel();
            } catch (DatabaseException e) {
                log.error("Error looking up TenantOrg object", e);
            }
            label = nameGenerator.generate(tenantName, volume.getLabel(), volume.getId().toString(),
                    '-', HDSConstants.MAX_VOLUME_NAME_LENGTH);
        }
        if (capacity == null) {
            capacity = volume.getCapacity();
        }
        isThinVolume = volume.getThinlyProvisioned();
    }
    log.info(logMsgBuilder.toString());
    try {
        multiVolumeCheckForHitachiModel(volumes, storageSystem);
        HDSApiClient hdsApiClient = hdsApiFactory.getClient(HDSUtils.getHDSServerManagementServerInfo(storageSystem),
                storageSystem.getSmisUserName(), storageSystem.getSmisPassword());
        String systemObjectID = HDSUtils.getSystemObjectID(storageSystem);
        String poolObjectID = HDSUtils.getPoolObjectID(storagePool);
        String asyncTaskMessageId = null;
        // isThinVolume = false creates LogicalUnits.
        if (isThinVolume) {
            asyncTaskMessageId = hdsApiClient.createThinVolumes(systemObjectID, storagePool.getNativeId(), capacity,
                    volumes.size(), label, QUICK_FORMAT_TYPE, storageSystem.getModel());
        } else {
            asyncTaskMessageId = hdsApiClient.createThickVolumes(systemObjectID, poolObjectID, capacity,
                    volumes.size(), label, null, storageSystem.getModel(), null);
        }
        if (asyncTaskMessageId != null) {
            HDSJob createHDSJob = (volumes.size() > 1)
                    ? new HDSCreateMultiVolumeJob(asyncTaskMessageId, volumes.get(0).getStorageController(),
                            storagePool.getId(), volumes.size(), taskCompleter)
                    : new HDSCreateVolumeJob(asyncTaskMessageId, volumes.get(0).getStorageController(),
                            storagePool.getId(), taskCompleter);
            ControllerServiceImpl.enqueueJob(new QueueJob(createHDSJob));
        } else {
            throw HDSException.exceptions.asyncTaskFailed("Unable to get async taskId from HiCommand Device Manager for the create volume call");
        }
    } catch (final InternalException e) {
        log.error("Problem in doCreateVolumes: ", e);
        opCreationFailed = true;
        taskCompleter.error(dbClient, e);
    } catch (final Exception e) {
        log.error("Problem in doCreateVolumes: ", e);
        opCreationFailed = true;
        ServiceError serviceError = DeviceControllerErrors.hds.methodFailed("doCreateVolumes", e.getMessage());
        taskCompleter.error(dbClient, serviceError);
    }
    if (opCreationFailed) {
        // Mark the placeholder volumes inactive since creation never completed on the array.
        for (Volume vol : volumes) {
            vol.setInactive(true);
            dbClient.persistObject(vol);
        }
    }
    logMsgBuilder = new StringBuilder(String.format("Create Volumes End - Array:%s, Pool:%s",
            storageSystem.getSerialNumber(), storagePool.getNativeGuid()));
    for (Volume volume : volumes) {
        logMsgBuilder.append(String.format("%nVolume:%s", volume.getLabel()));
    }
    log.info(logMsgBuilder.toString());
}
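Note how the tenant lookup above treats DatabaseException as non-fatal: label generation simply proceeds with an empty tenant name. A small sketch of that defensive-lookup idiom as a standalone helper; lookupTenantLabel is a hypothetical name, and the null check on the query result is an added hardening not present in the original:

// Hypothetical helper distilling the tenant-name fallback used in doCreateVolumes.
private String lookupTenantLabel(DbClient dbClient, Volume volume) {
    try {
        TenantOrg tenant = dbClient.queryObject(TenantOrg.class, volume.getTenant().getURI());
        if (tenant != null) {
            return tenant.getLabel();
        }
    } catch (DatabaseException e) {
        // Non-fatal: the generated volume label will simply omit the tenant prefix.
        log.error("Error looking up TenantOrg object", e);
    }
    return "";
}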