Use of com.cloud.utils.db.GlobalLock in project cloudstack by apache.
The class SolidFirePrimaryDataStoreDriver, method grantAccess.
// get the VAG associated with volumeInfo's cluster, if any (ListVolumeAccessGroups)
// if the VAG exists
// update the VAG to contain all IQNs of the hosts (ModifyVolumeAccessGroup)
// if the ID of volumeInfo is not in the VAG, add it (ModifyVolumeAccessGroup)
// if the VAG doesn't exist, create it with the IQNs of the hosts and the ID of volumeInfo (CreateVolumeAccessGroup)
@Override
public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) {
    Preconditions.checkArgument(dataObject != null, "'dataObject' should not be 'null'");
    Preconditions.checkArgument(host != null, "'host' should not be 'null'");
    Preconditions.checkArgument(dataStore != null, "'dataStore' should not be 'null'");
    long sfVolumeId = getSolidFireVolumeId(dataObject, true);
    long clusterId = host.getClusterId();
    long storagePoolId = dataStore.getId();
    ClusterVO cluster = clusterDao.findById(clusterId);
    GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
    if (!lock.lock(LOCK_TIME_IN_SECONDS)) {
        String errMsg = "Couldn't lock the DB (in grantAccess) on the following string: " + cluster.getUuid();
        LOGGER.warn(errMsg);
        throw new CloudRuntimeException(errMsg);
    }
    try {
        ClusterDetailsVO clusterDetail = clusterDetailsDao.findDetail(clusterId, SolidFireUtil.getVagKey(storagePoolId));
        String vagId = clusterDetail != null ? clusterDetail.getValue() : null;
        List<HostVO> hosts = hostDao.findByClusterId(clusterId);
        if (!SolidFireUtil.hostsSupport_iScsi(hosts)) {
            String errMsg = "Not all hosts in the compute cluster support iSCSI.";
            LOGGER.warn(errMsg);
            throw new CloudRuntimeException(errMsg);
        }
        SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);
        if (vagId != null) {
            SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getVag(sfConnection, Long.parseLong(vagId));
            long[] volumeIds = SolidFireUtil.getNewVolumeIds(sfVag.getVolumeIds(), sfVolumeId, true);
            SolidFireUtil.modifyVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds);
        } else {
            SolidFireUtil.placeVolumeInVolumeAccessGroup(sfConnection, sfVolumeId, storagePoolId, cluster.getUuid(), hosts, clusterDetailsDao);
        }
        return true;
    } finally {
        lock.unlock();
        lock.releaseRef();
    }
}
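The method above shows GlobalLock's standard contract: intern a named lock, acquire it with a timeout, do the work in a try block, and release both the lock and the interned reference in finally. A minimal sketch of that pattern follows; the lock name and timeout value are illustrative, not taken from the source.

import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.exception.CloudRuntimeException;

public final class GlobalLockPatternSketch {
    // Assumed timeout; the driver above uses its own LOCK_TIME_IN_SECONDS constant.
    private static final int LOCK_TIME_IN_SECONDS = 300;

    public void runUnderLock(String lockName, Runnable work) {
        GlobalLock lock = GlobalLock.getInternLock(lockName);
        if (!lock.lock(LOCK_TIME_IN_SECONDS)) {
            // Fail fast rather than proceed without mutual exclusion.
            throw new CloudRuntimeException("Couldn't acquire global lock: " + lockName);
        }
        try {
            work.run();
        } finally {
            lock.unlock();     // release the DB-backed lock
            lock.releaseRef(); // drop the interned reference so the lock object can be reclaimed
        }
    }
}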
Use of com.cloud.utils.db.GlobalLock in project cloudstack by apache.
The class SolidFirePrimaryDataStoreDriver, method revokeAccess.
// get the VAG associated with volumeInfo's cluster, if any (ListVolumeAccessGroups) // might not exist if using CHAP
// if the VAG exists
// remove the ID of volumeInfo from the VAG (ModifyVolumeAccessGroup)
@Override
public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) {
    if (dataObject == null || host == null || dataStore == null) {
        return;
    }
    long sfVolumeId = getSolidFireVolumeId(dataObject, false);
    long clusterId = host.getClusterId();
    long storagePoolId = dataStore.getId();
    ClusterVO cluster = clusterDao.findById(clusterId);
    GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
    if (!lock.lock(LOCK_TIME_IN_SECONDS)) {
        String errMsg = "Couldn't lock the DB (in revokeAccess) on the following string: " + cluster.getUuid();
        LOGGER.debug(errMsg);
        throw new CloudRuntimeException(errMsg);
    }
    try {
        ClusterDetailsVO clusterDetail = clusterDetailsDao.findDetail(clusterId, SolidFireUtil.getVagKey(storagePoolId));
        String vagId = clusterDetail != null ? clusterDetail.getValue() : null;
        if (vagId != null) {
            SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);
            SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getVag(sfConnection, Long.parseLong(vagId));
            long[] volumeIds = SolidFireUtil.getNewVolumeIds(sfVag.getVolumeIds(), sfVolumeId, false);
            SolidFireUtil.modifyVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds);
        }
    } finally {
        lock.unlock();
        lock.releaseRef();
    }
}
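Both driver methods repeat the same unlock()/releaseRef() pairing in their finally blocks. One way to make that pairing harder to miss is a small AutoCloseable adapter; this is a hypothetical convenience, not part of CloudStack.

import com.cloud.utils.db.GlobalLock;

// Hypothetical try-with-resources wrapper around GlobalLock.
public final class ScopedGlobalLock implements AutoCloseable {
    private final GlobalLock lock;
    private final boolean acquired;

    private ScopedGlobalLock(GlobalLock lock, boolean acquired) {
        this.lock = lock;
        this.acquired = acquired;
    }

    public static ScopedGlobalLock tryAcquire(String name, int timeoutSeconds) {
        GlobalLock lock = GlobalLock.getInternLock(name);
        boolean acquired = lock.lock(timeoutSeconds);
        if (!acquired) {
            lock.releaseRef(); // the interned reference must be dropped even on failure
        }
        return new ScopedGlobalLock(lock, acquired);
    }

    public boolean isAcquired() {
        return acquired;
    }

    @Override
    public void close() {
        if (acquired) {
            lock.unlock();
            lock.releaseRef();
        }
    }
}

With this, revokeAccess-style code becomes try (ScopedGlobalLock scoped = ScopedGlobalLock.tryAcquire(cluster.getUuid(), LOCK_TIME_IN_SECONDS)) { ... }, checking scoped.isAcquired() before touching the VAG.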
Use of com.cloud.utils.db.GlobalLock in project cloudstack by apache.
The class ConfigurationManagerImpl, method createPortableIpRange.
@Override
@DB
@ActionEvent(eventType = EventTypes.EVENT_PORTABLE_IP_RANGE_CREATE, eventDescription = "creating portable ip range", async = false)
public PortableIpRange createPortableIpRange(final CreatePortableIpRangeCmd cmd) throws ConcurrentOperationException {
    final Integer regionId = cmd.getRegionId();
    final String startIP = cmd.getStartIp();
    final String endIP = cmd.getEndIp();
    final String gateway = cmd.getGateway();
    final String netmask = cmd.getNetmask();
    String vlanId = cmd.getVlan();
    final RegionVO region = _regionDao.findById(regionId);
    if (region == null) {
        throw new InvalidParameterValueException("Invalid region ID: " + regionId);
    }
    if (!NetUtils.isValidIp(startIP) || !NetUtils.isValidIp(endIP) || !NetUtils.validIpRange(startIP, endIP)) {
        throw new InvalidParameterValueException("Invalid portable ip range: " + startIP + "-" + endIP);
    }
    if (!NetUtils.sameSubnet(startIP, gateway, netmask)) {
        throw new InvalidParameterValueException("Please ensure that your start IP is in the same subnet as " + "your portable IP range's gateway and as per the IP range's netmask.");
    }
    if (!NetUtils.sameSubnet(endIP, gateway, netmask)) {
        throw new InvalidParameterValueException("Please ensure that your end IP is in the same subnet as " + "your portable IP range's gateway and as per the IP range's netmask.");
    }
    if (checkOverlapPortableIpRange(regionId, startIP, endIP)) {
        throw new InvalidParameterValueException("Ip range: " + startIP + "-" + endIP + " overlaps with a portable" + " IP range already configured in the region " + regionId);
    }
    if (vlanId == null) {
        vlanId = Vlan.UNTAGGED;
    } else {
        if (!NetUtils.isValidVlan(vlanId)) {
            throw new InvalidParameterValueException("Invalid vlan id " + vlanId);
        }
        final List<DataCenterVO> zones = _zoneDao.listAllZones();
        if (zones != null && !zones.isEmpty()) {
            for (final DataCenterVO zone : zones) {
                // check if there is a zone VLAN with the same id
                if (_vlanDao.findByZoneAndVlanId(zone.getId(), vlanId) != null) {
                    throw new InvalidParameterValueException("Found a VLAN id " + vlanId + " already existing in" + " zone " + zone.getUuid() + " that conflicts with VLAN id of the portable ip range being configured");
                }
                // check if there is a public IP range that overlaps with the portable IP range being created
                checkOverlapPublicIpRange(zone.getId(), startIP, endIP);
            }
        }
    }
    final GlobalLock portableIpLock = GlobalLock.getInternLock("PortablePublicIpRange");
    portableIpLock.lock(5);
    try {
        final String vlanIdFinal = vlanId;
        return Transaction.execute(new TransactionCallback<PortableIpRangeVO>() {
            @Override
            public PortableIpRangeVO doInTransaction(final TransactionStatus status) {
                PortableIpRangeVO portableIpRange = new PortableIpRangeVO(regionId, vlanIdFinal, gateway, netmask, startIP, endIP);
                portableIpRange = _portableIpRangeDao.persist(portableIpRange);
                long startIpLong = NetUtils.ip2Long(startIP);
                final long endIpLong = NetUtils.ip2Long(endIP);
                while (startIpLong <= endIpLong) {
                    final PortableIpVO portableIP = new PortableIpVO(regionId, portableIpRange.getId(), vlanIdFinal, gateway, netmask, NetUtils.long2Ip(startIpLong));
                    _portableIpDao.persist(portableIP);
                    startIpLong++;
                }
                // implicitly enable the portable IP service for the region
                region.setPortableipEnabled(true);
                _regionDao.update(region.getId(), region);
                return portableIpRange;
            }
        });
    } finally {
        portableIpLock.unlock();
    }
}
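Note two differences from the driver methods above: this call site ignores the boolean returned by portableIpLock.lock(5), and its finally block calls unlock() but not releaseRef(). A sketch of the same guard with both points addressed; the helper name and error text are illustrative.

import java.util.function.Supplier;

import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.exception.CloudRuntimeException;

public final class PortableIpLockSketch {
    public <T> T executeUnderPortableIpLock(Supplier<T> transactionalWork) {
        GlobalLock portableIpLock = GlobalLock.getInternLock("PortablePublicIpRange");
        if (!portableIpLock.lock(5)) { // five-second timeout, as in the original
            throw new CloudRuntimeException("Unable to acquire lock on PortablePublicIpRange");
        }
        try {
            return transactionalWork.get(); // e.g. the Transaction.execute(...) callback above
        } finally {
            portableIpLock.unlock();
            portableIpLock.releaseRef(); // also drop the interned reference
        }
    }
}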
Use of com.cloud.utils.db.GlobalLock in project cloudstack by apache.
The class AsyncJobManagerImpl, method getHeartbeatTask.
private Runnable getHeartbeatTask() {
    return new ManagedContextRunnable() {
        @Override
        protected void runInContext() {
            GlobalLock scanLock = GlobalLock.getInternLock("AsyncJobManagerHeartbeat");
            try {
                if (scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
                    try {
                        reallyRun();
                    } finally {
                        scanLock.unlock();
                    }
                }
            } finally {
                scanLock.releaseRef();
            }
        }

        protected void reallyRun() {
            try {
                List<SyncQueueItemVO> l = _queueMgr.dequeueFromAny(getMsid(), MAX_ONETIME_SCHEDULE_SIZE);
                if (l != null && l.size() > 0) {
                    for (SyncQueueItemVO item : l) {
                        if (s_logger.isDebugEnabled()) {
                            s_logger.debug("Execute sync-queue item: " + item.toString());
                        }
                        executeQueueItem(item, false);
                    }
                }
                List<Long> standaloneWakeupJobs = wakeupScan();
                for (Long jobId : standaloneWakeupJobs) {
                    // TODO: we assume that all jobs in this category are API jobs only
                    AsyncJobVO job = _jobDao.findById(jobId);
                    if (job != null && (job.getPendingSignals() & AsyncJob.Constants.SIGNAL_MASK_WAKEUP) != 0)
                        scheduleExecution(job, false);
                }
            } catch (Throwable e) {
                s_logger.error("Unexpected exception when trying to execute queue item, ", e);
            }
        }
    };
}
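Unlike the call sites above, the heartbeat does not treat a failed acquisition as an error: if another management server already holds "AsyncJobManagerHeartbeat", this node simply skips the cycle. A sketch of that cooperative skip-if-busy pattern follows; the timeout constant is illustrative (the original uses ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION).

import com.cloud.utils.db.GlobalLock;

public final class SkipIfBusySketch {
    private static final int ACQUIRE_TIMEOUT_SECONDS = 3; // assumed value

    public void runExclusively(String lockName, Runnable scan) {
        GlobalLock scanLock = GlobalLock.getInternLock(lockName);
        try {
            if (scanLock.lock(ACQUIRE_TIMEOUT_SECONDS)) {
                try {
                    scan.run(); // only one node at a time executes the scan
                } finally {
                    scanLock.unlock();
                }
            }
            // else: another node is running the scan; silently skip this cycle
        } finally {
            scanLock.releaseRef(); // always drop the interned reference
        }
    }
}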
Use of com.cloud.utils.db.GlobalLock in project cloudstack by apache.
The class SolidFireSharedPrimaryDataStoreLifeCycle, method initialize.
// invoked to add primary storage that is based on the SolidFire plug-in
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
    final String CAPACITY_IOPS = "capacityIops";
    String url = (String) dsInfos.get("url");
    Long zoneId = (Long) dsInfos.get("zoneId");
    Long podId = (Long) dsInfos.get("podId");
    Long clusterId = (Long) dsInfos.get("clusterId");
    String storagePoolName = (String) dsInfos.get("name");
    String providerName = (String) dsInfos.get("providerName");
    Long capacityBytes = (Long) dsInfos.get("capacityBytes");
    Long capacityIops = (Long) dsInfos.get(CAPACITY_IOPS);
    String tags = (String) dsInfos.get("tags");
    @SuppressWarnings("unchecked")
    Map<String, String> details = (Map<String, String>) dsInfos.get("details");
    if (podId == null) {
        throw new CloudRuntimeException("The Pod ID must be specified.");
    }
    if (clusterId == null) {
        throw new CloudRuntimeException("The Cluster ID must be specified.");
    }
    String storageVip = SolidFireUtil.getStorageVip(url);
    int storagePort = SolidFireUtil.getStoragePort(url);
    if (capacityBytes == null || capacityBytes <= 0) {
        throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
    }
    if (capacityIops == null || capacityIops <= 0) {
        throw new IllegalArgumentException("'capacityIops' must be present and greater than 0.");
    }
    HypervisorType hypervisorType = getHypervisorTypeForCluster(clusterId);
    if (!isSupportedHypervisorType(hypervisorType)) {
        throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
    }
    String datacenter = SolidFireUtil.getValue(SolidFireUtil.DATACENTER, url, false);
    if (HypervisorType.VMware.equals(hypervisorType) && datacenter == null) {
        throw new CloudRuntimeException("'Datacenter' must be set for hypervisor type of " + HypervisorType.VMware);
    }
    PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
    parameters.setType(getStorageType(hypervisorType));
    parameters.setZoneId(zoneId);
    parameters.setPodId(podId);
    parameters.setClusterId(clusterId);
    parameters.setName(storagePoolName);
    parameters.setProviderName(providerName);
    parameters.setManaged(false);
    parameters.setCapacityBytes(capacityBytes);
    parameters.setUsedBytes(0);
    parameters.setCapacityIops(capacityIops);
    parameters.setHypervisorType(hypervisorType);
    parameters.setTags(tags);
    parameters.setDetails(details);
    String managementVip = SolidFireUtil.getManagementVip(url);
    int managementPort = SolidFireUtil.getManagementPort(url);
    details.put(SolidFireUtil.MANAGEMENT_VIP, managementVip);
    details.put(SolidFireUtil.MANAGEMENT_PORT, String.valueOf(managementPort));
    String clusterAdminUsername = SolidFireUtil.getValue(SolidFireUtil.CLUSTER_ADMIN_USERNAME, url);
    String clusterAdminPassword = SolidFireUtil.getValue(SolidFireUtil.CLUSTER_ADMIN_PASSWORD, url);
    details.put(SolidFireUtil.CLUSTER_ADMIN_USERNAME, clusterAdminUsername);
    details.put(SolidFireUtil.CLUSTER_ADMIN_PASSWORD, clusterAdminPassword);
    long lMinIops = 100;
    long lMaxIops = 15000;
    long lBurstIops = 15000;
    try {
        String minIops = SolidFireUtil.getValue(SolidFireUtil.MIN_IOPS, url);
        if (minIops != null && minIops.trim().length() > 0) {
            lMinIops = Long.parseLong(minIops);
        }
    } catch (Exception ex) {
        s_logger.info("[ignored] error getting Min IOPS: " + ex.getLocalizedMessage());
    }
    try {
        String maxIops = SolidFireUtil.getValue(SolidFireUtil.MAX_IOPS, url);
        if (maxIops != null && maxIops.trim().length() > 0) {
            lMaxIops = Long.parseLong(maxIops);
        }
    } catch (Exception ex) {
        s_logger.info("[ignored] error getting Max IOPS: " + ex.getLocalizedMessage());
    }
    try {
        String burstIops = SolidFireUtil.getValue(SolidFireUtil.BURST_IOPS, url);
        if (burstIops != null && burstIops.trim().length() > 0) {
            lBurstIops = Long.parseLong(burstIops);
        }
    } catch (Exception ex) {
        s_logger.info("[ignored] error getting Burst IOPS: " + ex.getLocalizedMessage());
    }
    if (lMinIops > lMaxIops) {
        throw new CloudRuntimeException("The parameter '" + SolidFireUtil.MIN_IOPS + "' must be less than or equal to the parameter '" + SolidFireUtil.MAX_IOPS + "'.");
    }
    if (lMaxIops > lBurstIops) {
        throw new CloudRuntimeException("The parameter '" + SolidFireUtil.MAX_IOPS + "' must be less than or equal to the parameter '" + SolidFireUtil.BURST_IOPS + "'.");
    }
    if (lMinIops != capacityIops) {
        throw new CloudRuntimeException("The parameter '" + CAPACITY_IOPS + "' must be equal to the parameter '" + SolidFireUtil.MIN_IOPS + "'.");
    }
    if (lMinIops > SolidFireUtil.MAX_IOPS_PER_VOLUME || lMaxIops > SolidFireUtil.MAX_IOPS_PER_VOLUME || lBurstIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) {
        throw new CloudRuntimeException("This volume cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_IOPS_PER_VOLUME) + " IOPS.");
    }
    details.put(SolidFireUtil.MIN_IOPS, String.valueOf(lMinIops));
    details.put(SolidFireUtil.MAX_IOPS, String.valueOf(lMaxIops));
    details.put(SolidFireUtil.BURST_IOPS, String.valueOf(lBurstIops));
    SolidFireUtil.SolidFireConnection sfConnection = new SolidFireUtil.SolidFireConnection(managementVip, managementPort, clusterAdminUsername, clusterAdminPassword);
    SolidFireCreateVolume sfCreateVolume = createSolidFireVolume(sfConnection, storagePoolName, capacityBytes, lMinIops, lMaxIops, lBurstIops);
    SolidFireUtil.SolidFireVolume sfVolume = sfCreateVolume.getVolume();
    String iqn = sfVolume.getIqn();
    details.put(SolidFireUtil.VOLUME_ID, String.valueOf(sfVolume.getId()));
    parameters.setUuid(iqn);
    if (HypervisorType.VMware.equals(hypervisorType)) {
        String datastore = iqn.replace("/", "_");
        String path = "/" + datacenter + "/" + datastore;
        parameters.setHost("VMFS datastore: " + path);
        parameters.setPort(0);
        parameters.setPath(path);
        details.put(SolidFireUtil.DATASTORE_NAME, datastore);
        details.put(SolidFireUtil.IQN, iqn);
        details.put(SolidFireUtil.STORAGE_VIP, storageVip);
        details.put(SolidFireUtil.STORAGE_PORT, String.valueOf(storagePort));
    } else {
        parameters.setHost(storageVip);
        parameters.setPort(storagePort);
        parameters.setPath(iqn);
    }
    ClusterVO cluster = _clusterDao.findById(clusterId);
    GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
    if (!lock.lock(SolidFireUtil.s_lockTimeInSeconds)) {
        String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid();
        s_logger.debug(errMsg);
        throw new CloudRuntimeException(errMsg);
    }
    DataStore dataStore = null;
    try {
        // this adds a row in the cloud.storage_pool table for this SolidFire volume
        dataStore = _primaryDataStoreHelper.createPrimaryDataStore(parameters);
        // now that we have a DataStore (we need the id from the DataStore instance), we can create a Volume Access Group, if need be, and
        // place the newly created volume in the Volume Access Group
        List<HostVO> hosts = _hostDao.findByClusterId(clusterId);
        SolidFireUtil.placeVolumeInVolumeAccessGroup(sfConnection, sfVolume.getId(), dataStore.getId(), cluster.getUuid(), hosts, _clusterDetailsDao);
        SolidFireUtil.SolidFireAccount sfAccount = sfCreateVolume.getAccount();
        Account csAccount = CallContext.current().getCallingAccount();
        SolidFireUtil.updateCsDbWithSolidFireAccountInfo(csAccount.getId(), sfAccount, dataStore.getId(), _accountDetailsDao);
    } catch (Exception ex) {
        _primaryDataStoreDao.expunge(dataStore.getId());
        throw new CloudRuntimeException(ex.getMessage());
    } finally {
        lock.unlock();
        lock.releaseRef();
    }
    return dataStore;
}
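The final step serializes Volume Access Group placement per compute cluster by locking on the cluster UUID, and it expunges the just-created storage_pool row if placement fails. Note that if createPrimaryDataStore itself threw, dataStore would still be null and the expunge call in the catch block would itself fail. A sketch of the create-then-register-with-rollback shape follows; the Step interface and method names are hypothetical stand-ins, not CloudStack APIs.

import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.exception.CloudRuntimeException;

public final class ClusterScopedRegistrationSketch {
    // Hypothetical functional interface for work that may throw.
    public interface Step {
        void run() throws Exception;
    }

    public void registerUnderClusterLock(String clusterUuid, int timeoutSeconds, Step placeInVag, Runnable rollback) {
        GlobalLock lock = GlobalLock.getInternLock(clusterUuid);
        if (!lock.lock(timeoutSeconds)) {
            throw new CloudRuntimeException("Couldn't lock the DB on the following string: " + clusterUuid);
        }
        try {
            placeInVag.run(); // e.g. SolidFireUtil.placeVolumeInVolumeAccessGroup(...)
        } catch (Exception ex) {
            rollback.run(); // e.g. expunge the storage pool row created earlier
            throw new CloudRuntimeException(ex.getMessage());
        } finally {
            lock.unlock();
            lock.releaseRef();
        }
    }
}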