Use of com.cloud.vm.VMInstanceVO in project cloudstack by apache.
In class ResourceManagerImpl, method doDeleteHost:
@DB
protected boolean doDeleteHost(final long hostId, final boolean isForced, final boolean isForceDeleteStorage) {
    _accountMgr.getActiveUser(CallContext.current().getCallingUserId());
    // Verify that host exists
    final HostVO host = _hostDao.findById(hostId);
    if (host == null) {
        throw new InvalidParameterValueException("Host with id " + hostId + " doesn't exist");
    }
    _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), host.getDataCenterId());
    if (!isForced && host.getResourceState() != ResourceState.Maintenance) {
        throw new CloudRuntimeException("Host " + host.getUuid() + " cannot be deleted as it is not in maintenance mode. Either put the host into maintenance or perform a forced deletion.");
    }
    // Get storage pool host mappings here because they can be removed as a
    // part of handleDisconnect later
    // TODO: track down the buggy logic that removes these mappings
    final List<StoragePoolHostVO> pools = _storagePoolHostDao.listByHostIdIncludingRemoved(hostId);
    final ResourceStateAdapter.DeleteHostAnswer answer = (ResourceStateAdapter.DeleteHostAnswer) dispatchToStateAdapters(ResourceStateAdapter.Event.DELETE_HOST, false, host, isForced, isForceDeleteStorage);
    if (answer == null) {
        throw new CloudRuntimeException("No resource adapter responded to DELETE_HOST event for " + host.getName() + " id = " + hostId + ", hypervisorType is " + host.getHypervisorType() + ", host type is " + host.getType());
    }
    if (answer.getIsException()) {
        return false;
    }
    if (!answer.getIsContinue()) {
        return true;
    }
    Long clusterId = host.getClusterId();
    _agentMgr.notifyMonitorsOfHostAboutToBeRemoved(host.getId());
    Transaction.execute(new TransactionCallbackNoReturn() {
        @Override
        public void doInTransactionWithoutResult(final TransactionStatus status) {
            _dcDao.releasePrivateIpAddress(host.getPrivateIpAddress(), host.getDataCenterId(), null);
            _agentMgr.disconnectWithoutInvestigation(hostId, Status.Event.Remove);
            // delete host details
            _hostDetailsDao.deleteDetails(hostId);
            // if host is GPU enabled, delete GPU entries
            _hostGpuGroupsDao.deleteGpuEntries(hostId);
            // delete host tags
            _hostTagsDao.deleteTags(hostId);
            host.setGuid(null);
            // re-read the cluster id before it is cleared on the host row
            final Long clusterId = host.getClusterId();
            host.setClusterId(null);
            _hostDao.update(host.getId(), host);
            _hostDao.remove(hostId);
            if (clusterId != null) {
                final List<HostVO> hosts = listAllHostsInCluster(clusterId);
                if (hosts.size() == 0) {
                    final ClusterVO cluster = _clusterDao.findById(clusterId);
                    cluster.setGuid(null);
                    _clusterDao.update(clusterId, cluster);
                }
            }
            try {
                resourceStateTransitTo(host, ResourceState.Event.DeleteHost, _nodeId);
            } catch (final NoTransitionException e) {
                s_logger.debug("Cannot transition host " + host.getId() + " to Enabled state", e);
            }
            // Delete the associated entries in host ref table
            _storagePoolHostDao.deletePrimaryRecordsForHost(hostId);
            // Make sure any VMs that were marked as being on this host are cleaned up
            final List<VMInstanceVO> vms = _vmDao.listByHostId(hostId);
            for (final VMInstanceVO vm : vms) {
                // this is how VirtualMachineManagerImpl does it when it syncs VM states
                vm.setState(State.Stopped);
                vm.setHostId(null);
                _vmDao.persist(vm);
            }
            // remove local storage pools that were mapped to this host, if forced
            for (final StoragePoolHostVO pool : pools) {
                final Long poolId = pool.getPoolId();
                final StoragePoolVO storagePool = _storagePoolDao.findById(poolId);
                if (storagePool.isLocal() && isForceDeleteStorage) {
                    storagePool.setUuid(null);
                    storagePool.setClusterId(null);
                    _storagePoolDao.update(poolId, storagePool);
                    _storagePoolDao.remove(poolId);
                    s_logger.debug("Local storage id=" + poolId + " is removed as a part of host removal id=" + hostId);
                }
            }
            // delete the op_host_capacity entry
            final Object[] capacityTypes = { Capacity.CAPACITY_TYPE_CPU, Capacity.CAPACITY_TYPE_MEMORY };
            final SearchCriteria<CapacityVO> hostCapacitySC = _capacityDao.createSearchCriteria();
            hostCapacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, hostId);
            hostCapacitySC.addAnd("capacityType", SearchCriteria.Op.IN, capacityTypes);
            _capacityDao.remove(hostCapacitySC);
            // remove from dedicated resources
            final DedicatedResourceVO dr = _dedicatedDao.findByHostId(hostId);
            if (dr != null) {
                _dedicatedDao.remove(dr.getId());
            }
        }
    });
    if (clusterId != null) {
        _agentMgr.notifyMonitorsOfRemovedHost(host.getId(), clusterId);
    }
    return true;
}
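
The VM cleanup loop near the end of the transaction mirrors what VirtualMachineManagerImpl does when it syncs VM states. A minimal sketch of that step factored into its own helper is shown below; the helper name is hypothetical, and it assumes the same injected _vmDao (VMInstanceDao) used above.

    // Illustrative sketch only, not CloudStack source: the VM cleanup step from
    // doDeleteHost extracted into a hypothetical helper.
    private void detachVmsFromDeletedHost(final long hostId) {
        final List<VMInstanceVO> vms = _vmDao.listByHostId(hostId);
        for (final VMInstanceVO vm : vms) {
            vm.setState(State.Stopped); // mark the instance stopped, as the state sync would
            vm.setHostId(null);         // the host row is gone, so drop the reference to it
            _vmDao.persist(vm);
        }
    }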
Use of com.cloud.vm.VMInstanceVO in project cloudstack by apache.
In class ResourceManagerImpl, method doCancelMaintenance:
private boolean doCancelMaintenance(final long hostId) {
    HostVO host;
    host = _hostDao.findById(hostId);
    if (host == null || host.getRemoved() != null) {
        s_logger.warn("Unable to find host " + hostId);
        return true;
    }
    /*
     * TODO: think twice about returning true here versus throwing an exception;
     * an exception is preferable because it always exposes bugs
     */
    if (host.getResourceState() != ResourceState.PrepareForMaintenance && host.getResourceState() != ResourceState.Maintenance && host.getResourceState() != ResourceState.ErrorInMaintenance) {
        throw new CloudRuntimeException("Cannot perform cancelMaintenance when resource state is " + host.getResourceState() + ", hostId = " + hostId);
    }
    /* TODO: move to listener */
    _haMgr.cancelScheduledMigrations(host);
    boolean vms_migrating = false;
    final List<VMInstanceVO> vms = _haMgr.findTakenMigrationWork();
    for (final VMInstanceVO vm : vms) {
        if (vm.getHostId() != null && vm.getHostId() == hostId) {
            s_logger.warn("Unable to cancel migration because the vm is being migrated: " + vm + ", hostId = " + hostId);
            vms_migrating = true;
        }
    }
    try {
        resourceStateTransitTo(host, ResourceState.Event.AdminCancelMaintenance, _nodeId);
        _agentMgr.pullAgentOutMaintenance(hostId);
        // for KVM and LXC hosts, log into the host and restart cloudstack-agent
        if ((host.getHypervisorType() == HypervisorType.KVM && !vms_migrating) || host.getHypervisorType() == HypervisorType.LXC) {
            final boolean sshToAgent = Boolean.parseBoolean(_configDao.getValue(Config.KvmSshToAgentEnabled.key()));
            if (!sshToAgent) {
                s_logger.info("Configuration tells us not to SSH into Agents. Please restart the Agent (" + hostId + ") manually");
                return true;
            }
            _hostDao.loadDetails(host);
            final String password = host.getDetail("password");
            final String username = host.getDetail("username");
            if (password == null || username == null) {
                s_logger.debug("Can't find password/username");
                return false;
            }
            final com.trilead.ssh2.Connection connection = SSHCmdHelper.acquireAuthorizedConnection(host.getPrivateIpAddress(), 22, username, password);
            if (connection == null) {
                s_logger.debug("Failed to connect to host: " + host.getPrivateIpAddress());
                return false;
            }
            try {
                SSHCmdHelper.sshExecuteCmdOneShot(connection, "service cloudstack-agent restart");
            } catch (final SshException e) {
                return false;
            }
        }
        return true;
    } catch (final NoTransitionException e) {
        s_logger.debug("Cannot transition host " + host.getId() + " to Enabled state", e);
        return false;
    }
}
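
One detail worth noting in the KVM branch above: the Trilead SSH connection obtained from SSHCmdHelper is never explicitly released. A hedged variant of the same restart step, closing the connection in a finally block, might look like the sketch below (behaviour is otherwise unchanged; Connection.close() is the standard Trilead API).

    // Sketch of the same agent-restart step with explicit connection cleanup.
    final com.trilead.ssh2.Connection connection = SSHCmdHelper.acquireAuthorizedConnection(host.getPrivateIpAddress(), 22, username, password);
    if (connection == null) {
        s_logger.debug("Failed to connect to host: " + host.getPrivateIpAddress());
        return false;
    }
    try {
        SSHCmdHelper.sshExecuteCmdOneShot(connection, "service cloudstack-agent restart");
    } catch (final SshException e) {
        return false;
    } finally {
        connection.close(); // release the SSH session whether or not the restart command succeeded
    }
    return true;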
Use of com.cloud.vm.VMInstanceVO in project cloudstack by apache.
In class CloudOrchestrator, method createVirtualMachineFromScratch:
@Override
public VirtualMachineEntity createVirtualMachineFromScratch(String id, String owner, String isoId, String hostName, String displayName, String hypervisor, String os, int cpu, int speed, long memory, Long diskSize, List<String> computeTags, List<String> rootDiskTags, Map<String, NicProfile> networkNicMap, DeploymentPlan plan) throws InsufficientCapacityException {
    // VirtualMachineEntityImpl vmEntity = new VirtualMachineEntityImpl(id, owner, hostName, displayName, cpu, speed, memory, computeTags, rootDiskTags, networks, vmEntityManager);
    VirtualMachineEntityImpl vmEntity = ComponentContext.inject(VirtualMachineEntityImpl.class);
    vmEntity.init(id, owner, hostName, displayName, cpu, speed, memory, computeTags, rootDiskTags, new ArrayList<String>(networkNicMap.keySet()));
    // load vm instance and offerings and call virtualMachineManagerImpl
    VMInstanceVO vm = _vmDao.findByUuid(id);
    ServiceOfferingVO computeOffering = _serviceOfferingDao.findById(vm.getId(), vm.getServiceOfferingId());
    DiskOfferingInfo rootDiskOfferingInfo = new DiskOfferingInfo();
    rootDiskOfferingInfo.setDiskOffering(computeOffering);
    Long diskOfferingId = vm.getDiskOfferingId();
    if (diskOfferingId == null) {
        throw new InvalidParameterValueException("Installing from ISO requires a disk offering to be specified for the root disk.");
    }
    DiskOfferingVO diskOffering = _diskOfferingDao.findById(diskOfferingId);
    if (diskOffering == null) {
        throw new InvalidParameterValueException("Unable to find disk offering " + diskOfferingId);
    }
    Long size = null;
    if (diskOffering.getDiskSize() == 0) {
        size = diskSize;
        if (size == null) {
            throw new InvalidParameterValueException("Disk offering " + diskOffering + " requires size parameter.");
        }
        _volumeMgr.validateVolumeSizeRange(size * 1024 * 1024 * 1024);
    }
    rootDiskOfferingInfo.setDiskOffering(diskOffering);
    rootDiskOfferingInfo.setSize(size);
    if (diskOffering.isCustomizedIops() != null && diskOffering.isCustomizedIops()) {
        Map<String, String> userVmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());
        if (userVmDetails != null) {
            String minIops = userVmDetails.get("minIopsDo");
            String maxIops = userVmDetails.get("maxIopsDo");
            rootDiskOfferingInfo.setMinIops(minIops != null && minIops.trim().length() > 0 ? Long.parseLong(minIops) : null);
            rootDiskOfferingInfo.setMaxIops(maxIops != null && maxIops.trim().length() > 0 ? Long.parseLong(maxIops) : null);
        }
    }
    LinkedHashMap<Network, List<? extends NicProfile>> networkIpMap = new LinkedHashMap<Network, List<? extends NicProfile>>();
    for (String uuid : networkNicMap.keySet()) {
        NetworkVO network = _networkDao.findByUuid(uuid);
        if (network != null) {
            networkIpMap.put(network, new ArrayList<NicProfile>(Arrays.asList(networkNicMap.get(uuid))));
        }
    }
    HypervisorType hypervisorType = HypervisorType.valueOf(hypervisor);
    _itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(isoId)), computeOffering, rootDiskOfferingInfo, new ArrayList<DiskOfferingInfo>(), networkIpMap, plan, hypervisorType);
    return vmEntity;
}
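
For callers, the networkNicMap argument maps network UUIDs to the NicProfile to use on each network, and the loop above iterates the map in insertion order when building networkIpMap. A caller-side sketch is shown below; the variable names and UUID value are hypothetical, and the NicProfile no-arg constructor and setDefaultNic setter are assumed from common usage in this codebase.

    // Hypothetical caller-side construction of networkNicMap; a LinkedHashMap keeps
    // the NIC ordering stable, matching the iteration order used above.
    Map<String, NicProfile> networkNicMap = new LinkedHashMap<String, NicProfile>();
    NicProfile defaultNic = new NicProfile();
    defaultNic.setDefaultNic(true); // mark this NIC as the VM's default
    networkNicMap.put(defaultNetworkUuid, defaultNic); // defaultNetworkUuid: placeholder UUID string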
Use of com.cloud.vm.VMInstanceVO in project cloudstack by apache.
In class NetworkOrchestrator, method reallocate:
@DB
@Override
public boolean reallocate(final VirtualMachineProfile vm, final DataCenterDeployment dest) throws InsufficientCapacityException, ConcurrentOperationException {
    final VMInstanceVO vmInstance = _vmDao.findById(vm.getId());
    final DataCenterVO dc = _dcDao.findById(vmInstance.getDataCenterId());
    if (dc.getNetworkType() == NetworkType.Basic) {
        final List<NicVO> nics = _nicDao.listByVmId(vmInstance.getId());
        final NetworkVO network = _networksDao.findById(nics.get(0).getNetworkId());
        final LinkedHashMap<Network, List<? extends NicProfile>> profiles = new LinkedHashMap<Network, List<? extends NicProfile>>();
        profiles.put(network, new ArrayList<NicProfile>());
        Transaction.execute(new TransactionCallbackWithExceptionNoReturn<InsufficientCapacityException>() {
            @Override
            public void doInTransactionWithoutResult(final TransactionStatus status) throws InsufficientCapacityException {
                cleanupNics(vm);
                allocate(vm, profiles);
            }
        });
    }
    return true;
}
Use of com.cloud.vm.VMInstanceVO in project cloudstack by apache.
In class XenServerStorageMotionStrategy, method copyAsync:
@Override
public void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
    Answer answer = null;
    String errMsg = null;
    try {
        VMInstanceVO instance = instanceDao.findById(vmTo.getId());
        if (instance != null) {
            if (srcHost.getClusterId().equals(destHost.getClusterId())) {
                answer = migrateVmWithVolumesWithinCluster(instance, vmTo, srcHost, destHost, volumeMap);
            } else {
                answer = migrateVmWithVolumesAcrossCluster(instance, vmTo, srcHost, destHost, volumeMap);
            }
        } else {
            throw new CloudRuntimeException("Unsupported operation requested for moving data.");
        }
    } catch (Exception e) {
        s_logger.error("copy failed", e);
        errMsg = e.toString();
    }
    CopyCommandResult result = new CopyCommandResult(null, answer);
    result.setResult(errMsg);
    callback.complete(result);
}
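
Note that srcHost.getClusterId() is dereferenced directly when choosing between the within-cluster and cross-cluster migration paths, so a host without a cluster would surface only as a NullPointerException caught by the generic catch block. A null-safe variant of that comparison (a sketch, not the shipped code) could use java.util.Objects:

    // Null-safe cluster comparison; behaves like the original when both ids are non-null.
    // Requires: import java.util.Objects;
    if (Objects.equals(srcHost.getClusterId(), destHost.getClusterId())) {
        answer = migrateVmWithVolumesWithinCluster(instance, vmTo, srcHost, destHost, volumeMap);
    } else {
        answer = migrateVmWithVolumesAcrossCluster(instance, vmTo, srcHost, destHost, volumeMap);
    }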