Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
The class KVMInvestigator, method isAgentAlive.
@Override
public Status isAgentAlive(Host agent) {
    // Only KVM and LXC hosts are investigated by this investigator.
    if (agent.getHypervisorType() != Hypervisor.HypervisorType.KVM && agent.getHypervisorType() != Hypervisor.HypervisorType.LXC) {
        return null;
    }
    if (haManager.isHAEligible(agent)) {
        return haManager.getHostStatus(agent);
    }
    // The investigation relies on an NFS heartbeat: look for an NFS pool in the
    // host's cluster first, then fall back to zone-wide pools for this hypervisor.
    List<StoragePoolVO> clusterPools = _storagePoolDao.listPoolsByCluster(agent.getClusterId());
    boolean hasNfs = false;
    for (StoragePoolVO pool : clusterPools) {
        if (pool.getPoolType() == StoragePoolType.NetworkFilesystem) {
            hasNfs = true;
            break;
        }
    }
    if (!hasNfs) {
        List<StoragePoolVO> zonePools = _storagePoolDao.findZoneWideStoragePoolsByHypervisor(agent.getDataCenterId(), agent.getHypervisorType());
        for (StoragePoolVO pool : zonePools) {
            if (pool.getPoolType() == StoragePoolType.NetworkFilesystem) {
                hasNfs = true;
                break;
            }
        }
    }
    if (!hasNfs) {
        s_logger.warn("Agent investigation was requested on host " + agent + ", but host does not support investigation because it has no NFS storage. Skipping investigation.");
        return Status.Disconnected;
    }
    Status hostStatus = null;
    Status neighbourStatus = null;
    CheckOnHostCommand cmd = new CheckOnHostCommand(agent);
    // Ask the host itself first; a positive answer to CheckOnHostCommand means the host was found to be down.
    try {
        Answer answer = _agentMgr.easySend(agent.getId(), cmd);
        if (answer != null) {
            hostStatus = answer.getResult() ? Status.Down : Status.Up;
        }
    } catch (Exception e) {
        s_logger.debug("Failed to send command to host: " + agent.getId());
    }
    if (hostStatus == null) {
        hostStatus = Status.Disconnected;
    }
    // Cross-check the verdict with the other up KVM/LXC hosts in the same cluster.
    List<HostVO> neighbors = _resourceMgr.listHostsInClusterByStatus(agent.getClusterId(), Status.Up);
    for (HostVO neighbor : neighbors) {
        if (neighbor.getId() == agent.getId() || (neighbor.getHypervisorType() != Hypervisor.HypervisorType.KVM && neighbor.getHypervisorType() != Hypervisor.HypervisorType.LXC)) {
            continue;
        }
        s_logger.debug("Investigating host:" + agent.getId() + " via neighbouring host:" + neighbor.getId());
        try {
            Answer answer = _agentMgr.easySend(neighbor.getId(), cmd);
            if (answer != null) {
                neighbourStatus = answer.getResult() ? Status.Down : Status.Up;
                s_logger.debug("Neighbouring host:" + neighbor.getId() + " returned status:" + neighbourStatus + " for the investigated host:" + agent.getId());
                if (neighbourStatus == Status.Up) {
                    break;
                }
            }
        } catch (Exception e) {
            s_logger.debug("Failed to send command to host: " + neighbor.getId());
        }
    }
    // A neighbour that sees the host as up downgrades a Down verdict to Disconnected;
    // a neighbour that also sees it as down confirms the Down verdict.
    if (neighbourStatus == Status.Up && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) {
        hostStatus = Status.Disconnected;
    }
    if (neighbourStatus == Status.Down && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) {
        hostStatus = Status.Down;
    }
    s_logger.debug("HA: HOST is ineligible legacy state " + hostStatus + " for host " + agent.getId());
    return hostStatus;
}
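The cluster-then-zone NFS probe above is the part of the method that touches StoragePoolVO; pulled out as a stand-alone helper it might look like the sketch below. The helper name hasNfsPool is hypothetical; the _storagePoolDao field and its two list methods are the ones used in the snippet.
// Hypothetical helper condensing the NFS check above: true if the host's cluster,
// or failing that its zone, exposes at least one NFS primary storage pool.
private boolean hasNfsPool(Host agent) {
    List<StoragePoolVO> clusterPools = _storagePoolDao.listPoolsByCluster(agent.getClusterId());
    boolean hasNfs = clusterPools.stream()
            .anyMatch(p -> p.getPoolType() == StoragePoolType.NetworkFilesystem);
    if (!hasNfs) {
        List<StoragePoolVO> zonePools = _storagePoolDao.findZoneWideStoragePoolsByHypervisor(agent.getDataCenterId(), agent.getHypervisorType());
        hasNfs = zonePools.stream()
                .anyMatch(p -> p.getPoolType() == StoragePoolType.NetworkFilesystem);
    }
    return hasNfs;
}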
Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
The class VmwareManagerImpl, method listVsphereStoragePolicyCompatibleStoragePools.
@Override
public List<StoragePool> listVsphereStoragePolicyCompatibleStoragePools(ListVsphereStoragePolicyCompatiblePoolsCmd cmd) {
    Long policyId = cmd.getPolicyId();
    VsphereStoragePolicyVO storagePolicy = vsphereStoragePolicyDao.findById(policyId);
    if (storagePolicy == null) {
        throw new CloudRuntimeException("Storage policy with ID = " + policyId + " was not found");
    }
    long zoneId = storagePolicy.getZoneId();
    // Only pools in the Up state in the policy's zone are candidates.
    List<StoragePoolVO> poolsInZone = primaryStorageDao.listByStatusInZone(zoneId, StoragePoolStatus.Up);
    List<StoragePool> compatiblePools = new ArrayList<>();
    for (StoragePoolVO pool : poolsInZone) {
        StorageFilerTO storageFilerTO = new StorageFilerTO(pool);
        List<Long> hostIds = storageManager.getUpHostsInPool(pool.getId());
        if (CollectionUtils.isNullOrEmpty(hostIds)) {
            s_logger.debug("Did not find a suitable host to verify compatibility of the pool " + pool.getName());
            continue;
        }
        // Pick a random up host in the pool to run the compliance check.
        Collections.shuffle(hostIds);
        CheckDataStoreStoragePolicyComplainceCommand command = new CheckDataStoreStoragePolicyComplainceCommand(storagePolicy.getPolicyId(), storageFilerTO);
        long targetHostId = hypervisorGuruManager.getGuruProcessedCommandTargetHost(hostIds.get(0), command);
        try {
            Answer answer = _agentMgr.send(targetHostId, command);
            boolean result = answer != null && answer.getResult();
            if (result) {
                compatiblePools.add(pool);
            }
        } catch (AgentUnavailableException | OperationTimedoutException e) {
            s_logger.error("Could not verify if storage policy " + storagePolicy.getName() + " is compatible with storage pool " + pool.getName());
        }
    }
    return compatiblePools;
}
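The per-pool round trip above (pick a random up host, send the compliance command, treat a null answer or an exception as a failed check) can be read as one small predicate. The sketch below reuses the field and class names from the snippet; the helper itself is hypothetical.
// Hypothetical helper: true only when a running host in the pool confirms that the
// pool's datastore satisfies the given vSphere storage policy.
private boolean isPoolCompatible(VsphereStoragePolicyVO storagePolicy, StoragePoolVO pool) {
    List<Long> hostIds = storageManager.getUpHostsInPool(pool.getId());
    if (CollectionUtils.isNullOrEmpty(hostIds)) {
        return false; // no running host can answer for this pool
    }
    Collections.shuffle(hostIds); // spread the checks across the pool's hosts
    CheckDataStoreStoragePolicyComplainceCommand command =
            new CheckDataStoreStoragePolicyComplainceCommand(storagePolicy.getPolicyId(), new StorageFilerTO(pool));
    long targetHostId = hypervisorGuruManager.getGuruProcessedCommandTargetHost(hostIds.get(0), command);
    try {
        Answer answer = _agentMgr.send(targetHostId, command);
        return answer != null && answer.getResult();
    } catch (AgentUnavailableException | OperationTimedoutException e) {
        return false; // an unreachable host counts as "not verified"
    }
}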
Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
The class CleanupFullyClonedTemplatesTask, method queryAllPools.
private void queryAllPools() {
    List<StoragePoolVO> storagePools = primaryStorageDao.listAll();
    for (StoragePoolVO pool : storagePools) {
        long zoneId = pool.getDataCenterId();
        queryPoolForTemplates(pool, zoneId);
    }
}
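If the cleanup task ever needed the pools grouped per zone instead of visited one by one, the same listAll() call could feed a map. A minimal sketch follows; the grouping is not in the original and assumes java.util.Map and java.util.stream.Collectors are imported.
// Hypothetical variant: group every primary storage pool by its data center (zone) id.
private Map<Long, List<StoragePoolVO>> poolsByZone() {
    return primaryStorageDao.listAll().stream()
            .collect(Collectors.groupingBy(StoragePoolVO::getDataCenterId));
}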
Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
The class RandomStoragePoolAllocator, method select.
@Override
public List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
    List<StoragePool> suitablePools = new ArrayList<StoragePool>();
    long dcId = plan.getDataCenterId();
    Long podId = plan.getPodId();
    Long clusterId = plan.getClusterId();
    if (podId == null) {
        return null;
    }
    s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId);
    List<StoragePoolVO> pools = storagePoolDao.listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
    if (pools.size() == 0) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("No storage pools available for allocation, returning");
        }
        return suitablePools;
    }
    Collections.shuffle(pools);
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("RandomStoragePoolAllocator has " + pools.size() + " pools to check for allocation");
    }
    for (StoragePoolVO pool : pools) {
        if (suitablePools.size() == returnUpTo) {
            break;
        }
        StoragePool pol = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(pool.getId());
        if (filter(avoid, pol, dskCh, plan)) {
            suitablePools.add(pol);
        }
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("RandomStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools");
    }
    return suitablePools;
}
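The shuffle / filter / cap-at-returnUpTo loop can also be written as a stream pipeline. The sketch below is not from the project and assumes returnUpTo is a positive cap; the original loop additionally returns every match when the cap is never reached.
// Hypothetical stream form of the selection loop above; assumes returnUpTo > 0.
Collections.shuffle(pools);
List<StoragePool> suitablePools = pools.stream()
        .map(p -> (StoragePool) dataStoreMgr.getPrimaryDataStore(p.getId()))
        .filter(p -> filter(avoid, p, dskCh, plan))
        .limit(returnUpTo)
        .collect(Collectors.toList());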
Use of org.apache.cloudstack.storage.datastore.db.StoragePoolVO in project cloudstack by apache.
The class ElastistorPrimaryDataStoreLifeCycle, method attachCluster.
@Override
public boolean attachCluster(DataStore store, ClusterScope scope) {
    dataStoreHelper.attachCluster(store);
    StoragePoolVO dataStoreVO = _storagePoolDao.findById(store.getId());
    PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
    // Check if there is host up in this cluster
    List<HostVO> allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId());
    if (allHosts.isEmpty()) {
        primaryDataStoreDao.expunge(primarystore.getId());
        throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId());
    }
    if (!dataStoreVO.isManaged()) {
        boolean success = false;
        for (HostVO h : allHosts) {
            success = createStoragePool(h.getId(), primarystore);
            if (success) {
                break;
            }
        }
    }
    s_logger.debug("In createPool Adding the pool to each of the hosts");
    List<HostVO> poolHosts = new ArrayList<HostVO>();
    for (HostVO h : allHosts) {
        try {
            storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId());
            poolHosts.add(h);
        } catch (Exception e) {
            s_logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e);
        }
        if (poolHosts.isEmpty()) {
            s_logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId());
            primaryDataStoreDao.expunge(primarystore.getId());
            throw new CloudRuntimeException("Failed to access storage pool");
        }
    }
    return true;
}
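In the snippet above the poolHosts.isEmpty() check sits inside the for loop, so a connection failure on the first host aborts the attach before the remaining hosts are tried. A hypothetical, more defensive variant (not the Elastistor code) would attempt every host and only fail afterwards:
// Hypothetical variant: try every up host first, and only fail the attach if none
// of them could be connected to the shared pool.
List<HostVO> poolHosts = new ArrayList<>();
for (HostVO h : allHosts) {
    try {
        storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId());
        poolHosts.add(h);
    } catch (Exception e) {
        s_logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e);
    }
}
if (poolHosts.isEmpty()) {
    primaryDataStoreDao.expunge(primarystore.getId());
    throw new CloudRuntimeException("No host could access storage pool " + primarystore);
}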