Example 6 with Transaction

Use of com.cloud.utils.db.Transaction in project CloudStack-archive by CloudStack-extras.

In the class UsageManagerImpl, the start method:

public boolean start() {
    if (s_logger.isInfoEnabled()) {
        s_logger.info("Starting Usage Manager");
    }
    // use the configured exec time and aggregation duration for scheduling the job
    m_scheduledFuture = m_executor.scheduleAtFixedRate(this, m_jobExecTime.getTimeInMillis() - System.currentTimeMillis(), m_aggregationDuration * 60 * 1000, TimeUnit.MILLISECONDS);
    m_heartbeat = m_heartbeatExecutor.scheduleAtFixedRate(new Heartbeat(), /* start in 15 seconds */ 15 * 1000, /* check database every minute */ 60 * 1000, TimeUnit.MILLISECONDS);
    if (m_sanityCheckInterval > 0) {
        m_sanity = m_sanityExecutor.scheduleAtFixedRate(new SanityCheck(), 1, m_sanityCheckInterval, TimeUnit.DAYS);
    }
    Transaction usageTxn = Transaction.open(Transaction.USAGE_DB);
    try {
        if (m_heartbeatLock.lock(3)) { // 3 second timeout
            try {
                UsageJobVO job = m_usageJobDao.getLastJob();
                if (job == null) {
                    m_usageJobDao.createNewJob(m_hostname, m_pid, UsageJobVO.JOB_TYPE_RECURRING);
                }
            } finally {
                m_heartbeatLock.unlock();
            }
        } else {
            if (s_logger.isTraceEnabled()) {
                s_logger.trace("Heartbeat lock is in use by others, returning true as someone else will take over the job if required");
            }
        }
    } finally {
        usageTxn.close();
    }
    return true;
}
Also used : Transaction(com.cloud.utils.db.Transaction)
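
Distilled, Example 6 shows the basic discipline for com.cloud.utils.db.Transaction: open a transaction context against a named database, do the work, and close it in a finally block so the previous connection context is restored even if the work throws. A minimal sketch of that shape (the class name and the comment placeholders are illustrative, not part of the original):

import com.cloud.utils.db.Transaction;

public class UsageDbSketch {
    public void touchUsageDb() {
        // bind subsequent DAO calls on this thread to the usage database
        Transaction usageTxn = Transaction.open(Transaction.USAGE_DB);
        try {
            // DAO work against the usage DB goes here,
            // e.g. the getLastJob()/createNewJob() calls above
        } finally {
            // always close, restoring the previous transaction context
            usageTxn.close();
        }
    }
}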

Example 7 with Transaction

Use of com.cloud.utils.db.Transaction in project CloudStack-archive by CloudStack-extras.

In the class UsageManagerImpl, the parse method:

public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
    // TODO: Shouldn't we also allow parsing by the type of usage?
    boolean success = false;
    long timeStart = System.currentTimeMillis();
    long deleteOldStatsTimeMillis = 0L;
    try {
        if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
            endDateMillis = timeStart;
        }
        long lastSuccess = m_usageJobDao.getLastJobSuccessDateMillis();
        if (lastSuccess != 0) {
            // 1 millisecond after
            startDateMillis = lastSuccess + 1;
        }
        if (startDateMillis >= endDateMillis) {
            if (s_logger.isInfoEnabled()) {
                s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
            }
            Transaction jobUpdateTxn = Transaction.open(Transaction.USAGE_DB);
            try {
                jobUpdateTxn.start();
                // nothing to parse for this window; record the job attempt (success is still false here)
                m_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
                // create a new job if this is a recurring job
                if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
                    m_usageJobDao.createNewJob(m_hostname, m_pid, UsageJobVO.JOB_TYPE_RECURRING);
                }
                jobUpdateTxn.commit();
            } finally {
                jobUpdateTxn.close();
            }
            return;
        }
        deleteOldStatsTimeMillis = startDateMillis;
        Date startDate = new Date(startDateMillis);
        Date endDate = new Date(endDateMillis);
        if (s_logger.isInfoEnabled()) {
            s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
        }
        List<AccountVO> accounts = null;
        List<UserStatisticsVO> userStats = null;
        Map<String, UsageNetworkVO> networkStats = null;
        Transaction userTxn = Transaction.open(Transaction.CLOUD_DB);
        try {
            Long limit = Long.valueOf(500);
            Long offset = Long.valueOf(0);
            Long lastAccountId = m_usageDao.getLastAccountId();
            if (lastAccountId == null) {
                lastAccountId = Long.valueOf(0);
            }
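            // the three loops below page through accounts 500 at a time: active, recently deleted, then newly created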
            do {
                Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
                accounts = m_accountDao.findActiveAccounts(lastAccountId, filter);
                if ((accounts != null) && !accounts.isEmpty()) {
                    // now update the accounts in the cloud_usage db
                    m_usageDao.updateAccounts(accounts);
                }
                offset = Long.valueOf(offset.longValue() + limit.longValue());
            } while ((accounts != null) && !accounts.isEmpty());
            // reset offset
            offset = Long.valueOf(0);
            do {
                Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
                accounts = m_accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
                if ((accounts != null) && !accounts.isEmpty()) {
                    // now update the accounts in the cloud_usage db
                    m_usageDao.updateAccounts(accounts);
                }
                offset = Long.valueOf(offset.longValue() + limit.longValue());
            } while ((accounts != null) && !accounts.isEmpty());
            // reset offset
            offset = Long.valueOf(0);
            do {
                Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
                accounts = m_accountDao.findNewAccounts(lastAccountId, filter);
                if ((accounts != null) && !accounts.isEmpty()) {
                    // now copy the accounts to cloud_usage db
                    m_usageDao.saveAccounts(accounts);
                }
                offset = Long.valueOf(offset.longValue() + limit.longValue());
            } while ((accounts != null) && !accounts.isEmpty());
            // reset offset
            offset = Long.valueOf(0);
            // get all the user stats to create usage records for the network usage
            Long lastUserStatsId = m_usageDao.getLastUserStatsId();
            if (lastUserStatsId == null) {
                lastUserStatsId = Long.valueOf(0);
            }
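            // two passes over user stats follow: first update rows already mirrored (id <= lastUserStatsId), then insert new ones (id > lastUserStatsId)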
            SearchCriteria<UserStatisticsVO> sc2 = m_userStatsDao.createSearchCriteria();
            sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
            do {
                Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
                userStats = m_userStatsDao.search(sc2, filter);
                if ((userStats != null) && !userStats.isEmpty()) {
                    // now update the user stats in the cloud_usage db
                    m_usageDao.updateUserStats(userStats);
                }
                offset = Long.valueOf(offset.longValue() + limit.longValue());
            } while ((userStats != null) && !userStats.isEmpty());
            // reset offset
            offset = Long.valueOf(0);
            sc2 = m_userStatsDao.createSearchCriteria();
            sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
            do {
                Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
                userStats = m_userStatsDao.search(sc2, filter);
                if ((userStats != null) && !userStats.isEmpty()) {
                    // now copy the user stats to the cloud_usage db
                    m_usageDao.saveUserStats(userStats);
                }
                offset = Long.valueOf(offset.longValue() + limit.longValue());
            } while ((userStats != null) && !userStats.isEmpty());
        } finally {
            userTxn.close();
        }
        // TODO:  Fetch a maximum number of events and process them before moving on to the next range of events
        // - get a list of the latest events
        // - insert the latest events into the usage.events table
        List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
        Transaction usageTxn = Transaction.open(Transaction.USAGE_DB);
        try {
            usageTxn.start();
            // events are returned ordered from oldest to newest, so just test against the first event
            if ((events != null) && (events.size() > 0)) {
                Date oldestEventDate = events.get(0).getCreateDate();
                if (oldestEventDate.getTime() < startDateMillis) {
                    startDateMillis = oldestEventDate.getTime();
                    startDate = new Date(startDateMillis);
                }
                // - create the usage records using the parse methods below
                for (UsageEventVO event : events) {
                    event.setProcessed(true);
                    _usageEventDao.update(event.getId(), event);
                    createHelperRecord(event);
                }
            }
            // TODO:  Fetch a maximum number of user stats and process them before moving on to the next range of user stats
            // get user stats in order to compute network usage
            networkStats = m_usageNetworkDao.getRecentNetworkStats();
            Calendar recentlyDeletedCal = Calendar.getInstance(m_usageTimezone);
            recentlyDeletedCal.setTimeInMillis(startDateMillis);
            recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
            Date recentlyDeletedDate = recentlyDeletedCal.getTime();
            // Keep track of user stats for an account, across all of its public IPs
            Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
            int startIndex = 0;
            do {
                userStats = m_userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
                if (userStats != null) {
                    for (UserStatisticsVO userStat : userStats) {
                        if (userStat.getDeviceId() != null) {
                            String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
                            UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
                            if (hostAggregatedStat == null) {
                                hostAggregatedStat = new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(), userStat.getDeviceType(), userStat.getNetworkId());
                            }
                            hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
                            hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
                            aggregatedStats.put(hostKey, hostAggregatedStat);
                        }
                    }
                }
                startIndex += 500;
            } while ((userStats != null) && !userStats.isEmpty());
            // loop over the user stats, create delta entries in the usage_network helper table
            int numAcctsProcessed = 0;
            for (String key : aggregatedStats.keySet()) {
                UsageNetworkVO currentNetworkStats = null;
                if (networkStats != null) {
                    currentNetworkStats = networkStats.get(key);
                }
                createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
                numAcctsProcessed++;
            }
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
            }
            // commit the helper records, then start a new transaction
            usageTxn.commit();
            usageTxn.start();
            boolean parsed = false;
            numAcctsProcessed = 0;
            Date currentStartDate = startDate;
            Date currentEndDate = endDate;
            Date tempDate = endDate;
            Calendar aggregateCal = Calendar.getInstance(m_usageTimezone);
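            // step back from endDate in m_aggregationDuration increments; currentEndDate settles on the earliest boundary still after startDate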
            while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
                currentEndDate = tempDate;
                aggregateCal.setTime(tempDate);
                aggregateCal.add(Calendar.MINUTE, -m_aggregationDuration);
                tempDate = aggregateCal.getTime();
            }
            while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
                Long offset = Long.valueOf(0);
                Long limit = Long.valueOf(500);
                do {
                    Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
                    accounts = m_accountDao.listAll(filter);
                    if ((accounts != null) && !accounts.isEmpty()) {
                        for (AccountVO account : accounts) {
                            parsed = parseHelperTables(account, currentStartDate, currentEndDate);
                            numAcctsProcessed++;
                        }
                    }
                    offset = Long.valueOf(offset.longValue() + limit.longValue());
                } while ((accounts != null) && !accounts.isEmpty());
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
                }
                numAcctsProcessed = 0;
                // reset offset
                offset = Long.valueOf(0);
                do {
                    Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
                    accounts = m_accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
                    if ((accounts != null) && !accounts.isEmpty()) {
                        for (AccountVO account : accounts) {
                            parsed = parseHelperTables(account, currentStartDate, currentEndDate);
                            List<Long> publicTemplates = m_usageDao.listPublicTemplatesByAccount(account.getId());
                            for (Long templateId : publicTemplates) {
                                //mark public templates owned by deleted accounts as deleted
                                List<UsageStorageVO> storageVOs = m_usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
                                if (storageVOs.size() > 1) {
                                    s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() + "; marking them all as deleted...");
                                }
                                for (UsageStorageVO storageVO : storageVOs) {
                                    if (s_logger.isDebugEnabled()) {
                                        s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
                                    }
                                    storageVO.setDeleted(account.getRemoved());
                                    m_usageStorageDao.update(storageVO);
                                }
                            }
                            numAcctsProcessed++;
                        }
                    }
                    offset = Long.valueOf(offset.longValue() + limit.longValue());
                } while ((accounts != null) && !accounts.isEmpty());
                currentStartDate = new Date(currentEndDate.getTime() + 1);
                aggregateCal.setTime(currentEndDate);
                aggregateCal.add(Calendar.MINUTE, m_aggregationDuration);
                currentEndDate = aggregateCal.getTime();
            }
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
            }
            // do we want to break out of processing accounts and roll back if there are errors?
            if (!parsed) {
                usageTxn.rollback();
            } else {
                success = true;
            }
        } catch (Exception ex) {
            s_logger.error("Exception in usage manager", ex);
            usageTxn.rollback();
        } finally {
            // record the job outcome; endDate becomes the last success date only when success is true
            m_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
            // create a new job if this is a recurring job
            if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
                m_usageJobDao.createNewJob(m_hostname, m_pid, UsageJobVO.JOB_TYPE_RECURRING);
            }
            usageTxn.commit();
            usageTxn.close();
            // switch back to CLOUD_DB
            Transaction swap = Transaction.open(Transaction.CLOUD_DB);
            if (!success) {
                _alertMgr.sendAlert(AlertManager.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(), "Usage job failed. Job id: " + job.getId());
            } else {
                _alertMgr.clearAlert(AlertManager.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
            }
            swap.close();
        }
    } catch (Exception e) {
        s_logger.error("Usage Manager error", e);
    }
}
Also used : HashMap(java.util.HashMap) Calendar(java.util.Calendar) UsageEventVO(com.cloud.event.UsageEventVO) AccountVO(com.cloud.user.AccountVO) Date(java.util.Date) ConfigurationException(javax.naming.ConfigurationException) CloudRuntimeException(com.cloud.utils.exception.CloudRuntimeException) SQLException(java.sql.SQLException) Transaction(com.cloud.utils.db.Transaction) Filter(com.cloud.utils.db.Filter) UserStatisticsVO(com.cloud.user.UserStatisticsVO)
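
Example 7 exercises the full Transaction lifecycle: open, start, commit on success, rollback on failure, and close in a finally block. Reduced to its skeleton (doBatch() is a hypothetical stand-in for the DAO work above):

import com.cloud.utils.db.Transaction;

public class UsageTxnLifecycleSketch {
    public boolean runBatch() {
        Transaction txn = Transaction.open(Transaction.USAGE_DB);
        try {
            txn.start();      // begin the database transaction
            doBatch();        // hypothetical batch of DAO calls
            txn.commit();     // make the helper records durable
            return true;
        } catch (Exception e) {
            txn.rollback();   // discard partial work on any failure
            return false;
        } finally {
            txn.close();      // restore the previous transaction context
        }
    }

    private void doBatch() {
        // placeholder for the account/user-stats processing above
    }
}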

Example 8 with Transaction

Use of com.cloud.utils.db.Transaction in project CloudStack-archive by CloudStack-extras.

In the class SimulatorManagerImpl, the getVmStates method:

@Override
@DB
public Map<String, State> getVmStates(String hostGuid) {
    Transaction txn = Transaction.currentTxn();
    txn.transitToUserManagedConnection(_concierge.conn());
    try {
        return _mockVmMgr.getVmStates(hostGuid);
    } finally {
        txn.transitToAutoManagedConnection(Transaction.CLOUD_DB);
    }
}
Also used : Transaction(com.cloud.utils.db.Transaction) DB(com.cloud.utils.db.DB)
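
Here transitToUserManagedConnection temporarily routes the current transaction over an externally managed java.sql.Connection (the simulator's _concierge.conn()), and the finally block switches back to a pooled CLOUD_DB connection. A sketch of the same pattern, with externalConn as an illustrative parameter:

import java.sql.Connection;
import com.cloud.utils.db.Transaction;

public class UserManagedConnSketch {
    public void withExternalConnection(Connection externalConn) {
        Transaction txn = Transaction.currentTxn();
        // subsequent DAO calls on this thread use the supplied connection
        txn.transitToUserManagedConnection(externalConn);
        try {
            // do DAO work on externalConn here
        } finally {
            // return the thread to an auto-managed, pooled CLOUD_DB connection
            txn.transitToAutoManagedConnection(Transaction.CLOUD_DB);
        }
    }
}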

Example 9 with Transaction

Use of com.cloud.utils.db.Transaction in project CloudStack-archive by CloudStack-extras.

In the class EncryptionSecretKeyChanger, the migrateData method:

private boolean migrateData(String oldDBKey, String newDBKey) {
    System.out.println("Begin Data migration");
    initEncryptor(oldEncryptor, oldDBKey);
    initEncryptor(newEncryptor, newDBKey);
    System.out.println("Initialised Encryptors");
    Transaction txn = Transaction.open("Migrate");
    txn.start();
    try {
        Connection conn;
        try {
            conn = txn.getConnection();
        } catch (SQLException e) {
            throw new CloudRuntimeException("Unable to migrate encrypted data in the database", e);
        }
        migrateConfigValues(conn);
        migrateHostDetails(conn);
        migrateVNCPassword(conn);
        migrateUserCredentials(conn);
        txn.commit();
    } finally {
        txn.close();
    }
    System.out.println("End Data migration");
    return true;
}
Also used : Transaction(com.cloud.utils.db.Transaction) SQLException(java.sql.SQLException) CloudRuntimeException(com.cloud.utils.exception.CloudRuntimeException) Connection(java.sql.Connection)
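
Example 9 opens a transaction under an arbitrary name ("Migrate") and uses getConnection() to run raw JDBC inside it. A minimal sketch of that shape (the table, column, and values are placeholders, not from the original):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;

public class RawJdbcTxnSketch {
    public void migrateRows() {
        Transaction txn = Transaction.open("Migrate"); // ad hoc, named transaction
        txn.start();
        try {
            Connection conn;
            try {
                conn = txn.getConnection(); // raw JDBC handle bound to this transaction
            } catch (SQLException e) {
                throw new CloudRuntimeException("Unable to get a connection", e);
            }
            try {
                PreparedStatement pstmt = conn.prepareStatement("UPDATE my_table SET my_col = ? WHERE id = ?");
                pstmt.setString(1, "migrated-value"); // placeholder data
                pstmt.setLong(2, 1L);
                pstmt.executeUpdate();
                pstmt.close();
            } catch (SQLException e) {
                throw new CloudRuntimeException("Migration statement failed", e);
            }
            txn.commit(); // all statements succeeded
        } finally {
            txn.close();
        }
    }
}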

Example 10 with Transaction

Use of com.cloud.utils.db.Transaction in project CloudStack-archive by CloudStack-extras.

In the class TestAsync, the testMaid method:

/*
	public static class SampleAsyncResult {
		@Param(name="name", propName="name")
		private final String _name;
		
		@Param
		private final int count;
		
		public SampleAsyncResult(String name, int count) {
			_name = name;
			this.count = count;
		}
		
		public String getName() { return _name; }
		public int getCount() { return count; }
	}

	public void testDao() {
		AsyncJobDao dao = new AsyncJobDaoImpl();
		AsyncJobVO job = new AsyncJobVO(1, 1, "TestCmd", null);
		job.setInstanceType("user_vm");
		job.setInstanceId(1000L);
		
		char[] buf = new char[1024];
		for(int i = 0; i < 1024; i++)
			buf[i] = 'a';
			
		job.setResult(new String(buf));
		dao.persist(job);
		
		AsyncJobVO jobVerify = dao.findById(job.getId());
		
		Assert.assertTrue(jobVerify.getCmd().equals(job.getCmd()));
		Assert.assertTrue(jobVerify.getUserId() == 1);
		Assert.assertTrue(jobVerify.getAccountId() == 1);
		
		String result = jobVerify.getResult();
		for(int i = 0; i < 1024; i++)
			Assert.assertTrue(result.charAt(i) == 'a');
		
		jobVerify = dao.findInstancePendingAsyncJob("user_vm", 1000L);
		Assert.assertTrue(jobVerify != null);
		Assert.assertTrue(jobVerify.getCmd().equals(job.getCmd()));
		Assert.assertTrue(jobVerify.getUserId() == 1);
		Assert.assertTrue(jobVerify.getAccountId() == 1);
	}
	
	public void testSerialization() {
		List<Pair<String, Object>> l;
		int value = 1;
		l = SerializerHelper.toPairList(value, "result");
		Assert.assertTrue(l.size() == 1);
		Assert.assertTrue(l.get(0).first().equals("result"));
		Assert.assertTrue(l.get(0).second().equals("1"));
		l.clear();
		
		SampleAsyncResult result = new SampleAsyncResult("vmops", 1);
		l = SerializerHelper.toPairList(result, "result");
		
		Assert.assertTrue(l.size() == 2);
		Assert.assertTrue(l.get(0).first().equals("name"));
		Assert.assertTrue(l.get(0).second().equals("vmops"));
		Assert.assertTrue(l.get(1).first().equals("count"));
		Assert.assertTrue(l.get(1).second().equals("1"));
	}
	
	public void testAsyncResult() {
		AsyncJobResult result = new AsyncJobResult(1);
		
		result.setResultObject(100);
		Assert.assertTrue(result.getResult().equals("java.lang.Integer/100"));
		
		Object obj = result.getResultObject();
		Assert.assertTrue(obj instanceof Integer);
		Assert.assertTrue(((Integer)obj).intValue() == 100);
	}

	public void testTransaction() {
		Transaction txn = Transaction.open("testTransaction");
		try {
			txn.start();
			
			AsyncJobDao dao = new AsyncJobDaoImpl();
			AsyncJobVO job = new AsyncJobVO(1, 1, "TestCmd", null);
			job.setInstanceType("user_vm");
			job.setInstanceId(1000L);
			job.setResult("");
			dao.persist(job);
			txn.rollback();
		} finally {
			txn.close();
		}
	}
	
	public void testMorevingian() {
		int threadCount = 10;
		final int testCount = 10;
		
		Thread[] threads = new Thread[threadCount];
		for(int i = 0; i < threadCount; i++) {
			final int threadNum = i + 1;
			threads[i] = new Thread(new Runnable() {
				public void run() {
					for(int i = 0; i < testCount; i++) {
						Transaction txn = Transaction.open(Transaction.CLOUD_DB);
						try {
							AsyncJobDao dao = new AsyncJobDaoImpl();
							
							s_logger.info("Thread " + threadNum + " acquiring lock");
							AsyncJobVO job = dao.acquire(1L, 30);
							if(job != null) {
								s_logger.info("Thread " + threadNum + " acquired lock");
								
								try {
									Thread.sleep(Log4jEnabledTestCase.getRandomMilliseconds(1000, 3000));
								} catch (InterruptedException e) {
								}
								
								s_logger.info("Thread " + threadNum + " acquiring lock nestly");
								AsyncJobVO job2 = dao.acquire(1L, 30);
								if(job2 != null) {
									s_logger.info("Thread " + threadNum + " acquired lock nestly");
									
									try {
										Thread.sleep(Log4jEnabledTestCase.getRandomMilliseconds(1000, 3000));
									} catch (InterruptedException e) {
									}
									
									s_logger.info("Thread " + threadNum + " releasing lock (nestly acquired)");
									dao.release(1L);
									s_logger.info("Thread " + threadNum + " released lock (nestly acquired)");
									
								} else {
									s_logger.info("Thread " + threadNum + " was unable to acquire lock nestly");
								}
								
								s_logger.info("Thread " + threadNum + " releasing lock");
								dao.release(1L);
								s_logger.info("Thread " + threadNum + " released lock");
							} else {
								s_logger.info("Thread " + threadNum + " was unable to acquire lock");
							}
						} finally {
							txn.close();
						}
						
						try {
							Thread.sleep(Log4jEnabledTestCase.getRandomMilliseconds(1000, 10000));
						} catch (InterruptedException e) {
						}
					}
				}
			});
		}
		
		for(int i = 0; i < threadCount; i++) {
			threads[i].start();
		}
		
		for(int i = 0; i < threadCount; i++) {
			try {
				threads[i].join();
			} catch (InterruptedException e) {
			}
		}
	}
	*/
public void testMaid() {
    Transaction txn = Transaction.open(Transaction.CLOUD_DB);
    StackMaidDao dao = new StackMaidDaoImpl();
    dao.pushCleanupDelegate(1L, 0, "delegate1", "Hello, world");
    dao.pushCleanupDelegate(1L, 1, "delegate2", new Long(100));
    dao.pushCleanupDelegate(1L, 2, "delegate3", null);
    CheckPointVO item = dao.popCleanupDelegate(1L);
    Assert.assertTrue(item.getDelegate().equals("delegate3"));
    Assert.assertTrue(item.getContext() == null);
    item = dao.popCleanupDelegate(1L);
    Assert.assertTrue(item.getDelegate().equals("delegate2"));
    s_logger.info(item.getContext());
    item = dao.popCleanupDelegate(1L);
    Assert.assertTrue(item.getDelegate().equals("delegate1"));
    s_logger.info(item.getContext());
    txn.close();
}
Also used : Transaction(com.cloud.utils.db.Transaction) StackMaidDao(com.cloud.cluster.dao.StackMaidDao) StackMaidDaoImpl(com.cloud.cluster.dao.StackMaidDaoImpl) CheckPointVO(com.cloud.cluster.CheckPointVO)
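
The testMaid example demonstrates that StackMaidDao is a per-task cleanup stack: delegates pushed with increasing sequence numbers pop back newest-first, like unwinding a stack of compensating actions. A compact restatement of that behavior (same DAO signatures as the test above; the delegate names are illustrative):

import com.cloud.cluster.CheckPointVO;
import com.cloud.cluster.dao.StackMaidDao;
import com.cloud.cluster.dao.StackMaidDaoImpl;
import com.cloud.utils.db.Transaction;

public class MaidLifoSketch {
    public void demo() {
        Transaction txn = Transaction.open(Transaction.CLOUD_DB);
        try {
            StackMaidDao dao = new StackMaidDaoImpl();
            // push two cleanup delegates for task id 1, sequence 0 then 1
            dao.pushCleanupDelegate(1L, 0, "first", null);
            dao.pushCleanupDelegate(1L, 1, "second", null);
            // LIFO: the delegate pushed last comes back first
            CheckPointVO top = dao.popCleanupDelegate(1L);
            assert "second".equals(top.getDelegate());
        } finally {
            txn.close();
        }
    }
}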

Aggregations

Transaction (com.cloud.utils.db.Transaction): 11
StackMaidDao (com.cloud.cluster.dao.StackMaidDao): 3
StackMaidDaoImpl (com.cloud.cluster.dao.StackMaidDaoImpl): 3
DB (com.cloud.utils.db.DB): 3
CloudRuntimeException (com.cloud.utils.exception.CloudRuntimeException): 2
SQLException (java.sql.SQLException): 2
Answer (com.cloud.agent.api.Answer): 1
AttachIsoCommand (com.cloud.agent.api.AttachIsoCommand): 1
AttachVolumeCommand (com.cloud.agent.api.AttachVolumeCommand): 1
BackupSnapshotCommand (com.cloud.agent.api.BackupSnapshotCommand): 1
CheckHealthCommand (com.cloud.agent.api.CheckHealthCommand): 1
CheckNetworkCommand (com.cloud.agent.api.CheckNetworkCommand): 1
CleanupNetworkRulesCmd (com.cloud.agent.api.CleanupNetworkRulesCmd): 1
ClusterSyncCommand (com.cloud.agent.api.ClusterSyncCommand): 1
ComputeChecksumCommand (com.cloud.agent.api.ComputeChecksumCommand): 1
CreatePrivateTemplateFromSnapshotCommand (com.cloud.agent.api.CreatePrivateTemplateFromSnapshotCommand): 1
CreatePrivateTemplateFromVolumeCommand (com.cloud.agent.api.CreatePrivateTemplateFromVolumeCommand): 1
CreateStoragePoolCommand (com.cloud.agent.api.CreateStoragePoolCommand): 1
CreateVolumeFromSnapshotCommand (com.cloud.agent.api.CreateVolumeFromSnapshotCommand): 1
DeleteSnapshotBackupCommand (com.cloud.agent.api.DeleteSnapshotBackupCommand): 1