Use of com.cloud.user.UserStatisticsVO in project CloudStack-archive by CloudStack-extras.
The class UsageManagerImpl, method parse.
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
long deleteOldStatsTimeMillis = 0L;
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = m_usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
// 1 millisecond after
startDateMillis = lastSuccess + 1;
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
Transaction jobUpdateTxn = Transaction.open(Transaction.USAGE_DB);
try {
jobUpdateTxn.start();
// record this no-op run (success is still false since nothing was parsed)
m_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
m_usageJobDao.createNewJob(m_hostname, m_pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
deleteOldStatsTimeMillis = startDateMillis;
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
Transaction userTxn = Transaction.open(Transaction.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = m_usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = m_accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
m_usageDao.updateAccounts(accounts);
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = m_accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
m_usageDao.updateAccounts(accounts);
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = m_accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
m_usageDao.saveAccounts(accounts);
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = m_usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = m_userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = m_userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now update the user stats in the cloud_usage db
m_usageDao.updateUserStats(userStats);
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = m_userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = m_userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to the cloud_usage db
m_usageDao.saveUserStats(userStats);
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
Transaction usageTxn = Transaction.open(Transaction.USAGE_DB);
try {
usageTxn.start();
// (events are ordered oldest to newest, so just test the first event against the start date)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = m_usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(m_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = m_userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat = new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(), userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(m_usageTimezone);
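// walk currentEndDate back from endDate in steps of m_aggregationDuration minutes until tempDate is within a minute of startDate, so processing below starts at the earliest aggregation boundary after startDate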
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -m_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = m_accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = m_accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = m_usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = m_usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() + "; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
m_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, m_aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// record the outcome of this run (success reflects whether parsing completed)
m_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
m_usageJobDao.createNewJob(m_hostname, m_pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
Transaction swap = Transaction.open(Transaction.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.ALERT_TYPE_USAGE_SERVER_RESULT, 0, Long.valueOf(0), "Usage job failed. Job id: " + job.getId(), "Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
}
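Both versions of parse on this page pull accounts and stats from the cloud db in fixed-size pages of 500 rows. A minimal, self-contained sketch of that offset/limit pagination idiom, with a hypothetical fetchPage standing in for the DAO's search/find calls:

import java.util.ArrayList;
import java.util.List;

public class PaginationSketch {

    // Hypothetical stand-in for a DAO call such as m_accountDao.findActiveAccounts(lastId, filter)
    static List<Long> fetchPage(long offset, long limit) {
        List<Long> page = new ArrayList<>();
        for (long id = offset; id < Math.min(offset + limit, 1234L); id++) {
            page.add(id);
        }
        return page;
    }

    public static void main(String[] args) {
        long limit = 500;
        long offset = 0;
        List<Long> rows;
        do {
            rows = fetchPage(offset, limit); // one page, ordered by id
            if (rows != null && !rows.isEmpty()) {
                // process the page (the real code updates or copies rows into the cloud_usage db)
            }
            offset += limit;
        } while (rows != null && !rows.isEmpty());
    }
}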
Use of com.cloud.user.UserStatisticsVO in project cloudstack by apache.
The class DomainRouterDaoImpl, method addRouterToGuestNetwork.
@Override
@DB
public void addRouterToGuestNetwork(final VirtualRouter router, final Network guestNetwork) {
if (_routerNetworkDao.findByRouterAndNetwork(router.getId(), guestNetwork.getId()) == null) {
final NetworkOffering off = _offDao.findById(guestNetwork.getNetworkOfferingId());
if (!off.getName().equalsIgnoreCase(NetworkOffering.SystemPrivateGatewayNetworkOffering)) {
final TransactionLegacy txn = TransactionLegacy.currentTxn();
txn.start();
//1) add router to network
final RouterNetworkVO routerNtwkMap = new RouterNetworkVO(router.getId(), guestNetwork.getId(), guestNetwork.getGuestType());
_routerNetworkDao.persist(routerNtwkMap);
//2) create user stats entry for the network
UserStatisticsVO stats = _userStatsDao.findBy(router.getAccountId(), router.getDataCenterId(), guestNetwork.getId(), null, router.getId(), router.getType().toString());
if (stats == null) {
stats = new UserStatisticsVO(router.getAccountId(), router.getDataCenterId(), null, router.getId(), router.getType().toString(), guestNetwork.getId());
_userStatsDao.persist(stats);
}
txn.commit();
}
}
}
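A hedged sketch of a call site, e.g. from a router lifecycle service (the service context and field name are illustrative, not CloudStack API):

// Illustrative only: router and guestNetwork come from the calling service.
// The call is a no-op if the mapping already exists; otherwise it persists the
// router-to-network mapping and seeds an empty UserStatisticsVO row in the same
// transaction, so later traffic polling always finds a row to increment.
_routerDao.addRouterToGuestNetwork(router, guestNetwork);

Doing the mapping insert and the stats seeding in one transaction keeps the two tables consistent if either persist fails.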
Use of com.cloud.user.UserStatisticsVO in project cloudstack by apache.
The class UserStatisticsDaoImpl, method listUpdatedStats.
@Override
public List<UserStatisticsVO> listUpdatedStats() {
List<UserStatisticsVO> userStats = new ArrayList<UserStatisticsVO>();
TransactionLegacy txn = TransactionLegacy.currentTxn();
try {
PreparedStatement pstmt = txn.prepareAutoCloseStatement(UPDATED_STATS_SEARCH);
ResultSet rs = pstmt.executeQuery();
while (rs.next()) {
userStats.add(toEntityBean(rs, false));
}
} catch (Exception ex) {
s_logger.error("error lisitng updated user stats", ex);
}
return userStats;
}
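A minimal sketch of consuming listUpdatedStats from a hypothetical caller, using only getters that appear elsewhere on this page:

// Hypothetical caller: log the aggregate counters for each row whose stats changed.
List<UserStatisticsVO> updated = _userStatsDao.listUpdatedStats();
for (UserStatisticsVO stat : updated) {
    s_logger.debug("account " + stat.getAccountId() + ": aggBytesSent=" + stat.getAggBytesSent()
            + ", aggBytesReceived=" + stat.getAggBytesReceived());
}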
Use of com.cloud.user.UserStatisticsVO in project cloudstack by apache.
The class UsageDaoImpl, method saveUserStats.
@Override
public void saveUserStats(List<UserStatisticsVO> userStats) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
try {
txn.start();
String sql = INSERT_USER_STATS;
// in reality I just want CLOUD_USAGE dataSource connection
PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql);
for (UserStatisticsVO userStat : userStats) {
pstmt.setLong(1, userStat.getId());
pstmt.setLong(2, userStat.getDataCenterId());
pstmt.setLong(3, userStat.getAccountId());
pstmt.setString(4, userStat.getPublicIpAddress());
if (userStat.getDeviceId() != null) {
pstmt.setLong(5, userStat.getDeviceId());
} else {
pstmt.setNull(5, Types.BIGINT);
}
pstmt.setString(6, userStat.getDeviceType());
if (userStat.getNetworkId() != null) {
pstmt.setLong(7, userStat.getNetworkId());
} else {
pstmt.setNull(7, Types.BIGINT);
}
pstmt.setLong(8, userStat.getNetBytesReceived());
pstmt.setLong(9, userStat.getNetBytesSent());
pstmt.setLong(10, userStat.getCurrentBytesReceived());
pstmt.setLong(11, userStat.getCurrentBytesSent());
pstmt.setLong(12, userStat.getAggBytesReceived());
pstmt.setLong(13, userStat.getAggBytesSent());
pstmt.addBatch();
}
pstmt.executeBatch();
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error saving user stats to cloud_usage db", ex);
throw new CloudRuntimeException(ex.getMessage());
}
}
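The 13 positional bindings above imply the column order of INSERT_USER_STATS. A plausible reconstruction follows, assuming the usual cloud_usage.user_statistics table; the exact table and column names are an inference from the bindings, not taken from this page:

// Assumed shape of INSERT_USER_STATS, inferred from the parameter order in saveUserStats
private static final String INSERT_USER_STATS =
        "INSERT INTO cloud_usage.user_statistics (id, data_center_id, account_id, public_ip_address, "
      + "device_id, device_type, network_id, net_bytes_received, net_bytes_sent, "
      + "current_bytes_received, current_bytes_sent, agg_bytes_received, agg_bytes_sent) "
      + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)";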
Use of com.cloud.user.UserStatisticsVO in project cloudstack by apache.
The class UsageManagerImpl, method parse.
@Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
// 1 millisecond after
startDateMillis = lastSuccess + 1;
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// record this no-op run (success is still false since nothing was parsed)
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now update the user stats in the cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to the cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm disk stats to create usage records for the vm disk usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now update the vm disk stats in the cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to the cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// (events are ordered oldest to newest, so just test the first event against the start date)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat = new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(), userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
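// usageNetworks appears to be a member buffer: cleared here, presumably filled by createNetworkHelperEntry, and flushed by saveUsageNetworks below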
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of vm disk stats for an account, across all of its vms and volumes
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskStats != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey = vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat = new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the vm disk stats, create delta entries in the vm disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskUsages != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
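// same alignment as in the archived version above: step back from endDate in _aggregationDuration-minute increments so the per-window processing below starts just after startDate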
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() + "; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = Long.valueOf(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// record the outcome of this run (success reflects whether parsing completed)
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, Long.valueOf(0), "Usage job failed. Job id: " + job.getId(), "Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
}
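Both parse implementations share the same window-alignment arithmetic. A minimal, runnable sketch of just that logic, with an illustrative aggregation duration (none of the names below are CloudStack API):

import java.util.Calendar;
import java.util.Date;
import java.util.TimeZone;

public class AggregationWindowSketch {
    public static void main(String[] args) {
        int aggregationMinutes = 60; // illustrative stand-in for _aggregationDuration
        Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
        Date endDate = new Date();
        Date startDate = new Date(endDate.getTime() - 5 * 60 * 60 * 1000L); // five hours earlier
        // walk back from endDate until within one minute of startDate
        Date tempDate = endDate;
        Date currentEndDate = endDate;
        while (tempDate.after(startDate) && (tempDate.getTime() - startDate.getTime()) > 60000) {
            currentEndDate = tempDate;
            cal.setTime(tempDate);
            cal.add(Calendar.MINUTE, -aggregationMinutes);
            tempDate = cal.getTime();
        }
        // march aggregation-sized windows forward until endDate is covered
        Date currentStartDate = startDate;
        while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
            System.out.println("window: " + currentStartDate + " .. " + currentEndDate);
            currentStartDate = new Date(currentEndDate.getTime() + 1);
            cal.setTime(currentEndDate);
            cal.add(Calendar.MINUTE, aggregationMinutes);
            currentEndDate = cal.getTime();
        }
    }
}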