Use of com.cloud.cluster.agentlb.HostTransferMapVO in project cloudstack by apache.
From the class ClusteredAgentManagerImpl, method getTransferScanTask:
private Runnable getTransferScanTask() {
    return new ManagedContextRunnable() {
        @Override
        protected void runInContext() {
            try {
                if (s_logger.isTraceEnabled()) {
                    s_logger.trace("Clustered agent transfer scan check, management server id:" + _nodeId);
                }
                synchronized (_agentToTransferIds) {
                    if (_agentToTransferIds.size() > 0) {
                        s_logger.debug("Found " + _agentToTransferIds.size() + " agents to transfer");
                        // for (Long hostId : _agentToTransferIds) {
                        for (final Iterator<Long> iterator = _agentToTransferIds.iterator(); iterator.hasNext(); ) {
                            final Long hostId = iterator.next();
                            final AgentAttache attache = findAttache(hostId);
                            // if the thread:
                            // 1) timed out waiting for the host to reconnect
                            // 2) recipient management server is not active any more
                            // 3) if the management server doesn't own the host any more
                            // remove the host from re-balance list and delete from op_host_transfer DB
                            // no need to do anything with the real attache as we haven't modified it yet
                            final Date cutTime = DateUtil.currentGMTTime();
                            final HostTransferMapVO transferMap = _hostTransferDao.findActiveHostTransferMapByHostId(hostId, new Date(cutTime.getTime() - rebalanceTimeOut));
                            if (transferMap == null) {
                                s_logger.debug("Timed out waiting for the host id=" + hostId + " to be ready to transfer, skipping rebalance for the host");
                                iterator.remove();
                                _hostTransferDao.completeAgentTransfer(hostId);
                                continue;
                            }
                            if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) {
                                s_logger.debug("Management server " + _nodeId + " doesn't own host id=" + hostId + " any more, skipping rebalance for the host");
                                iterator.remove();
                                _hostTransferDao.completeAgentTransfer(hostId);
                                continue;
                            }
                            final ManagementServerHostVO ms = _mshostDao.findByMsid(transferMap.getFutureOwner());
                            if (ms != null && ms.getState() != ManagementServerHost.State.Up) {
                                s_logger.debug("Can't transfer host " + hostId + " as it's future owner is not in UP state: " + ms + ", skipping rebalance for the host");
                                iterator.remove();
                                _hostTransferDao.completeAgentTransfer(hostId);
                                continue;
                            }
                            if (attache.getQueueSize() == 0 && attache.getNonRecurringListenersSize() == 0) {
                                iterator.remove();
                                try {
                                    _executor.execute(new RebalanceTask(hostId, transferMap.getInitialOwner(), transferMap.getFutureOwner()));
                                } catch (final RejectedExecutionException ex) {
                                    s_logger.warn("Failed to submit rebalance task for host id=" + hostId + "; postponing the execution");
                                    continue;
                                }
                            } else {
                                s_logger.debug("Agent " + hostId + " can't be transfered yet as its request queue size is " + attache.getQueueSize() + " and listener queue size is " + attache.getNonRecurringListenersSize());
                            }
                        }
                    } else {
                        if (s_logger.isTraceEnabled()) {
                            s_logger.trace("Found no agents to be transfered by the management server " + _nodeId);
                        }
                    }
                }
            } catch (final Throwable e) {
                s_logger.error("Problem with the clustered agent transfer scan check!", e);
            }
        }
    };
}
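The scan task above prunes _agentToTransferIds while walking it, which is only safe through the Iterator's own remove(). A minimal standalone sketch of that pattern, with a placeholder set name and a dummy skip condition standing in for the timeout/ownership/queue checks (none of this is CloudStack code):

import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

public class TransferScanSketch {
    // stand-in for _agentToTransferIds; any Set behaves the same way here
    private final Set<Long> agentsToTransfer = new HashSet<>();

    public void scan() {
        synchronized (agentsToTransfer) {
            for (final Iterator<Long> it = agentsToTransfer.iterator(); it.hasNext(); ) {
                final Long hostId = it.next();
                if (notReadyForTransfer(hostId)) {
                    // it.remove() is the only safe way to drop the entry mid-loop;
                    // calling agentsToTransfer.remove(hostId) directly would make the
                    // next it.next() throw ConcurrentModificationException
                    it.remove();
                }
            }
        }
    }

    // dummy stand-in for the timeout/ownership/queue-size checks in the real method
    private boolean notReadyForTransfer(final Long hostId) {
        return hostId % 2 == 0;
    }
}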
Use of com.cloud.cluster.agentlb.HostTransferMapVO in project cloudstack by apache.
From the class ClusteredAgentManagerImpl, method startRebalanceAgents:
public void startRebalanceAgents() {
    s_logger.debug("Management server " + _nodeId + " is asking other peers to rebalance their agents");
    final List<ManagementServerHostVO> allMS = _mshostDao.listBy(ManagementServerHost.State.Up);
    final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
    sc.and(sc.entity().getManagementServerId(), Op.NNULL);
    sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing);
    final List<HostVO> allManagedAgents = sc.list();
    int avLoad = 0;
    if (!allManagedAgents.isEmpty() && !allMS.isEmpty()) {
        avLoad = allManagedAgents.size() / allMS.size();
    } else {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("There are no hosts to rebalance in the system. Current number of active management server nodes in the system is " + allMS.size() + "; number of managed agents is " + allManagedAgents.size());
        }
        return;
    }
    if (avLoad == 0L) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("As calculated average load is less than 1, rounding it to 1");
        }
        avLoad = 1;
    }
    for (final ManagementServerHostVO node : allMS) {
        if (node.getMsid() != _nodeId) {
            List<HostVO> hostsToRebalance = new ArrayList<HostVO>();
            for (final AgentLoadBalancerPlanner lbPlanner : _lbPlanners) {
                hostsToRebalance = lbPlanner.getHostsToRebalance(node.getMsid(), avLoad);
                if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) {
                    break;
                } else {
                    s_logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid());
                }
            }
            if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) {
                s_logger.debug("Found " + hostsToRebalance.size() + " hosts to rebalance from management server " + node.getMsid());
                for (final HostVO host : hostsToRebalance) {
                    final long hostId = host.getId();
                    s_logger.debug("Asking management server " + node.getMsid() + " to give away host id=" + hostId);
                    boolean result = true;
                    if (_hostTransferDao.findById(hostId) != null) {
                        s_logger.warn("Somebody else is already rebalancing host id: " + hostId);
                        continue;
                    }
                    HostTransferMapVO transfer = null;
                    try {
                        transfer = _hostTransferDao.startAgentTransfering(hostId, node.getMsid(), _nodeId);
                        final Answer[] answer = sendRebalanceCommand(node.getMsid(), hostId, node.getMsid(), _nodeId, Event.RequestAgentRebalance);
                        if (answer == null) {
                            s_logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid());
                            result = false;
                        }
                    } catch (final Exception ex) {
                        s_logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid(), ex);
                        result = false;
                    } finally {
                        if (transfer != null) {
                            final HostTransferMapVO transferState = _hostTransferDao.findByIdAndFutureOwnerId(transfer.getId(), _nodeId);
                            if (!result && transferState != null && transferState.getState() == HostTransferState.TransferRequested) {
                                if (s_logger.isDebugEnabled()) {
                                    s_logger.debug("Removing mapping from op_host_transfer as it failed to be set to transfer mode");
                                }
                                // just remove the mapping (if exists) as nothing was done on the peer management
                                // server yet
                                _hostTransferDao.remove(transfer.getId());
                            }
                        }
                    }
                }
            } else {
                s_logger.debug("Found no hosts to rebalance from the management server " + node.getMsid());
            }
        }
    }
}
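The average-load computation above divides the count of managed routing hosts by the count of UP management servers using integer division, then rounds a zero result up to 1. A small self-contained sketch of just that arithmetic (names are illustrative, not taken from CloudStack):

public final class AverageLoadSketch {

    static int averageLoad(final int managedAgents, final int activeManagementServers) {
        if (managedAgents == 0 || activeManagementServers == 0) {
            return 0; // nothing to rebalance, mirroring the early return above
        }
        // integer division floors the result ...
        final int avLoad = managedAgents / activeManagementServers;
        // ... so any non-zero host count is clamped to a minimum load of 1 per node
        return Math.max(avLoad, 1);
    }

    public static void main(final String[] args) {
        System.out.println(averageLoad(5, 3)); // 5 / 3 = 1
        System.out.println(averageLoad(2, 3)); // 2 / 3 = 0, rounded up to 1
        System.out.println(averageLoad(0, 3)); // no managed agents -> 0
    }
}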
Use of com.cloud.cluster.agentlb.HostTransferMapVO in project cloudstack by apache.
From the class ClusteredAgentManagerImpl, method cleanupTransferMap:
protected void cleanupTransferMap(final long msId) {
    final List<HostTransferMapVO> hostsJoingingCluster = _hostTransferDao.listHostsJoiningCluster(msId);
    for (final HostTransferMapVO hostJoingingCluster : hostsJoingingCluster) {
        _hostTransferDao.remove(hostJoingingCluster.getId());
    }
    final List<HostTransferMapVO> hostsLeavingCluster = _hostTransferDao.listHostsLeavingCluster(msId);
    for (final HostTransferMapVO hostLeavingCluster : hostsLeavingCluster) {
        _hostTransferDao.remove(hostLeavingCluster.getId());
    }
}
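cleanupTransferMap drops the op_host_transfer rows in both directions for a management server id: hosts joining its cluster and hosts leaving it. A behavior-equivalent sketch against a cut-down DAO interface (TransferMapDao and TransferMapCleanup are assumptions for illustration only, limited to the three calls the method actually makes):

import java.util.ArrayList;
import java.util.List;

import com.cloud.cluster.agentlb.HostTransferMapVO;

// Reduced stand-in for HostTransferMapDao, not the real interface.
interface TransferMapDao {
    List<HostTransferMapVO> listHostsJoiningCluster(long msId);
    List<HostTransferMapVO> listHostsLeavingCluster(long msId);
    boolean remove(Long id);
}

class TransferMapCleanup {

    private final TransferMapDao dao;

    TransferMapCleanup(final TransferMapDao dao) {
        this.dao = dao;
    }

    // Same effect as cleanupTransferMap: collect pending transfers in both
    // directions for msId and delete each row by its id.
    void cleanup(final long msId) {
        final List<HostTransferMapVO> pending = new ArrayList<>();
        pending.addAll(dao.listHostsJoiningCluster(msId));
        pending.addAll(dao.listHostsLeavingCluster(msId));
        for (final HostTransferMapVO row : pending) {
            dao.remove(row.getId());
        }
    }
}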
Use of com.cloud.cluster.agentlb.HostTransferMapVO in project cloudstack by apache.
From the class HostTransferMapDaoImpl, method startAgentTransfer:
@Override
public boolean startAgentTransfer(long hostId) {
    HostTransferMapVO transfer = findById(hostId);
    transfer.setState(HostTransferState.TransferStarted);
    return update(hostId, transfer);
}
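startAgentTransfer assumes the op_host_transfer row already exists (startAgentTransfering is expected to have created it, as in startRebalanceAgents above), so findById feeding straight into setState would throw a NullPointerException for a host id with no pending transfer. A hedged defensive variant, not the CloudStack implementation, for callers without that guarantee:

@Override
public boolean startAgentTransfer(final long hostId) {
    final HostTransferMapVO transfer = findById(hostId);
    if (transfer == null) {
        // no pending transfer recorded for this host; nothing to start
        return false;
    }
    transfer.setState(HostTransferState.TransferStarted);
    return update(hostId, transfer);
}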