use of org.ovirt.engine.core.utils.lock.EngineLock in project ovirt-engine by oVirt.
the class VdsEventListener method restartVmsWithLease.
@Override
public void restartVmsWithLease(List<Guid> vmIds) {
    if (vmIds.isEmpty()) {
        return;
    }
    // an empty engine lock, used only to create the internal job context for the RunVm commands
    EngineLock engineLock = new EngineLock(Collections.emptyMap(), Collections.emptyMap());
    ThreadPoolUtil.execute(() -> {
        for (Guid vmId : vmIds) {
            // clear any stale async-running state for the VM before restarting it
            resourceManagerProvider.get().removeAsyncRunningVm(vmId);
            backend.runInternalAction(ActionType.RunVm,
                    buildRunVmParameters(vmId),
                    ExecutionHandler.createInternalJobContext(engineLock));
        }
    });
}
use of org.ovirt.engine.core.utils.lock.EngineLock in project ovirt-engine by oVirt.
the class RemoveVdsCommand method glusterHostRemove.
private void glusterHostRemove() {
    if (clusterHasMultipleHosts() && !hasVolumeBricksOnServer()) {
        try (EngineLock lock = glusterUtil.acquireGlusterLockWait(getClusterId())) {
            VDSReturnValue returnValue = runVdsCommand(VDSCommandType.RemoveGlusterServer,
                    new RemoveGlusterServerVDSParameters(upServer.getId(), getVds().getHostName(), getParameters().isForceAction()));
            // If the host has already been removed from the cluster using the Gluster CLI, we can setSucceeded to true.
            setSucceeded(returnValue.getSucceeded()
                    || EngineError.GlusterHostIsNotPartOfCluster == returnValue.getVdsError().getCode());
            if (!getSucceeded()) {
                if (returnValue.getVdsError().getCode() == EngineError.GlusterHostRemoveFailedException) {
                    List<GlusterServerInfo> glusterServers = getGlusterPeers(upServer);
                    if (glusterServers != null) {
                        if (!glusterUtil.isHostExists(glusterServers, getVds())) {
                            setSucceeded(true);
                        }
                    }
                }
                if (!getSucceeded()) {
                    getReturnValue().getFault().setError(returnValue.getVdsError().getCode());
                    getReturnValue().getFault().setMessage(returnValue.getVdsError().getMessage());
                    errorType = AuditLogType.GLUSTER_SERVER_REMOVE_FAILED;
                    return;
                }
            }
            // if this is the last but one host in the cluster, update the last host's known addresses
            if (clusterUtils.getServerCount(getClusterId()) == 2) {
                removeOtherKnowAddressesForGlusterServer(upServer.getId());
            }
        }
    }
}
use of org.ovirt.engine.core.utils.lock.EngineLock in project ovirt-engine by oVirt.
the class InitGlusterCommandHelper method initGlusterPeerProcess.
/**
 * This method executes a "gluster peer probe" to add the newly added host to the cluster. This
 * is done only if there is another UP server in the cluster and the host being added is not already
 * part of the UP server's peer list.
 * A wait lock is acquired only for the duration of the gluster peer process (a wait lock, because
 * there is a periodic job that also acquires the same lock).
 */
private boolean initGlusterPeerProcess(VDS vds) {
    // lock the cluster so that the periodic gluster sync job does not run concurrently and
    // race with the peer probe
    try (EngineLock lock = glusterUtil.acquireGlusterLockWait(vds.getClusterId())) {
        Map<String, String> customLogValues = new HashMap<>();
        List<VDS> vdsList = vdsDao.getAllForClusterWithStatus(vds.getClusterId(), VDSStatus.Up);
        // If the cluster already has Gluster servers, get an UP server
        if (!vdsList.isEmpty()) {
            // If the new server is not part of the existing gluster peers, add it to the peer group
            Optional<VDS> potentialUpServer =
                    vdsList.stream().filter(existingVds -> !vds.getId().equals(existingVds.getId())).findFirst();
            if (potentialUpServer.isPresent()) {
                VDS upServer = potentialUpServer.get();
                List<GlusterServerInfo> glusterServers = getGlusterPeers(upServer);
                customLogValues.put("Server", upServer.getHostName());
                if (glusterServers.isEmpty()) {
                    customLogValues.put("Command", "gluster peer status");
                    setNonOperational(vds, NonOperationalReason.GLUSTER_COMMAND_FAILED, customLogValues);
                    return false;
                } else if (!glusterUtil.isHostExists(glusterServers, vds)) {
                    if (!glusterPeerProbe(vds, upServer.getId(), vds.getHostName())) {
                        customLogValues.put("Command", "gluster peer probe " + vds.getHostName());
                        setNonOperational(vds, NonOperationalReason.GLUSTER_COMMAND_FAILED, customLogValues);
                        return false;
                    }
                    int retries = 0;
                    while (retries < getMaxRetriesGlusterProbeStatus()) {
                        // Even though the gluster peer probe succeeds, it takes some time for the host to be
                        // listed as a peer. Return success only when the host is acknowledged as a peer
                        // by another UP server.
                        VDS newUpServer = getNewUpServer(vds, upServer);
                        if (newUpServer == null) {
                            // there is no other UP server, so there is no issue with the peer status results
                            return true;
                        }
                        List<GlusterServerInfo> newGlusterServers = getGlusterPeers(newUpServer);
                        if (!glusterUtil.isHostExists(newGlusterServers, vds)) {
                            log.info("Failed to find host '{}' in gluster peer list from '{}' on attempt {}",
                                    vds, newUpServer, ++retries);
                            // if the maximum number of attempts has been reached, give up
                            if (retries == getMaxRetriesGlusterProbeStatus()) {
                                customLogValues.put("Command", "gluster peer status " + vds.getHostName());
                                setNonOperational(vds, NonOperationalReason.GLUSTER_COMMAND_FAILED, customLogValues);
                                return false;
                            }
                            try {
                                // give the gluster peer probe time to propagate to the servers
                                Thread.sleep(1000);
                            } catch (Exception e) {
                                log.error(e.getMessage());
                                break;
                            }
                        } else {
                            return true;
                        }
                    }
                }
            }
        }
        return true;
    }
}
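The retry loop above boils down to a bounded poll with a fixed back-off: probe once, then keep re-reading the peer list until the new host appears or the attempt budget is exhausted. A minimal, self-contained sketch of that pattern for reference (the isPeerListed, maxRetries and sleepMillis names are hypothetical, not ovirt-engine API):
import java.util.function.BooleanSupplier;

public final class BoundedPoll {
    /**
     * Polls the supplied check up to maxRetries times, sleeping between attempts.
     * Mirrors the peer-probe wait in initGlusterPeerProcess: re-check membership
     * until the new host shows up or we run out of attempts.
     */
    public static boolean waitUntil(BooleanSupplier isPeerListed, int maxRetries, long sleepMillis) {
        for (int attempt = 1; attempt <= maxRetries; attempt++) {
            if (isPeerListed.getAsBoolean()) {
                return true;
            }
            try {
                Thread.sleep(sleepMillis);   // give the change time to propagate
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return false;
            }
        }
        return false;
    }
}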
use of org.ovirt.engine.core.utils.lock.EngineLock in project ovirt-engine by oVirt.
the class ActivateVdsCommand method executeCommand.
@Override
protected void executeCommand() {
    final VDS vds = getVds();
    try (EngineLock monitoringLock = acquireMonitorLock("Activate host")) {
        executionHandler.updateSpecificActionJobCompleted(vds.getId(), ActionType.MaintenanceVds, false);
        setSucceeded(setVdsStatus(VDSStatus.Unassigned).getSucceeded());
        if (getSucceeded()) {
            TransactionSupport.executeInNewTransaction(() -> {
                // set networks to operational / non-operational
                List<Network> networks = networkDao.getAllForCluster(vds.getClusterId());
                networkClusterHelper.setStatus(vds.getClusterId(), networks);
                return null;
            });
            // Start the glusterd service on the node, which would have been stopped due to maintenance
            if (vds.getClusterSupportsGlusterService()) {
                runVdsCommand(VDSCommandType.ManageGlusterService,
                        new GlusterServiceVDSParameters(vds.getId(), Arrays.asList("glusterd"), "restart"));
            }
        }
    }
}
use of org.ovirt.engine.core.utils.lock.EngineLock in project ovirt-engine by oVirt.
the class CommandBase method acquireLockAndWait.
private void acquireLockAndWait() {
    // if commandLock is null then we acquire a new lock; otherwise we probably got the lock from the caller command
    if (context.getLock() == null) {
        Map<String, Pair<String, String>> exclusiveLocks = getExclusiveLocks();
        if (exclusiveLocks != null) {
            EngineLock lock = new EngineLock(exclusiveLocks, null);
            log.info("Before acquiring and wait lock '{}'", lock);
            lockManager.acquireLockWait(lock);
            context.withLock(lock);
            log.info("Lock-wait acquired to object '{}'", lock);
        }
    }
}
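Taken together, these call sites show the common EngineLock life cycle: build a map of exclusive (and optionally shared) locks, acquire it through the lock manager (blocking, with acquireLockWait), and let try-with-resources release it. A minimal sketch of that pattern, assuming a field-injected LockManager and using an illustrative lock-group name and message rather than the constants ovirt-engine actually defines:
// A simplified sketch (not taken from ovirt-engine): lockManager is assumed to be an injected
// org.ovirt.engine.core.utils.lock.LockManager, and the lock-group name "GLUSTER" plus the
// message string are illustrative placeholders.
private void runGuarded(Guid clusterId) {
    Map<String, Pair<String, String>> exclusiveLocks = Collections.singletonMap(
            clusterId.toString(),
            new Pair<>("GLUSTER", "ACTION_TYPE_FAILED_OBJECT_LOCKED"));
    // the second constructor argument is the shared-lock map; null means no shared locks,
    // as in acquireLockAndWait above
    try (EngineLock lock = new EngineLock(exclusiveLocks, null)) {
        lockManager.acquireLockWait(lock);   // blocks until the exclusive lock is free
        // ... do the work that must not run concurrently for this cluster ...
    }
    // EngineLock is AutoCloseable, so leaving the try block releases the lock,
    // which is what the glusterUtil.acquireGlusterLockWait(...) usages above rely on
}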