Use of org.ovirt.engine.core.common.utils.Pair in the project ovirt-engine by oVirt.
Class GlusterHookStatusChangeCommand, method executeCommand:
/**
 * Applies the hook enable/disable command on every UP server in the cluster,
 * then reconciles the hook's status and conflict state in the database.
 */
@Override
protected void executeCommand() {
    entity = getGlusterHook();
    addCustomValue(GlusterConstants.HOOK_NAME, entity.getName());

    // A partially-up cluster means some servers won't receive the change;
    // record that as an error so it is reflected as a status conflict below.
    if (getAllUpServers().size() < clusterUtils.getServerCount(getGlusterHook().getClusterId())) {
        errors.add(EngineMessage.CLUSTER_ALL_SERVERS_NOT_UP.toString());
    }

    // Build one status-change task per UP server; they run in parallel.
    List<Callable<Pair<VDS, VDSReturnValue>>> statusChangeTasks = new ArrayList<>();
    for (final VDS server : getAllUpServers()) {
        statusChangeTasks.add(() -> {
            VDSReturnValue result = runVdsCommand(getStatusChangeVDSCommand(),
                    new GlusterHookVDSParameters(server.getId(), entity.getGlusterCommand(), entity.getStage(), entity.getName()));
            return new Pair<>(server, result);
        });
    }

    boolean anySucceeded = false;
    for (Pair<VDS, VDSReturnValue> serverAndResult : ThreadPoolUtil.invokeAll(statusChangeTasks)) {
        VDSReturnValue result = serverAndResult.getSecond();
        if (!result.getSucceeded()) {
            errors.add(result.getVdsError().getMessage());
            continue;
        }
        anySucceeded = true;
        // Persist the per-server hook status as soon as the server reports success.
        updateServerHookStatusInDb(getGlusterHook().getId(), serverAndResult.getFirst().getId(), getNewStatus());
    }
    setSucceeded(anySucceeded);

    if (!errors.isEmpty()) {
        // At least one server failed (or was down): hook status is in conflict.
        entity.addStatusConflict();
        handleVdsErrors(getAuditLogTypeValue(), errors);
        addCustomValue(GlusterConstants.FAILURE_MESSAGE, StringUtils.join(errors, System.lineSeparator()));
    }

    // The intention was to enable/disable the hook, so on any success we
    // record the new status on the entity.
    if (getSucceeded()) {
        entity.setStatus(getNewStatus());
        // All reachable servers now share the same status — no status conflict.
        entity.removeStatusConflict();
        updateHookInDb(entity);
        if (entity.getConflictStatus() == 0) {
            // All conflict kinds resolved: per-server hook rows are redundant.
            glusterHooksDao.removeGlusterServerHooks(entity.getId());
        }
    }
}
Use of org.ovirt.engine.core.common.utils.Pair in the project ovirt-engine by oVirt.
Class GlusterHookSyncJob, method saveHookContent:
/**
 * Runs the given content-fetch tasks in parallel and persists the retrieved
 * hook content. A failed fetch is logged and audited but does not abort the
 * rest of the batch.
 */
private void saveHookContent(List<Callable<Pair<GlusterHookEntity, VDSReturnValue>>> contentTasksList) {
    if (contentTasksList.isEmpty()) {
        return;
    }
    for (Pair<GlusterHookEntity, VDSReturnValue> hookAndResult : ThreadPoolUtil.invokeAll(contentTasksList)) {
        GlusterHookEntity hook = hookAndResult.getFirst();
        VDSReturnValue result = hookAndResult.getSecond();
        if (result.getSucceeded()) {
            // Content arrives as the raw VDS return value; store it together
            // with the checksum already computed on the hook entity.
            String content = (String) result.getReturnValue();
            hooksDao.updateGlusterHookContent(hook.getId(), hook.getChecksum(), content);
        } else {
            log.info("Failed to get content of hook '{}' with error: {}", hook.getHookKey(), result.getVdsError().getMessage());
            logMessage(hook.getClusterId(), hook.getHookKey(), AuditLogType.GLUSTER_HOOK_GETCONTENT_FAILED);
        }
    }
}
Use of org.ovirt.engine.core.common.utils.Pair in the project ovirt-engine by oVirt.
Class CreateBrickCommand, method runAnsibleCreateBrickPlaybook:
/**
 * Runs the "create brick" Ansible playbook on this command's host, creating an
 * LV sized from the combined capacity of the selected disks minus LVM metadata
 * overhead.
 *
 * @throws IOException if serializing the disk list to JSON fails
 * @throws InterruptedException if the ansible execution is interrupted
 * @throws EngineException if the playbook finishes with a non-OK return code
 */
private void runAnsibleCreateBrickPlaybook() throws IOException, InterruptedException {
    List<String> disks = new ArrayList<>();
    // Primitive accumulator: the previous boxed Double caused an unbox/re-box
    // on every loop iteration.
    double totalSizeMiB = 0.0;
    for (StorageDevice device : getParameters().getDisks()) {
        disks.add(device.getDevPath());
        // size is returned in MiB
        totalSizeMiB += device.getSize();
    }

    // Reserve space for LVM metadata: a percentage for small VGs, a fixed
    // amount for large ones.
    if (totalSizeMiB < MIN_VG_SIZE) {
        totalSizeMiB -= MIN_METADATA_PERCENT * totalSizeMiB;
    } else {
        totalSizeMiB -= DEFAULT_METADATA_SIZE_MB;
    }
    Pair<SizeUnit, Double> convertedSize = SizeConverter.autoConvert((long) totalSizeMiB, SizeUnit.MiB);
    String deviceSize = convertedSize.getSecond() + convertedSize.getFirst().toString();

    // Optional SSD cache device; an empty string disables caching in the playbook.
    String ssdDevice = getParameters().getCacheDevice() != null
            ? getParameters().getCacheDevice().getDevPath()
            : "";

    int diskCount = getParameters().getNoOfPhysicalDisksInRaidVolume() == null
            ? 1
            : getParameters().getNoOfPhysicalDisksInRaidVolume();

    AnsibleCommandBuilder command = new AnsibleCommandBuilder()
            .hostnames(getVds().getHostName())
            .variables(
                    new Pair<>("ssd", ssdDevice),
                    new Pair<>("disks", JsonHelper.objectToJson(disks, false)),
                    new Pair<>("vgname", "RHGS_vg_" + getParameters().getLvName()),
                    new Pair<>("size", deviceSize),
                    new Pair<>("diskcount", diskCount),
                    new Pair<>("stripesize", getParameters().getStripeSize()),
                    new Pair<>("wipefs", "yes"),
                    new Pair<>("disktype", getParameters().getRaidType().toString()),
                    new Pair<>("lvname", getParameters().getLvName() + "_lv"),
                    new Pair<>("cache_lvname", getParameters().getLvName() + "_cache_lv"),
                    new Pair<>("cache_lvsize", getParameters().getCacheSize() + "GiB"),
                    new Pair<>("cachemode", getParameters().getCacheMode()),
                    new Pair<>("fstype", GlusterConstants.FS_TYPE_XFS),
                    new Pair<>("mntpath", getParameters().getMountPoint()))
            .logFileDirectory(CreateBrickCommand.CREATE_BRICK_LOG_DIRECTORY)
            .logFilePrefix("ovirt-gluster-brick-ansible")
            .logFileName(getVds().getHostName())
            .logFileSuffix(getCorrelationId())
            .playbook(AnsibleConstants.CREATE_BRICK_PLAYBOOK);

    AnsibleReturnValue ansibleReturnValue = ansibleExecutor.runCommand(command);
    if (ansibleReturnValue.getAnsibleReturnCode() != AnsibleReturnCode.OK) {
        log.error("Failed to execute Ansible create brick role. Please check logs for more details: {}",
                command.logFile());
        throw new EngineException(EngineError.GeneralException,
                String.format("Failed to execute Ansible create brick role. Please check logs for more details: %1$s",
                        command.logFile()));
    }
}
Use of org.ovirt.engine.core.common.utils.Pair in the project ovirt-engine by oVirt.
Class ProcessOvfUpdateForStorageDomainCommand, method buildOvfInfoFileByteArray:
/**
 * Builds an in-memory tar archive holding the info file, the OVF data of the
 * given VMs/templates (loaded from the DB in bounded chunks), any unprocessed
 * unregistered OVF data, and a metadata JSON entry.
 *
 * @param vmAndTemplatesIds ids whose OVF data should be packed into the tar
 * @return the raw bytes of the tar archive
 */
private byte[] buildOvfInfoFileByteArray(List<Guid> vmAndTemplatesIds) {
    ByteArrayOutputStream tarBytes = new ByteArrayOutputStream();
    Set<Guid> processedIds = new HashSet<>();
    try (InMemoryTar tar = new InMemoryTar(tarBytes)) {
        tar.addTarEntry(generateInfoFileData().getBytes(), OvfInfoFileConstants.InfoFileName);
        Map<String, Object> metaDataForEntities = generateMetaDataFile(vmAndTemplatesIds);
        // Load OVF data in chunks so each DB call stays within the SQL
        // statement item limit.
        int total = vmAndTemplatesIds.size();
        for (int start = 0; start < total; start += StorageConstants.OVF_MAX_ITEMS_PER_SQL_STATEMENT) {
            int end = Math.min(start + StorageConstants.OVF_MAX_ITEMS_PER_SQL_STATEMENT, total);
            List<Pair<Guid, String>> ovfs =
                    vmAndTemplatesGenerationsDao.loadOvfDataForIds(vmAndTemplatesIds.subList(start, end));
            if (!ovfs.isEmpty()) {
                processedIds.addAll(buildFilesForOvfs(ovfs, tar));
            }
        }
        List<Pair<Guid, String>> unprocessedOvfData =
                retrieveUnprocessedUnregisteredOvfData(processedIds, metaDataForEntities);
        tar.addTarEntry(buildJson(metaDataForEntities, true).getBytes(), OvfInfoFileConstants.MetaDataFileName);
        buildFilesForOvfs(unprocessedOvfData, tar);
    } catch (Exception e) {
        throw new RuntimeException(String.format("Exception while building in memory tar of the OVFs of domain %s", getParameters().getStorageDomainId()), e);
    }
    return tarBytes.toByteArray();
}
Use of org.ovirt.engine.core.common.utils.Pair in the project ovirt-engine by oVirt.
Class RefreshLunsSizeCommand, method getDeviceListAllVds:
/**
 * Calls GetDeviceList with the specified LUNs on every running host in the
 * pool. In VDSM, this call will resize the devices if needed. The results are
 * grouped by LUN ID so callers can check whether all hosts see the same LUN
 * sizes.
 *
 * @param lunsToResize the LUN IDs to refresh on each host
 * @return map from LUN ID to a list of (host, LUN-as-seen-by-that-host) pairs
 */
private Map<String, List<Pair<VDS, LUNs>>> getDeviceListAllVds(Set<String> lunsToResize) {
    Map<String, List<Pair<VDS, LUNs>>> devicesByLunId = new HashMap<>();
    for (VDS host : getAllRunningVdssInPool()) {
        GetDeviceListVDSCommandParameters params = new GetDeviceListVDSCommandParameters(
                host.getId(), getStorageDomain().getStorageType(), false, lunsToResize);
        List<LUNs> deviceList = (List<LUNs>) runVdsCommand(VDSCommandType.GetDeviceList, params).getReturnValue();
        for (LUNs device : deviceList) {
            devicesByLunId.computeIfAbsent(device.getLUNId(), id -> new ArrayList<>())
                    .add(new Pair<>(host, device));
        }
    }
    return devicesByLunId;
}
Aggregations